Private
Public Access
1
0

Package restructure and API changes, several fixes

- More emphasis on `api` package. It now holds database model structs
  from `lmcli/models` (which is now gone) as well as the tool spec,
  call, and result types. `tools.Tool` is now `api.ToolSpec`.
  `api.ChatCompletionClient` was renamed to
  `api.ChatCompletionProvider`.

- Change ChatCompletion interface and implementations to no longer do
  automatic tool call recursion - they simply return a ToolCall message
  which the caller can decide what to do with (e.g. prompt for user
  confirmation before executing)

- `api.ChatCompletionProvider` functions have had their ReplyCallback
  parameter removed, as now they only return a single reply.

- Added a top-level `agent` package, moved the current built-in tools
  implementations under `agent/toolbox`. `tools.ExecuteToolCalls` is now
  `agent.ExecuteToolCalls`.

- Fixed request context handling in openai, google, ollama (use
  `NewRequestWithContext`), cleaned up request cancellation in TUI

- Fix tool call tui persistence bug (we were skipping messages with
  empty content)

- Now handle tool calling from TUI layer

TODO:
- Prompt users before executing tool calls
- Automatically send tool results to the model (or make this toggleable)
This commit is contained in:
2024-06-12 08:35:07 +00:00
parent 85a2abbbf3
commit 3fde58b77d
35 changed files with 608 additions and 749 deletions

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -36,7 +36,7 @@ func ContinueCmd(ctx *lmcli.Context) *cobra.Command {
}
lastMessage := &messages[len(messages)-1]
if lastMessage.Role != model.MessageRoleAssistant {
if lastMessage.Role != api.MessageRoleAssistant {
return fmt.Errorf("the last message in the conversation is not an assistant message")
}
@@ -50,7 +50,7 @@ func ContinueCmd(ctx *lmcli.Context) *cobra.Command {
}
// Append the new response to the original message
lastMessage.Content += strings.TrimRight(continuedOutput, "\n\t ")
lastMessage.Content += strings.TrimRight(continuedOutput.Content, "\n\t ")
// Update the original message
err = ctx.Store.UpdateMessage(lastMessage)

View File

@@ -3,9 +3,9 @@ package cmd
import (
"fmt"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -53,10 +53,10 @@ func EditCmd(ctx *lmcli.Context) *cobra.Command {
role, _ := cmd.Flags().GetString("role")
if role != "" {
if role != string(model.MessageRoleUser) && role != string(model.MessageRoleAssistant) {
if role != string(api.MessageRoleUser) && role != string(api.MessageRoleAssistant) {
return fmt.Errorf("Invalid role specified. Please use 'user' or 'assistant'.")
}
toEdit.Role = model.MessageRole(role)
toEdit.Role = api.MessageRole(role)
}
// Update the message in-place

View File

@@ -3,9 +3,9 @@ package cmd
import (
"fmt"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -20,19 +20,19 @@ func NewCmd(ctx *lmcli.Context) *cobra.Command {
return fmt.Errorf("No message was provided.")
}
var messages []model.Message
var messages []api.Message
// TODO: probably just make this part of the conversation
system := ctx.GetSystemPrompt()
if system != "" {
messages = append(messages, model.Message{
Role: model.MessageRoleSystem,
messages = append(messages, api.Message{
Role: api.MessageRoleSystem,
Content: system,
})
}
messages = append(messages, model.Message{
Role: model.MessageRoleUser,
messages = append(messages, api.Message{
Role: api.MessageRoleUser,
Content: input,
})

View File

@@ -3,9 +3,9 @@ package cmd
import (
"fmt"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -20,19 +20,19 @@ func PromptCmd(ctx *lmcli.Context) *cobra.Command {
return fmt.Errorf("No message was provided.")
}
var messages []model.Message
var messages []api.Message
// TODO: stop supplying system prompt as a message
system := ctx.GetSystemPrompt()
if system != "" {
messages = append(messages, model.Message{
Role: model.MessageRoleSystem,
messages = append(messages, api.Message{
Role: api.MessageRoleSystem,
Content: system,
})
}
messages = append(messages, model.Message{
Role: model.MessageRoleUser,
messages = append(messages, api.Message{
Role: api.MessageRoleUser,
Content: input,
})

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -23,7 +23,7 @@ func RemoveCmd(ctx *lmcli.Context) *cobra.Command {
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
var toRemove []*model.Conversation
var toRemove []*api.Conversation
for _, shortName := range args {
conversation := cmdutil.LookupConversation(ctx, shortName)
toRemove = append(toRemove, conversation)

View File

@@ -3,9 +3,9 @@ package cmd
import (
"fmt"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -30,8 +30,8 @@ func ReplyCmd(ctx *lmcli.Context) *cobra.Command {
return fmt.Errorf("No reply was provided.")
}
cmdutil.HandleConversationReply(ctx, conversation, true, model.Message{
Role: model.MessageRoleUser,
cmdutil.HandleConversationReply(ctx, conversation, true, api.Message{
Role: api.MessageRoleUser,
Content: reply,
})
return nil

View File

@@ -3,9 +3,9 @@ package cmd
import (
"fmt"
"git.mlow.ca/mlow/lmcli/pkg/api"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
@@ -43,11 +43,11 @@ func RetryCmd(ctx *lmcli.Context) *cobra.Command {
retryFromIdx := len(messages) - 1 - offset
// decrease retryFromIdx until we hit a user message
for retryFromIdx >= 0 && messages[retryFromIdx].Role != model.MessageRoleUser {
for retryFromIdx >= 0 && messages[retryFromIdx].Role != api.MessageRoleUser {
retryFromIdx--
}
if messages[retryFromIdx].Role != model.MessageRoleUser {
if messages[retryFromIdx].Role != api.MessageRoleUser {
return fmt.Errorf("No user messages to retry")
}

View File

@@ -10,36 +10,36 @@ import (
"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/util"
"github.com/charmbracelet/lipgloss"
)
// Prompt prompts the configured model and streams the response
// to stdout. Returns all model reply messages.
func Prompt(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
content := make(chan api.Chunk) // receives the response from LLM
defer close(content)
// render all content received over the channel
go ShowDelayedContent(content)
func Prompt(ctx *lmcli.Context, messages []api.Message, callback func(api.Message)) (*api.Message, error) {
m, provider, err := ctx.GetModelProvider(*ctx.Config.Defaults.Model)
if err != nil {
return "", err
return nil, err
}
requestParams := model.RequestParameters{
requestParams := api.RequestParameters{
Model: m,
MaxTokens: *ctx.Config.Defaults.MaxTokens,
Temperature: *ctx.Config.Defaults.Temperature,
ToolBag: ctx.EnabledTools,
}
response, err := provider.CreateChatCompletionStream(
context.Background(), requestParams, messages, callback, content,
content := make(chan api.Chunk)
defer close(content)
// render the content received over the channel
go ShowDelayedContent(content)
reply, err := provider.CreateChatCompletionStream(
context.Background(), requestParams, messages, content,
)
if response != "" {
if reply.Content != "" {
// there was some content, so break to a new line after it
fmt.Println()
@@ -48,12 +48,12 @@ func Prompt(ctx *lmcli.Context, messages []model.Message, callback func(model.Me
err = nil
}
}
return response, err
return reply, err
}
// lookupConversation either returns the conversation found by the
// short name or exits the program
func LookupConversation(ctx *lmcli.Context, shortName string) *model.Conversation {
func LookupConversation(ctx *lmcli.Context, shortName string) *api.Conversation {
c, err := ctx.Store.ConversationByShortName(shortName)
if err != nil {
lmcli.Fatal("Could not lookup conversation: %v\n", err)
@@ -64,7 +64,7 @@ func LookupConversation(ctx *lmcli.Context, shortName string) *model.Conversatio
return c
}
func LookupConversationE(ctx *lmcli.Context, shortName string) (*model.Conversation, error) {
func LookupConversationE(ctx *lmcli.Context, shortName string) (*api.Conversation, error) {
c, err := ctx.Store.ConversationByShortName(shortName)
if err != nil {
return nil, fmt.Errorf("Could not lookup conversation: %v", err)
@@ -75,7 +75,7 @@ func LookupConversationE(ctx *lmcli.Context, shortName string) (*model.Conversat
return c, nil
}
func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist bool, toSend ...model.Message) {
func HandleConversationReply(ctx *lmcli.Context, c *api.Conversation, persist bool, toSend ...api.Message) {
messages, err := ctx.Store.PathToLeaf(c.SelectedRoot)
if err != nil {
lmcli.Fatal("Could not load messages: %v\n", err)
@@ -85,7 +85,7 @@ func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist
// handleConversationReply handles sending messages to an existing
// conversation, optionally persisting both the sent replies and responses.
func HandleReply(ctx *lmcli.Context, to *model.Message, persist bool, messages ...model.Message) {
func HandleReply(ctx *lmcli.Context, to *api.Message, persist bool, messages ...api.Message) {
if to == nil {
lmcli.Fatal("Can't prompt from an empty message.")
}
@@ -97,7 +97,7 @@ func HandleReply(ctx *lmcli.Context, to *model.Message, persist bool, messages .
RenderConversation(ctx, append(existing, messages...), true)
var savedReplies []model.Message
var savedReplies []api.Message
if persist && len(messages) > 0 {
savedReplies, err = ctx.Store.Reply(to, messages...)
if err != nil {
@@ -106,15 +106,15 @@ func HandleReply(ctx *lmcli.Context, to *model.Message, persist bool, messages .
}
// render a message header with no contents
RenderMessage(ctx, (&model.Message{Role: model.MessageRoleAssistant}))
RenderMessage(ctx, (&api.Message{Role: api.MessageRoleAssistant}))
var lastSavedMessage *model.Message
var lastSavedMessage *api.Message
lastSavedMessage = to
if len(savedReplies) > 0 {
lastSavedMessage = &savedReplies[len(savedReplies)-1]
}
replyCallback := func(reply model.Message) {
replyCallback := func(reply api.Message) {
if !persist {
return
}
@@ -131,16 +131,16 @@ func HandleReply(ctx *lmcli.Context, to *model.Message, persist bool, messages .
}
}
func FormatForExternalPrompt(messages []model.Message, system bool) string {
func FormatForExternalPrompt(messages []api.Message, system bool) string {
sb := strings.Builder{}
for _, message := range messages {
if message.Content == "" {
continue
}
switch message.Role {
case model.MessageRoleAssistant, model.MessageRoleToolCall:
case api.MessageRoleAssistant, api.MessageRoleToolCall:
sb.WriteString("Assistant:\n\n")
case model.MessageRoleUser:
case api.MessageRoleUser:
sb.WriteString("User:\n\n")
default:
continue
@@ -150,7 +150,7 @@ func FormatForExternalPrompt(messages []model.Message, system bool) string {
return sb.String()
}
func GenerateTitle(ctx *lmcli.Context, messages []model.Message) (string, error) {
func GenerateTitle(ctx *lmcli.Context, messages []api.Message) (string, error) {
const systemPrompt = `You will be shown a conversation between a user and an AI assistant. Your task is to generate a short title (8 words or less) for the provided conversation that reflects the conversation's topic. Your response is expected to be in JSON in the format shown below.
Example conversation:
@@ -177,28 +177,32 @@ Example response:
return "", err
}
generateRequest := []model.Message{
generateRequest := []api.Message{
{
Role: model.MessageRoleSystem,
Role: api.MessageRoleSystem,
Content: systemPrompt,
},
{
Role: model.MessageRoleUser,
Role: api.MessageRoleUser,
Content: string(conversation),
},
}
m, provider, err := ctx.GetModelProvider(*ctx.Config.Conversations.TitleGenerationModel)
m, provider, err := ctx.GetModelProvider(
*ctx.Config.Conversations.TitleGenerationModel,
)
if err != nil {
return "", err
}
requestParams := model.RequestParameters{
requestParams := api.RequestParameters{
Model: m,
MaxTokens: 25,
}
response, err := provider.CreateChatCompletion(context.Background(), requestParams, generateRequest, nil)
response, err := provider.CreateChatCompletion(
context.Background(), requestParams, generateRequest,
)
if err != nil {
return "", err
}
@@ -207,7 +211,7 @@ Example response:
var jsonResponse struct {
Title string `json:"title"`
}
err = json.Unmarshal([]byte(response), &jsonResponse)
err = json.Unmarshal([]byte(response.Content), &jsonResponse)
if err != nil {
return "", err
}
@@ -272,7 +276,7 @@ func ShowDelayedContent(content <-chan api.Chunk) {
// RenderConversation renders the given messages to TTY, with optional space
// for a subsequent message. spaceForResponse controls how many '\n' characters
// are printed immediately after the final message (1 if false, 2 if true)
func RenderConversation(ctx *lmcli.Context, messages []model.Message, spaceForResponse bool) {
func RenderConversation(ctx *lmcli.Context, messages []api.Message, spaceForResponse bool) {
l := len(messages)
for i, message := range messages {
RenderMessage(ctx, &message)
@@ -283,7 +287,7 @@ func RenderConversation(ctx *lmcli.Context, messages []model.Message, spaceForRe
}
}
func RenderMessage(ctx *lmcli.Context, m *model.Message) {
func RenderMessage(ctx *lmcli.Context, m *api.Message) {
var messageAge string
if m.CreatedAt.IsZero() {
messageAge = "now"
@@ -295,11 +299,11 @@ func RenderMessage(ctx *lmcli.Context, m *model.Message) {
headerStyle := lipgloss.NewStyle().Bold(true)
switch m.Role {
case model.MessageRoleSystem:
case api.MessageRoleSystem:
headerStyle = headerStyle.Foreground(lipgloss.Color("9")) // bright red
case model.MessageRoleUser:
case api.MessageRoleUser:
headerStyle = headerStyle.Foreground(lipgloss.Color("10")) // bright green
case model.MessageRoleAssistant:
case api.MessageRoleAssistant:
headerStyle = headerStyle.Foreground(lipgloss.Color("12")) // bright blue
}

View File

@@ -20,7 +20,7 @@ func ViewCmd(ctx *lmcli.Context) *cobra.Command {
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
shortName := args[0]
conversation := cmdutil.LookupConversation(ctx, shortName)