Package restructure and API changes, several fixes

- Moved more into the `api` package: it now holds the database model structs
  from `lmcli/models` (which is now gone), as well as the tool spec, call,
  and result types. `tools.Tool` is now `api.ToolSpec`, and
  `api.ChatCompletionClient` was renamed to `api.ChatCompletionProvider`.

- Changed the ChatCompletion interface and its implementations to no longer
  perform automatic tool call recursion: they now simply return a ToolCall
  message, and the caller decides what to do with it (e.g. prompt for user
  confirmation before executing) - see the sketch after this list

- Removed the ReplyCallback parameter from `api.ChatCompletionProvider`
  functions, since they now return only a single reply.

- Added a top-level `agent` package and moved the existing built-in tool
  implementations under `agent/toolbox`. `tools.ExecuteToolCalls` is now
  `agent.ExecuteToolCalls`.

- Fixed request context handling in the openai, google, and ollama providers
  (use `NewRequestWithContext`; see the sketch below the TODOs), and cleaned
  up request cancellation in the TUI

- Fixed a tool call TUI persistence bug (we were skipping messages with
  empty content)

- Tool calling is now handled from the TUI layer
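
To make the new control flow concrete, here is a minimal caller-side sketch.
Only the package and type names (`api.Message`, `api.MessageRoleToolCall`,
`api.MessageRoleToolResult`, `agent.ExecuteToolCalls`) come from this commit;
the `CreateChatCompletion` method name and both call signatures are
assumptions for illustration:

```go
// Hypothetical caller loop under the new API: the provider returns a single
// reply (no ReplyCallback, no automatic recursion); if that reply is a tool
// call, the caller decides whether and how to execute it.
reply, err := provider.CreateChatCompletion(ctx, params, messages) // assumed signature
if err != nil {
	return err
}
messages = append(messages, *reply)

if reply.Role == api.MessageRoleToolCall {
	// This is where a caller could prompt the user for confirmation.
	results, err := agent.ExecuteToolCalls(reply.ToolCalls, availableTools) // assumed signature
	if err != nil {
		return err
	}
	messages = append(messages, api.Message{
		Role:        api.MessageRoleToolResult,
		ToolResults: results,
	})
	// Sending the results back to the model is now the caller's follow-up
	// call, not automatic recursion (see the TODOs below).
}
```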

TODO:
- Prompt users before executing tool calls
- Automatically send tool results to the model (or make this toggleable)
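
For the request context fix, the relevant pattern is Go's standard
`http.NewRequestWithContext`. A self-contained sketch (the URL, body, and
function are placeholders, not lmcli's actual request construction):

```go
package main

import (
	"bytes"
	"context"
	"net/http"
)

// doRequest builds the HTTP request with the caller's context, so canceling
// ctx (e.g. when the user aborts generation in the TUI) tears down the
// in-flight request instead of leaving it running in the background.
func doRequest(ctx context.Context, url string, body []byte) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	return http.DefaultClient.Do(req)
}
```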
commit 3fde58b77d
parent 85a2abbbf3
2024-06-12 08:35:07 +00:00
35 changed files with 608 additions and 749 deletions


```diff
@@ -4,7 +4,7 @@ import (
 	"strings"
 	"time"
 
-	models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
+	"git.mlow.ca/mlow/lmcli/pkg/api"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
 	tuiutil "git.mlow.ca/mlow/lmcli/pkg/tui/util"
 	"github.com/charmbracelet/bubbles/cursor"
@@ -21,15 +21,9 @@ func (m *Model) HandleResize(width, height int) {
 	}
 }
 
-func (m *Model) waitForResponse() tea.Cmd {
-	return func() tea.Msg {
-		return msgResponse(<-m.replyChan)
-	}
-}
-
 func (m *Model) waitForResponseChunk() tea.Cmd {
 	return func() tea.Msg {
-		return msgResponseChunk(<-m.replyChunkChan)
+		return msgChatResponseChunk(<-m.chatReplyChunks)
 	}
 }
 
@@ -48,7 +42,7 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		if m.conversation.ShortName.String != m.Shared.Values.ConvShortname {
 			// clear existing messages if we're loading a new conversation
-			m.messages = []models.Message{}
+			m.messages = []api.Message{}
 			m.selectedMessage = 0
 		}
 	}
@@ -87,7 +81,7 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		}
 		m.rebuildMessageCache()
 		m.updateContent()
-	case msgResponseChunk:
+	case msgChatResponseChunk:
 		cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
 		if msg.Content == "" {
@@ -100,8 +94,8 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 			m.setMessageContents(last, m.messages[last].Content+msg.Content)
 		} else {
 			// use chunk in new message
-			m.addMessage(models.Message{
-				Role:    models.MessageRoleAssistant,
+			m.addMessage(api.Message{
+				Role:    api.MessageRoleAssistant,
 				Content: msg.Content,
 			})
 		}
@@ -113,10 +107,10 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.tokenCount += msg.TokenCount
 		m.elapsed = time.Now().Sub(m.startTime)
-	case msgResponse:
-		cmds = append(cmds, m.waitForResponse()) // wait for the next response
+	case msgChatResponse:
 		m.state = idle
-		reply := models.Message(msg)
+		reply := (*api.Message)(msg)
 		reply.Content = strings.TrimSpace(reply.Content)
 		last := len(m.messages) - 1
@@ -124,11 +118,18 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 			panic("Unexpected empty messages handling msgAssistantReply")
 		}
-		if reply.Role.IsAssistant() && m.messages[last].Role.IsAssistant() {
-			// this was a continuation, so replace the previous message with the completed reply
-			m.setMessage(last, reply)
+		if m.messages[last].Role.IsAssistant() {
+			// TODO: handle continuations gracefully - some models support them well, others fail horribly.
+			m.setMessage(last, *reply)
 		} else {
-			m.addMessage(reply)
+			m.addMessage(*reply)
 		}
+
+		switch reply.Role {
+		case api.MessageRoleToolCall:
+			// TODO: user confirmation before execution
+			//m.state = waitingForConfirmation
+			cmds = append(cmds, m.executeToolCalls(reply.ToolCalls))
+		}
 		if m.persistence {
@@ -140,17 +141,32 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		}
 		m.updateContent()
-	case msgResponseEnd:
+	case msgChatResponseCanceled:
 		m.state = idle
 		last := len(m.messages) - 1
 		if last < 0 {
 			panic("Unexpected empty messages handling msgResponseEnd")
 		}
 		m.setMessageContents(last, strings.TrimSpace(m.messages[last].Content))
 		m.updateContent()
-	case msgResponseError:
+	case msgChatResponseError:
 		m.state = idle
 		m.Shared.Err = error(msg)
 		m.updateContent()
+	case msgToolResults:
+		last := len(m.messages) - 1
+		if last < 0 {
+			panic("Unexpected empty messages handling msgAssistantReply")
+		}
+		if m.messages[last].Role != api.MessageRoleToolCall {
+			panic("Previous message not a tool call, unexpected")
+		}
+		m.addMessage(api.Message{
+			Role:        api.MessageRoleToolResult,
+			ToolResults: api.ToolResults(msg),
+		})
+		if m.persistence {
+			cmds = append(cmds, m.persistConversation())
+		}
+		m.updateContent()
 	case msgConversationTitleGenerated:
 		title := string(msg)
@@ -167,7 +183,7 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.conversation = msg.conversation
 		m.messages = msg.messages
 		if msg.isNew {
-			m.rootMessages = []models.Message{m.messages[0]}
+			m.rootMessages = []api.Message{m.messages[0]}
 		}
 		m.rebuildMessageCache()
 		m.updateContent()
```
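
The `msgChatResponse` hunk above dispatches `m.executeToolCalls(reply.ToolCalls)`,
which this diff does not show. A plausible sketch of how such a command could
bridge `agent.ExecuteToolCalls` into the Bubble Tea update loop; the
`agent.ExecuteToolCalls` signature, the `api.ToolCall` slice parameter, and
`m.availableTools` are assumptions:

```go
// Hypothetical: run the tool calls off the UI goroutine and feed the results
// back into Update as a msgToolResults, which the hunk above persists and
// appends as an api.MessageRoleToolResult message.
func (m *Model) executeToolCalls(toolCalls []api.ToolCall) tea.Cmd {
	return func() tea.Msg {
		results, err := agent.ExecuteToolCalls(toolCalls, m.availableTools) // assumed signature
		if err != nil {
			return msgChatResponseError(err)
		}
		return msgToolResults(results)
	}
}
```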