Package restructure and API changes, several fixes
- More emphasis on the `api` package. It now holds the database model structs from `lmcli/models` (which is now gone) as well as the tool spec, call, and result types. `tools.Tool` is now `api.ToolSpec`, and `api.ChatCompletionClient` was renamed to `api.ChatCompletionProvider`.
- Changed the ChatCompletion interface and its implementations to no longer do automatic tool call recursion: they simply return a message containing tool calls, and the caller decides what to do with it (e.g. prompt for user confirmation before executing). A sketch of the new flow follows below.
- Removed the ReplyCallback parameter from the `api.ChatCompletionProvider` functions, since they now return only a single reply.
- Added a top-level `agent` package and moved the built-in tool implementations under `agent/toolbox`. `tools.ExecuteToolCalls` is now `agent.ExecuteToolCalls`.
- Fixed request context handling in the openai, google, and ollama providers (use `NewRequestWithContext`, sketched below) and cleaned up request cancellation in the TUI.
- Fixed a tool call persistence bug in the TUI (messages with empty content were being skipped).
- Tool calling is now handled from the TUI layer.

TODO:
- Prompt users before executing tool calls
- Automatically send tool results to the model (or make this toggleable)
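For illustration, a minimal sketch of the new caller-driven flow. The `CreateChatCompletion` signature, the `api.RequestParameters` type, the `Message.ToolCalls` field, and the `agent.ExecuteToolCalls` signature are assumptions based on this commit's description, not verbatim API:

```go
package example

import (
	"context"

	"git.mlow.ca/mlow/lmcli/pkg/agent"
	"git.mlow.ca/mlow/lmcli/pkg/api"
)

// handleTurn sketches one request/reply turn under the new design: the
// provider returns a single reply and never recurses on tool calls itself.
func handleTurn(
	ctx context.Context,
	provider api.ChatCompletionProvider,
	params api.RequestParameters, // assumed type
	messages []api.Message,
	toolBag []api.ToolSpec,
	confirm func([]api.ToolCall) bool, // hypothetical confirmation prompt
) (*api.Message, []api.ToolResult, error) {
	reply, err := provider.CreateChatCompletion(ctx, params, messages) // assumed signature
	if err != nil {
		return nil, nil, err
	}

	if len(reply.ToolCalls) == 0 || !confirm(reply.ToolCalls) {
		// Plain assistant reply, or the user declined execution.
		return reply, nil, nil
	}

	// Executing tool calls is now an explicit caller decision.
	results, err := agent.ExecuteToolCalls(reply.ToolCalls, toolBag) // assumed signature
	if err != nil {
		return nil, nil, err
	}
	return reply, results, nil
}
```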
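The request-context fix follows the standard library pattern: build requests with `http.NewRequestWithContext` instead of `http.NewRequest`, so canceling a generation in the TUI aborts the in-flight provider request. Roughly (the helper name and surrounding code are placeholders, not the providers' actual code):

```go
package example

import (
	"bytes"
	"context"
	"net/http"
)

// postJSON ties the outgoing HTTP request to the caller's context, so
// context cancellation propagates to the in-flight HTTP call.
func postJSON(ctx context.Context, client *http.Client, endpoint string, body []byte) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	return client.Do(req)
}
```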
```diff
@@ -4,7 +4,6 @@ import (
 	"time"
 
 	"git.mlow.ca/mlow/lmcli/pkg/api"
-	models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
 	"github.com/charmbracelet/bubbles/cursor"
 	"github.com/charmbracelet/bubbles/spinner"
@@ -16,37 +15,39 @@ import (
 
 // custom tea.Msg types
 type (
-	// sent on each chunk received from LLM
-	msgResponseChunk api.Chunk
-	// sent when response is finished being received
-	msgResponseEnd string
-	// a special case of common.MsgError that stops the response waiting animation
-	msgResponseError error
-	// sent on each completed reply
-	msgResponse models.Message
 	// sent when a conversation is (re)loaded
 	msgConversationLoaded struct {
-		conversation *models.Conversation
-		rootMessages []models.Message
+		conversation *api.Conversation
+		rootMessages []api.Message
 	}
 	// sent when a new conversation title generated
 	msgConversationTitleGenerated string
-	// sent when a conversation's messages are laoded
-	msgMessagesLoaded []models.Message
 	// sent when the conversation has been persisted, triggers a reload of contents
 	msgConversationPersisted struct {
 		isNew        bool
-		conversation *models.Conversation
-		messages     []models.Message
+		conversation *api.Conversation
+		messages     []api.Message
 	}
+	// sent when a conversation's messages are loaded
+	msgMessagesLoaded []api.Message
+	// a special case of common.MsgError that stops the response waiting animation
+	msgChatResponseError error
+	// sent on each chunk received from LLM
+	msgChatResponseChunk api.Chunk
+	// sent on each completed reply
+	msgChatResponse *api.Message
+	// sent when the response is canceled
+	msgChatResponseCanceled struct{}
+	// sent when results from a tool call are returned
+	msgToolResults []api.ToolResult
 	// sent when the given message is made the new selected reply of its parent
-	msgSelectedReplyCycled *models.Message
+	msgSelectedReplyCycled *api.Message
 	// sent when the given message is made the new selected root of the current conversation
-	msgSelectedRootCycled *models.Message
+	msgSelectedRootCycled *api.Message
 	// sent when a message's contents are updated and saved
-	msgMessageUpdated *models.Message
+	msgMessageUpdated *api.Message
 	// sent when a message is cloned, with the cloned message
-	msgMessageCloned *models.Message
+	msgMessageCloned *api.Message
 )
 
 type focusState int
@@ -77,14 +78,14 @@ type Model struct {
 
 	// app state
 	state        state // current overall status of the view
-	conversation *models.Conversation
-	rootMessages []models.Message
-	messages     []models.Message
+	conversation *api.Conversation
+	rootMessages []api.Message
+	messages     []api.Message
 	selectedMessage int
 	editorTarget    editorTarget
 	stopSignal      chan struct{}
-	replyChan       chan models.Message
-	replyChunkChan  chan api.Chunk
+	replyChan       chan api.Message
+	chatReplyChunks chan api.Chunk
 	persistence bool // whether we will save new messages in the conversation
 
 	// ui state
@@ -111,12 +112,12 @@ func Chat(shared shared.Shared) Model {
 		Shared: shared,
 
 		state:        idle,
-		conversation: &models.Conversation{},
+		conversation: &api.Conversation{},
 		persistence:  true,
 
-		stopSignal:     make(chan struct{}),
-		replyChan:      make(chan models.Message),
-		replyChunkChan: make(chan api.Chunk),
+		stopSignal:      make(chan struct{}),
+		replyChan:       make(chan api.Message),
+		chatReplyChunks: make(chan api.Chunk),
 
 		wrap:            true,
 		selectedMessage: -1,
@@ -144,8 +145,8 @@ func Chat(shared shared.Shared) Model {
 
 	system := shared.Ctx.GetSystemPrompt()
 	if system != "" {
-		m.messages = []models.Message{{
-			Role:    models.MessageRoleSystem,
+		m.messages = []api.Message{{
+			Role:    api.MessageRoleSystem,
 			Content: system,
 		}}
 	}
@@ -166,6 +167,5 @@ func Chat(shared shared.Shared) Model {
 func (m Model) Init() tea.Cmd {
 	return tea.Batch(
 		m.waitForResponseChunk(),
 		m.waitForResponse(),
 	)
 }
```
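`Init` batches `waitForResponseChunk` and `waitForResponse`, which bridge the `chatReplyChunks` and `replyChan` channels into Bubble Tea messages. Their bodies are not part of this diff; a typical implementation of this pattern would look roughly like the following sketch:

```go
// Sketch (not from this diff): each command blocks on its channel and
// converts the received value into the corresponding tea.Msg. Update
// would re-issue the command after handling each message to keep listening.
func (m Model) waitForResponse() tea.Cmd {
	return func() tea.Msg {
		reply := <-m.replyChan
		return msgChatResponse(&reply)
	}
}

func (m Model) waitForResponseChunk() tea.Cmd {
	return func() tea.Msg {
		return msgChatResponseChunk(<-m.chatReplyChunks)
	}
}
```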