package chat

import (
	"time"

	"git.mlow.ca/mlow/lmcli/pkg/api"
	"git.mlow.ca/mlow/lmcli/pkg/conversation"
	"git.mlow.ca/mlow/lmcli/pkg/provider"
	"git.mlow.ca/mlow/lmcli/pkg/tui/model"
	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
	tea "github.com/charmbracelet/bubbletea"
)

// waitForResponseChunk returns a command that blocks until the next streamed
// chunk of the in-flight LLM reply arrives on m.chatReplyChunks, delivering
// it as a msgChatResponseChunk.
func (m *Model) waitForResponseChunk() tea.Cmd {
	return func() tea.Msg {
		return msgChatResponseChunk(<-m.chatReplyChunks)
	}
}

// loadConversationMessages asynchronously loads the messages of the current
// conversation, yielding msgConversationMessagesLoaded on success.
func (m *Model) loadConversationMessages() tea.Cmd {
	return func() tea.Msg {
		messages, err := m.App.LoadConversationMessages()
		if err != nil {
			return shared.AsMsgError(err)
		}
		return msgConversationMessagesLoaded{messages}
	}
}

// generateConversationTitle asks the backend to generate a title from the
// current messages, yielding msgConversationTitleGenerated on success.
func (m *Model) generateConversationTitle() tea.Cmd {
	return func() tea.Msg {
		title, err := m.App.GenerateConversationTitle(m.App.Messages)
		if err != nil {
			return shared.AsMsgError(err)
		}
		return msgConversationTitleGenerated(title)
	}
}

// cloneMessage clones the given message; selected controls whether the clone
// becomes the selected reply. Yields msgMessageCloned on success.
//
// NOTE(review): this and the three methods below return shared.WrapError on
// failure while the other commands in this file return shared.AsMsgError —
// confirm whether the two helpers are interchangeable here or one is the
// intended error message shape.
func (m *Model) cloneMessage(message conversation.Message, selected bool) tea.Cmd {
	return func() tea.Msg {
		msg, err := m.App.CloneMessage(message, selected)
		if err != nil {
			return shared.WrapError(err)
		}
		return msgMessageCloned(msg)
	}
}

// updateMessageContent persists the (edited) content of the given message,
// yielding msgMessageUpdated on success.
func (m *Model) updateMessageContent(message *conversation.Message) tea.Cmd {
	return func() tea.Msg {
		err := m.App.UpdateMessageContent(message)
		if err != nil {
			return shared.WrapError(err)
		}
		return msgMessageUpdated(message)
	}
}

// cycleSelectedRoot moves the conversation's selected root message in the
// given direction. Returns nil (no-op) when there are fewer than two roots
// to cycle between.
func (m *Model) cycleSelectedRoot(conv *conversation.Conversation, dir model.MessageCycleDirection) tea.Cmd {
	if len(conv.RootMessages) < 2 {
		return nil
	}
	return func() tea.Msg {
		nextRoot, err := m.App.CycleSelectedRoot(conv, dir)
		if err != nil {
			return shared.WrapError(err)
		}
		return msgSelectedRootCycled(nextRoot)
	}
}

// cycleSelectedReply moves the message's selected reply in the given
// direction. Returns nil (no-op) when there are fewer than two replies.
func (m *Model) cycleSelectedReply(message *conversation.Message, dir model.MessageCycleDirection) tea.Cmd {
	if len(message.Replies) < 2 {
		return nil
	}
	return func() tea.Msg {
		nextReply, err := m.App.CycleSelectedReply(message, dir)
		if err != nil {
			return shared.WrapError(err)
		}
		return msgSelectedReplyCycled(nextReply)
	}
}

// persistConversation saves the current conversation, yielding
// msgConversationPersisted on success.
func (m *Model) persistConversation() tea.Cmd {
	return func() tea.Msg {
		// Named conv (not conversation) to avoid shadowing the imported
		// conversation package.
		conv, err := m.App.PersistConversation()
		if err != nil {
			return shared.AsMsgError(err)
		}
		return msgConversationPersisted(conv)
	}
}

// persistMessages saves the current conversation's messages, yielding
// msgMessagesPersisted on success.
func (m *Model) persistMessages() tea.Cmd {
	return func() tea.Msg {
		messages, err := m.App.PersistMessages()
		if err != nil {
			return shared.AsMsgError(err)
		}
		return msgMessagesPersisted(messages)
	}
}

// executeToolCalls runs the given tool calls and yields their results as a
// msgToolResults on success.
func (m *Model) executeToolCalls(toolCalls []api.ToolCall) tea.Cmd {
	return func() tea.Msg {
		results, err := m.App.ExecuteToolCalls(toolCalls)
		if err != nil {
			return shared.AsMsgError(err)
		}
		return msgToolResults(results)
	}
}

// promptLLM kicks off an LLM completion for the current messages. It resets
// the pending-response UI state (spinner, cursor, timer, token count) and
// returns a batch of the spinner tick plus a command that blocks on
// m.App.Prompt, finally yielding msgChatResponse or msgChatResponseError.
func (m *Model) promptLLM() tea.Cmd {
	m.state = pendingResponse
	m.spinner = getSpinner()
	m.replyCursor.Blink = false

	m.startTime = time.Now()
	m.elapsed = 0
	m.tokenCount = 0

	return tea.Batch(
		m.spinner.Tick,
		func() tea.Msg {
			resp, err := m.App.Prompt(m.App.Messages, m.chatReplyChunks, m.stopSignal)

			// These empty chunk sends prevent a race condition where a final
			// chunk may be received on m.chatReplyChunks after the
			// msgChatResponse message is handled, resulting in that chunk
			// appearing twice at the end of the final output
			// One send reduces the frequency of the race, two seems to
			// eliminate it
			m.chatReplyChunks <- provider.Chunk{}
			m.chatReplyChunks <- provider.Chunk{}

			if err != nil {
				return msgChatResponseError{Err: err}
			}
			return msgChatResponse(*resp)
		},
	)
}