Matt Low
0384c7cb66
This refactor splits out all conversation concerns into a new `conversation` package. There is now a split between `conversation` and `api`'s representation of `Message`, the latter storing the minimum information required for interaction with LLM providers. There is necessary conversion between the two when making LLM calls.
275 lines
7.2 KiB
Go
275 lines
7.2 KiB
Go
package chat
|
|
|
|
import (
|
|
"strings"
|
|
"time"
|
|
|
|
"git.mlow.ca/mlow/lmcli/pkg/api"
|
|
"git.mlow.ca/mlow/lmcli/pkg/conversation"
|
|
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
|
|
tuiutil "git.mlow.ca/mlow/lmcli/pkg/tui/util"
|
|
"github.com/charmbracelet/bubbles/cursor"
|
|
tea "github.com/charmbracelet/bubbletea"
|
|
)
|
|
|
|
func (m *Model) setMessage(i int, msg conversation.Message) {
|
|
if i >= len(m.App.Messages) {
|
|
panic("i out of range")
|
|
}
|
|
m.App.Messages[i] = msg
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
|
|
func (m *Model) addMessage(msg conversation.Message) {
|
|
m.App.Messages = append(m.App.Messages, msg)
|
|
m.messageCache = append(m.messageCache, m.renderMessage(len(m.App.Messages)-1))
|
|
}
|
|
|
|
func (m *Model) setMessageContents(i int, content string) {
|
|
if i >= len(m.App.Messages) {
|
|
panic("i out of range")
|
|
}
|
|
m.App.Messages[i].Content = content
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
|
|
func (m *Model) rebuildMessageCache() {
|
|
m.messageCache = make([]string, len(m.App.Messages))
|
|
for i := range m.App.Messages {
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
}
|
|
|
|
func (m *Model) updateContent() {
|
|
atBottom := m.content.AtBottom()
|
|
m.content.SetContent(m.conversationMessagesView())
|
|
if atBottom {
|
|
m.content.GotoBottom()
|
|
}
|
|
}
|
|
|
|
func (m *Model) Update(msg tea.Msg) (shared.ViewModel, tea.Cmd) {
|
|
inputHandled := false
|
|
|
|
var cmds []tea.Cmd
|
|
switch msg := msg.(type) {
|
|
case tea.KeyMsg:
|
|
cmd := m.handleInput(msg)
|
|
if cmd != nil {
|
|
inputHandled = true
|
|
cmds = append(cmds, cmd)
|
|
}
|
|
case tea.WindowSizeMsg:
|
|
m.Width, m.Height = msg.Width, msg.Height
|
|
m.content.Width = msg.Width
|
|
m.input.SetWidth(msg.Width - m.input.FocusedStyle.Base.GetHorizontalFrameSize())
|
|
if len(m.App.Messages) > 0 {
|
|
m.rebuildMessageCache()
|
|
m.updateContent()
|
|
}
|
|
case shared.MsgViewEnter:
|
|
// wake up spinners and cursors
|
|
cmds = append(cmds, cursor.Blink, m.spinner.Tick)
|
|
|
|
// Refresh view
|
|
m.rebuildMessageCache()
|
|
m.updateContent()
|
|
|
|
if m.App.Conversation != nil && m.App.Conversation.ID > 0 {
|
|
// (re)load conversation contents
|
|
cmds = append(cmds, m.loadConversationMessages())
|
|
}
|
|
case tuiutil.MsgTempfileEditorClosed:
|
|
contents := string(msg)
|
|
switch m.editorTarget {
|
|
case input:
|
|
m.input.SetValue(contents)
|
|
case selectedMessage:
|
|
toEdit := m.App.Messages[m.selectedMessage]
|
|
if toEdit.Content != contents {
|
|
toEdit.Content = contents
|
|
m.setMessage(m.selectedMessage, toEdit)
|
|
if m.persistence && toEdit.ID > 0 {
|
|
// create clone of message with its new contents
|
|
cmds = append(cmds, m.cloneMessage(toEdit, true))
|
|
}
|
|
}
|
|
}
|
|
case msgConversationMessagesLoaded:
|
|
m.App.Messages = msg.messages
|
|
if m.selectedMessage == -1 {
|
|
m.selectedMessage = len(msg.messages) - 1
|
|
} else {
|
|
m.selectedMessage = min(m.selectedMessage, len(m.App.Messages))
|
|
}
|
|
m.rebuildMessageCache()
|
|
m.updateContent()
|
|
case msgChatResponseChunk:
|
|
cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
|
|
|
|
if msg.Content == "" {
|
|
break
|
|
}
|
|
|
|
last := len(m.App.Messages) - 1
|
|
if last >= 0 && m.App.Messages[last].Role.IsAssistant() {
|
|
// append chunk to existing message
|
|
m.setMessageContents(last, m.App.Messages[last].Content+msg.Content)
|
|
} else {
|
|
// use chunk in a new message
|
|
m.addMessage(conversation.Message{
|
|
Role: api.MessageRoleAssistant,
|
|
Content: msg.Content,
|
|
})
|
|
}
|
|
m.updateContent()
|
|
|
|
// show cursor and reset blink interval (simulate typing)
|
|
m.replyCursor.Blink = false
|
|
cmds = append(cmds, m.replyCursor.BlinkCmd())
|
|
|
|
m.tokenCount += msg.TokenCount
|
|
m.elapsed = time.Now().Sub(m.startTime)
|
|
case msgChatResponse:
|
|
m.state = idle
|
|
|
|
reply := (*conversation.Message)(msg)
|
|
reply.Content = strings.TrimSpace(reply.Content)
|
|
|
|
last := len(m.App.Messages) - 1
|
|
if last < 0 {
|
|
panic("Unexpected empty messages handling msgAssistantReply")
|
|
}
|
|
|
|
if m.App.Messages[last].Role.IsAssistant() {
|
|
// TODO: handle continuations gracefully - some models support them well, others fail horribly.
|
|
m.setMessage(last, *reply)
|
|
} else {
|
|
m.addMessage(*reply)
|
|
}
|
|
|
|
switch reply.Role {
|
|
case api.MessageRoleToolCall:
|
|
// TODO: user confirmation before execution
|
|
// m.state = waitingForConfirmation
|
|
cmds = append(cmds, m.executeToolCalls(reply.ToolCalls))
|
|
}
|
|
|
|
if m.persistence {
|
|
cmds = append(cmds, m.persistConversation())
|
|
}
|
|
|
|
if m.App.Conversation.Title == "" {
|
|
cmds = append(cmds, m.generateConversationTitle())
|
|
}
|
|
|
|
m.updateContent()
|
|
case msgChatResponseCanceled:
|
|
m.state = idle
|
|
m.updateContent()
|
|
case msgChatResponseError:
|
|
m.state = idle
|
|
m.updateContent()
|
|
return m, shared.WrapError(msg.Err)
|
|
case msgToolResults:
|
|
last := len(m.App.Messages) - 1
|
|
if last < 0 {
|
|
panic("Unexpected empty messages handling msgAssistantReply")
|
|
}
|
|
|
|
if m.App.Messages[last].Role != api.MessageRoleToolCall {
|
|
panic("Previous message not a tool call, unexpected")
|
|
}
|
|
|
|
m.addMessage(conversation.Message{
|
|
Role: api.MessageRoleToolResult,
|
|
ToolResults: conversation.ToolResults(msg),
|
|
})
|
|
|
|
if m.persistence {
|
|
cmds = append(cmds, m.persistConversation())
|
|
}
|
|
|
|
m.updateContent()
|
|
case msgConversationTitleGenerated:
|
|
title := string(msg)
|
|
m.App.Conversation.Title = title
|
|
if m.persistence {
|
|
cmds = append(cmds, m.updateConversationTitle(m.App.Conversation))
|
|
}
|
|
case cursor.BlinkMsg:
|
|
if m.state == pendingResponse {
|
|
// ensure we show the updated "wait for response" cursor blink state
|
|
last := len(m.App.Messages) - 1
|
|
m.messageCache[last] = m.renderMessage(last)
|
|
m.updateContent()
|
|
}
|
|
case msgConversationPersisted:
|
|
m.App.Conversation = msg.conversation
|
|
m.App.Messages = msg.messages
|
|
m.rebuildMessageCache()
|
|
m.updateContent()
|
|
case msgMessageCloned:
|
|
if msg.Parent == nil {
|
|
m.App.Conversation = msg.Conversation
|
|
}
|
|
cmds = append(cmds, m.loadConversationMessages())
|
|
case msgSelectedRootCycled, msgSelectedReplyCycled, msgMessageUpdated:
|
|
cmds = append(cmds, m.loadConversationMessages())
|
|
}
|
|
|
|
var cmd tea.Cmd
|
|
m.spinner, cmd = m.spinner.Update(msg)
|
|
if cmd != nil {
|
|
cmds = append(cmds, cmd)
|
|
}
|
|
m.replyCursor, cmd = m.replyCursor.Update(msg)
|
|
if cmd != nil {
|
|
cmds = append(cmds, cmd)
|
|
}
|
|
|
|
prevInputLineCnt := m.input.LineCount()
|
|
|
|
if !inputHandled {
|
|
m.input, cmd = m.input.Update(msg)
|
|
if cmd != nil {
|
|
inputHandled = true
|
|
cmds = append(cmds, cmd)
|
|
}
|
|
}
|
|
|
|
if !inputHandled {
|
|
m.content, cmd = m.content.Update(msg)
|
|
if cmd != nil {
|
|
cmds = append(cmds, cmd)
|
|
}
|
|
}
|
|
|
|
// this is a pretty nasty hack to ensure the input area viewport doesn't
|
|
// scroll below its content, which can happen when the input viewport
|
|
// height has grown, or previously entered lines have been deleted
|
|
if prevInputLineCnt != m.input.LineCount() {
|
|
// dist is the distance we'd need to scroll up from the current cursor
|
|
// position to position the last input line at the bottom of the
|
|
// viewport. if negative, we're already scrolled above the bottom
|
|
dist := m.input.Line() - (m.input.LineCount() - m.input.Height())
|
|
if dist > 0 {
|
|
for i := 0; i < dist; i++ {
|
|
// move cursor up until content reaches the bottom of the viewport
|
|
m.input.CursorUp()
|
|
}
|
|
m.input, _ = m.input.Update(nil)
|
|
for i := 0; i < dist; i++ {
|
|
// move cursor back down to its previous position
|
|
m.input.CursorDown()
|
|
}
|
|
m.input, _ = m.input.Update(nil)
|
|
}
|
|
}
|
|
|
|
if len(cmds) > 0 {
|
|
return m, tea.Batch(cmds...)
|
|
}
|
|
return m, nil
|
|
}
|