package chat

import (
	"strings"
	"time"

	"git.mlow.ca/mlow/lmcli/pkg/api"
	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
	tuiutil "git.mlow.ca/mlow/lmcli/pkg/tui/util"
	"github.com/charmbracelet/bubbles/cursor"
	tea "github.com/charmbracelet/bubbletea"
)

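// HandleResize records the new terminal dimensions and resizes the content
// viewport and input area to match. If a conversation is already loaded, its
// messages are re-rendered at the new width.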
func (m *Model) HandleResize(width, height int) {
	m.Width, m.Height = width, height
	m.content.Width = width
	m.input.SetWidth(width - m.input.FocusedStyle.Base.GetHorizontalFrameSize())
	if len(m.messages) > 0 {
		m.rebuildMessageCache()
		m.updateContent()
	}
}

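// waitForResponseChunk returns a command that blocks until the next streamed
// response chunk is available on m.chatReplyChunks. Update re-issues this
// command each time it handles a msgChatResponseChunk, so chunk messages keep
// flowing for as long as the stream produces them.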
func (m *Model) waitForResponseChunk() tea.Cmd {
	return func() tea.Msg {
		return msgChatResponseChunk(<-m.chatReplyChunks)
	}
}

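// Update is the Bubble Tea update function for the chat view. It handles
// window resizes, view activation, editor results, conversation and message
// loading, streamed and completed chat responses, tool calls and results, and
// persistence-related messages, then forwards the message to the child
// components and recomputes the layout.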
func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		m.HandleResize(msg.Width, msg.Height)
	case shared.MsgViewEnter:
		// wake up spinners and cursors
		cmds = append(cmds, cursor.Blink, m.spinner.Tick)

		if m.Shared.Values.ConvShortname != "" {
			// (re)load conversation contents
			cmds = append(cmds, m.loadConversation(m.Shared.Values.ConvShortname))

			if m.conversation.ShortName.String != m.Shared.Values.ConvShortname {
				// clear existing messages if we're loading a new conversation
				m.messages = []api.Message{}
				m.selectedMessage = 0
			}
		}

		m.rebuildMessageCache()
		m.updateContent()
	case tuiutil.MsgTempfileEditorClosed:
		contents := string(msg)
		switch m.editorTarget {
		case input:
			m.input.SetValue(contents)
		case selectedMessage:
			toEdit := m.messages[m.selectedMessage]
			if toEdit.Content != contents {
				toEdit.Content = contents
				m.setMessage(m.selectedMessage, toEdit)
				if m.persistence && toEdit.ID > 0 {
					// create clone of message with its new contents
					cmds = append(cmds, m.cloneMessage(toEdit, true))
				}
			}
		}
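	// conversation metadata and its root messages finished loading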
	case msgConversationLoaded:
		m.conversation = msg.conversation
		m.rootMessages = msg.rootMessages
		m.selectedMessage = -1
		if len(m.rootMessages) > 0 {
			cmds = append(cmds, m.loadConversationMessages())
		}
	case msgMessagesLoaded:
		m.messages = msg
		if m.selectedMessage == -1 {
			m.selectedMessage = len(msg) - 1
		} else {
			// clamp the selection to the last valid index
			m.selectedMessage = min(m.selectedMessage, len(m.messages)-1)
		}
		m.rebuildMessageCache()
		m.updateContent()
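	// a new chunk of the streaming assistant response arrived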
	case msgChatResponseChunk:
		cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk

		if msg.Content == "" {
			break
		}

		last := len(m.messages) - 1
		if last >= 0 && m.messages[last].Role.IsAssistant() {
			// append chunk to existing message
			m.setMessageContents(last, m.messages[last].Content+msg.Content)
		} else {
			// use chunk in a new message
			m.addMessage(api.Message{
				Role:    api.MessageRoleAssistant,
				Content: msg.Content,
			})
		}
		m.updateContent()

		// show cursor and reset blink interval (simulate typing)
		m.replyCursor.Blink = false
		cmds = append(cmds, m.replyCursor.BlinkCmd())

		m.tokenCount += msg.TokenCount
		m.elapsed = time.Since(m.startTime)
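	// the full assistant response arrived; store the final, trimmed message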
	case msgChatResponse:
		m.state = idle

		reply := (*api.Message)(msg)
		reply.Content = strings.TrimSpace(reply.Content)

		last := len(m.messages) - 1
		if last < 0 {
			panic("Unexpected empty messages handling msgChatResponse")
		}

		if m.messages[last].Role.IsAssistant() {
			// TODO: handle continuations gracefully - some models support them well, others fail horribly.
			m.setMessage(last, *reply)
		} else {
			m.addMessage(*reply)
		}

		switch reply.Role {
		case api.MessageRoleToolCall:
			// TODO: user confirmation before execution
			// m.state = waitingForConfirmation
			cmds = append(cmds, m.executeToolCalls(reply.ToolCalls))
		}

		if m.persistence {
			cmds = append(cmds, m.persistConversation())
		}

		if m.conversation.Title == "" {
			cmds = append(cmds, m.generateConversationTitle())
		}

		m.updateContent()
	case msgChatResponseCanceled:
		m.state = idle
		m.updateContent()
	case msgChatResponseError:
		m.state = idle
		m.Shared.Err = error(msg)
		m.updateContent()
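	// tool call execution finished; record the results as a new message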
	case msgToolResults:
		last := len(m.messages) - 1
		if last < 0 {
			panic("Unexpected empty messages handling msgToolResults")
		}

		if m.messages[last].Role != api.MessageRoleToolCall {
			panic("Previous message not a tool call, unexpected")
		}

		m.addMessage(api.Message{
			Role:        api.MessageRoleToolResult,
			ToolResults: api.ToolResults(msg),
		})

		if m.persistence {
			cmds = append(cmds, m.persistConversation())
		}

		m.updateContent()
	case msgConversationTitleGenerated:
		title := string(msg)
		m.conversation.Title = title
		if m.persistence {
			cmds = append(cmds, m.updateConversationTitle(m.conversation))
		}
	case cursor.BlinkMsg:
		if m.state == pendingResponse {
			// ensure we show the updated "wait for response" cursor blink state
			last := len(m.messages) - 1
			m.messageCache[last] = m.renderMessage(last)
			m.updateContent()
		}
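	// the conversation and its messages were persisted; sync local copies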
	case msgConversationPersisted:
		m.conversation = msg.conversation
		m.messages = msg.messages
		if msg.isNew {
			m.rootMessages = []api.Message{m.messages[0]}
		}
		m.rebuildMessageCache()
		m.updateContent()
	case msgMessageCloned:
		if msg.Parent == nil {
			m.conversation = msg.Conversation
			m.rootMessages = append(m.rootMessages, *msg)
		}
		cmds = append(cmds, m.loadConversationMessages())
	case msgSelectedRootCycled, msgSelectedReplyCycled, msgMessageUpdated:
		cmds = append(cmds, m.loadConversationMessages())
	}

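	// regardless of which message arrived, give the child components (spinner,
	// reply cursor, input, and content viewport) a chance to handle it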
	var cmd tea.Cmd
	m.spinner, cmd = m.spinner.Update(msg)
	if cmd != nil {
		cmds = append(cmds, cmd)
	}
	m.replyCursor, cmd = m.replyCursor.Update(msg)
	if cmd != nil {
		cmds = append(cmds, cmd)
	}

	prevInputLineCnt := m.input.LineCount()
	inputCaptured := false
	m.input, cmd = m.input.Update(msg)
	if cmd != nil {
		inputCaptured = true
		cmds = append(cmds, cmd)
	}

	if !inputCaptured {
		m.content, cmd = m.content.Update(msg)
		if cmd != nil {
			cmds = append(cmds, cmd)
		}
	}

	// update views once window dimensions are known
	if m.Width > 0 {
		m.Header = m.headerView()
		m.Footer = m.footerView()
		m.Error = tuiutil.ErrorBanner(m.Err, m.Width)
		fixedHeight := tuiutil.Height(m.Header) + tuiutil.Height(m.Error) + tuiutil.Height(m.Footer)

		// calculate clamped input height to accommodate the input text:
		// minimum 4 lines, maximum half of the content area
		newHeight := max(4, min((m.Height-fixedHeight-1)/2, m.input.LineCount()))
		m.input.SetHeight(newHeight)
		m.Input = m.input.View()

		// remaining height goes to the content viewport
		m.content.Height = m.Height - fixedHeight - tuiutil.Height(m.Input)
		m.Content = m.content.View()
	}

	// this is a pretty nasty hack to ensure the input area viewport doesn't
	// scroll below its content, which can happen when the input viewport
	// height has grown, or previously entered lines have been deleted
	if prevInputLineCnt != m.input.LineCount() {
		// dist is the distance we'd need to scroll up from the current cursor
		// position to place the last input line at the bottom of the
		// viewport. if negative, we're already scrolled above the bottom
		dist := m.input.Line() - (m.input.LineCount() - m.input.Height())
		if dist > 0 {
			for i := 0; i < dist; i++ {
				// move the cursor up until the content reaches the bottom of the viewport
				m.input.CursorUp()
			}
			m.input, _ = m.input.Update(nil)
			for i := 0; i < dist; i++ {
				// move the cursor back down to its previous position
				m.input.CursorDown()
			}
			m.input, _ = m.input.Update(nil)
		}
	}

	return m, tea.Batch(cmds...)
}