// lmcli/pkg/tui/views/chat/update.go
//
// From commit 676aa7b004 (Matt Low, 2024-09-23): "Refactor TUI rendering
// handling and general cleanup"
//
// Improves render handling by moving the responsibility of laying out the
// whole UI from each view and into the main `tui` model. Our `ViewModel`
// interface has now diverged from bubbletea's `Model` and introduces
// individual `Header`, `Content`, and `Footer` methods for rendering those
// UI elements.
//
// Also moved away from using value receivers on our Update and View
// functions (as is common across Bubbletea) to pointer receivers, which
// cleaned up some of the weirder aspects of the code (e.g. before we
// essentially had no choice but to do our rendering in `Update` in order
// to calculate and update the final height of the main content's
// `viewport`).

package chat
import (
"strings"
"time"
"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
tuiutil "git.mlow.ca/mlow/lmcli/pkg/tui/util"
"github.com/charmbracelet/bubbles/cursor"
tea "github.com/charmbracelet/bubbletea"
)
// setMessage replaces the message at index i and refreshes its cached
// rendering so the next view reflects the new contents. Panics when i
// is past the end of the message list.
func (m *Model) setMessage(i int, msg api.Message) {
	if len(m.App.Messages) <= i {
		panic("i out of range")
	}
	m.App.Messages[i] = msg
	m.messageCache[i] = m.renderMessage(i)
}
// addMessage appends msg to the conversation and caches its rendered
// form, keeping messageCache index-aligned with App.Messages.
func (m *Model) addMessage(msg api.Message) {
	idx := len(m.App.Messages)
	m.App.Messages = append(m.App.Messages, msg)
	m.messageCache = append(m.messageCache, m.renderMessage(idx))
}
// setMessageContents overwrites the Content of the message at index i
// and re-renders its cache entry. Panics when i is past the end of the
// message list.
func (m *Model) setMessageContents(i int, content string) {
	if len(m.App.Messages) <= i {
		panic("i out of range")
	}
	m.App.Messages[i].Content = content
	m.messageCache[i] = m.renderMessage(i)
}
// rebuildMessageCache re-renders every message from scratch, e.g. after
// a width change or a full reload of the conversation.
func (m *Model) rebuildMessageCache() {
	cache := make([]string, len(m.App.Messages))
	for idx := range cache {
		cache[idx] = m.renderMessage(idx)
	}
	m.messageCache = cache
}
// updateContent pushes the rendered conversation into the content
// viewport. If the viewport was scrolled to the bottom beforehand, it
// stays pinned there so streaming output keeps auto-scrolling.
func (m *Model) updateContent() {
	wasAtBottom := m.content.AtBottom()
	m.content.SetContent(m.conversationMessagesView())
	if wasAtBottom {
		m.content.GotoBottom()
	}
}
// Update handles all bubbletea messages for the chat view: key input,
// window resizes, streamed chat response chunks, tool results, and
// persistence/title-generation callbacks. It returns the updated model
// and any commands to run.
//
// Fixes over the previous version:
//   - selectedMessage is clamped to len(Messages)-1 (the last valid
//     index) instead of len(Messages), which could index out of range.
//   - panic messages name the actual message types being handled.
//   - guards against a nil Conversation / empty message list where the
//     surrounding code demonstrates those states are possible.
func (m *Model) Update(msg tea.Msg) (shared.ViewModel, tea.Cmd) {
	inputHandled := false
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		cmd := m.handleInput(msg)
		if cmd != nil {
			inputHandled = true
			cmds = append(cmds, cmd)
		}
	case tea.WindowSizeMsg:
		m.Width, m.Height = msg.Width, msg.Height
		m.content.Width = msg.Width
		m.input.SetWidth(msg.Width - m.input.FocusedStyle.Base.GetHorizontalFrameSize())
		if len(m.App.Messages) > 0 {
			// message wrapping depends on width, so re-render everything
			m.rebuildMessageCache()
			m.updateContent()
		}
	case shared.MsgViewEnter:
		// wake up spinners and cursors
		cmds = append(cmds, cursor.Blink, m.spinner.Tick)
		// Refresh view
		m.rebuildMessageCache()
		m.updateContent()
		if m.App.Conversation != nil && m.App.Conversation.ID > 0 {
			// (re)load conversation contents
			cmds = append(cmds, m.loadConversationMessages())
		}
	case tuiutil.MsgTempfileEditorClosed:
		contents := string(msg)
		switch m.editorTarget {
		case input:
			m.input.SetValue(contents)
		case selectedMessage:
			toEdit := m.App.Messages[m.selectedMessage]
			if toEdit.Content != contents {
				toEdit.Content = contents
				m.setMessage(m.selectedMessage, toEdit)
				if m.persistence && toEdit.ID > 0 {
					// create clone of message with its new contents
					cmds = append(cmds, m.cloneMessage(toEdit, true))
				}
			}
		}
	case msgConversationMessagesLoaded:
		m.App.RootMessages = msg.rootMessages
		m.App.Messages = msg.messages
		if m.selectedMessage == -1 {
			m.selectedMessage = len(msg.messages) - 1
		} else {
			// clamp to the last valid index; previously this used
			// len(m.App.Messages), permitting an out-of-range selection
			m.selectedMessage = min(m.selectedMessage, len(m.App.Messages)-1)
		}
		m.rebuildMessageCache()
		m.updateContent()
	case msgChatResponseChunk:
		cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
		if msg.Content == "" {
			break
		}
		last := len(m.App.Messages) - 1
		if last >= 0 && m.App.Messages[last].Role.IsAssistant() {
			// append chunk to existing message
			m.setMessageContents(last, m.App.Messages[last].Content+msg.Content)
		} else {
			// use chunk in a new message
			m.addMessage(api.Message{
				Role:    api.MessageRoleAssistant,
				Content: msg.Content,
			})
		}
		m.updateContent()
		// show cursor and reset blink interval (simulate typing)
		m.replyCursor.Blink = false
		cmds = append(cmds, m.replyCursor.BlinkCmd())
		m.tokenCount += msg.TokenCount
		m.elapsed = time.Since(m.startTime)
	case msgChatResponse:
		m.state = idle
		reply := (*api.Message)(msg)
		reply.Content = strings.TrimSpace(reply.Content)
		last := len(m.App.Messages) - 1
		if last < 0 {
			panic("Unexpected empty messages handling msgChatResponse")
		}
		if m.App.Messages[last].Role.IsAssistant() {
			// TODO: handle continuations gracefully - some models support them well, others fail horribly.
			m.setMessage(last, *reply)
		} else {
			m.addMessage(*reply)
		}
		switch reply.Role {
		case api.MessageRoleToolCall:
			// TODO: user confirmation before execution
			// m.state = waitingForConfirmation
			cmds = append(cmds, m.executeToolCalls(reply.ToolCalls))
		}
		if m.persistence {
			cmds = append(cmds, m.persistConversation())
		}
		// NOTE(review): Conversation may still be nil for a brand-new,
		// not-yet-persisted chat; guard before reading Title
		if m.App.Conversation != nil && m.App.Conversation.Title == "" {
			cmds = append(cmds, m.generateConversationTitle())
		}
		m.updateContent()
	case msgChatResponseCanceled:
		m.state = idle
		m.updateContent()
	case msgChatResponseError:
		m.state = idle
		m.updateContent()
		return m, shared.WrapError(msg)
	case msgToolResults:
		last := len(m.App.Messages) - 1
		if last < 0 {
			panic("Unexpected empty messages handling msgToolResults")
		}
		if m.App.Messages[last].Role != api.MessageRoleToolCall {
			panic("Previous message not a tool call, unexpected")
		}
		m.addMessage(api.Message{
			Role:        api.MessageRoleToolResult,
			ToolResults: api.ToolResults(msg),
		})
		if m.persistence {
			cmds = append(cmds, m.persistConversation())
		}
		m.updateContent()
	case msgConversationTitleGenerated:
		title := string(msg)
		m.App.Conversation.Title = title
		if m.persistence {
			cmds = append(cmds, m.updateConversationTitle(m.App.Conversation))
		}
	case cursor.BlinkMsg:
		if m.state == pendingResponse && len(m.App.Messages) > 0 {
			// ensure we show the updated "wait for response" cursor blink state
			last := len(m.App.Messages) - 1
			m.messageCache[last] = m.renderMessage(last)
			m.updateContent()
		}
	case msgConversationPersisted:
		m.App.Conversation = msg.conversation
		m.App.Messages = msg.messages
		if msg.isNew {
			m.App.RootMessages = []api.Message{m.App.Messages[0]}
		}
		m.rebuildMessageCache()
		m.updateContent()
	case msgMessageCloned:
		if msg.Parent == nil {
			// cloned message is a new conversation root
			m.App.Conversation = msg.Conversation
			m.App.RootMessages = append(m.App.RootMessages, *msg)
		}
		cmds = append(cmds, m.loadConversationMessages())
	case msgSelectedRootCycled, msgSelectedReplyCycled, msgMessageUpdated:
		cmds = append(cmds, m.loadConversationMessages())
	}
	// always give the spinner and reply cursor a chance to animate
	var cmd tea.Cmd
	m.spinner, cmd = m.spinner.Update(msg)
	if cmd != nil {
		cmds = append(cmds, cmd)
	}
	m.replyCursor, cmd = m.replyCursor.Update(msg)
	if cmd != nil {
		cmds = append(cmds, cmd)
	}
	prevInputLineCnt := m.input.LineCount()
	if !inputHandled {
		m.input, cmd = m.input.Update(msg)
		if cmd != nil {
			inputHandled = true
			cmds = append(cmds, cmd)
		}
	}
	if !inputHandled {
		m.content, cmd = m.content.Update(msg)
		if cmd != nil {
			cmds = append(cmds, cmd)
		}
	}
	// this is a pretty nasty hack to ensure the input area viewport doesn't
	// scroll below its content, which can happen when the input viewport
	// height has grown, or previously entered lines have been deleted
	if prevInputLineCnt != m.input.LineCount() {
		// dist is the distance we'd need to scroll up from the current cursor
		// position to position the last input line at the bottom of the
		// viewport. if negative, we're already scrolled above the bottom
		dist := m.input.Line() - (m.input.LineCount() - m.input.Height())
		if dist > 0 {
			for i := 0; i < dist; i++ {
				// move cursor up until content reaches the bottom of the viewport
				m.input.CursorUp()
			}
			m.input, _ = m.input.Update(nil)
			for i := 0; i < dist; i++ {
				// move cursor back down to its previous position
				m.input.CursorDown()
			}
			m.input, _ = m.input.Update(nil)
		}
	}
	if len(cmds) > 0 {
		return m, tea.Batch(cmds...)
	}
	return m, nil
}