From 3536438dd11081e7582590feac346c4fc6d7c5f4 Mon Sep 17 00:00:00 2001
From: Matt Low
Date: Wed, 22 May 2024 16:25:16 +0000
Subject: [PATCH] Add cursor to indicate the assistant is responding

A better/more natural indication that the model is doing something.
---
 pkg/tui/chat.go | 98 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 80 insertions(+), 18 deletions(-)

diff --git a/pkg/tui/chat.go b/pkg/tui/chat.go
index 21360df..71f77cb 100644
--- a/pkg/tui/chat.go
+++ b/pkg/tui/chat.go
@@ -9,6 +9,7 @@ import (
     cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
     models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 
+    "github.com/charmbracelet/bubbles/cursor"
     "github.com/charmbracelet/bubbles/spinner"
     "github.com/charmbracelet/bubbles/textarea"
     "github.com/charmbracelet/bubbles/viewport"
@@ -79,9 +80,10 @@ type chatModel struct {
     messageOffsets []int
 
     // ui elements
-    content viewport.Model
-    input   textarea.Model
-    spinner spinner.Model
+    content     viewport.Model
+    input       textarea.Model
+    spinner     spinner.Model
+    replyCursor cursor.Model // cursor to indicate incoming response
 }
 
 func newChatModel(tui *model) chatModel {
@@ -117,8 +119,12 @@
                 FPS: time.Second / 3,
             },
         )),
+        replyCursor: cursor.New(),
     }
 
+    m.replyCursor.SetChar(" ")
+    m.replyCursor.Focus()
+
     system := tui.ctx.GetSystemPrompt()
     if system != "" {
         m.messages = []models.Message{{
@@ -220,8 +226,6 @@ func (m *chatModel) handleInput(msg tea.KeyMsg) (bool, tea.Cmd) {
 
 func (m chatModel) Init() tea.Cmd {
     return tea.Batch(
-        textarea.Blink,
-        m.spinner.Tick,
         m.waitForChunk(),
         m.waitForReply(),
     )
@@ -241,9 +245,13 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
     var cmds []tea.Cmd
     switch msg := msg.(type) {
     case msgStateEnter:
+        // wake up spinners and cursors
+        cmds = append(cmds, cursor.Blink, m.spinner.Tick)
+
         if m.opts.convShortname != "" && m.conversation.ShortName.String != m.opts.convShortname {
             cmds = append(cmds, m.loadConversation(m.opts.convShortname))
         }
+
         m.rebuildMessageCache()
         m.updateContent()
     case tea.WindowSizeMsg:
@@ -275,10 +283,16 @@
         m.content.GotoBottom()
     case msgResponseChunk:
         chunk := string(msg)
+        if chunk == "" {
+            break
+        }
+
         last := len(m.messages) - 1
         if last >= 0 && m.messages[last].Role.IsAssistant() {
+            // append chunk to existing message
             m.setMessageContents(last, m.messages[last].Content+chunk)
         } else {
+            // use chunk in new message
             m.addMessage(models.Message{
                 Role:    models.MessageRoleAssistant,
                 Content: chunk,
@@ -287,6 +301,10 @@
         m.updateContent()
         cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
 
+        // show cursor and reset blink interval (simulate typing)
+        m.replyCursor.Blink = false
+        cmds = append(cmds, m.replyCursor.BlinkCmd())
+
         m.tokenCount++
         m.elapsed = time.Now().Sub(m.startTime)
     case msgAssistantReply:
@@ -332,6 +350,7 @@
         m.waitingForReply = false
         m.status = "Press ctrl+s to send"
         m.err = error(msg)
+        m.updateContent()
     case msgConversationTitleChanged:
         title := string(msg)
         m.conversation.Title = title
@@ -341,6 +360,11 @@
                 cmds = append(cmds, wrapError(err))
             }
         }
+    case cursor.BlinkMsg:
+        if m.waitingForReply {
+            // ensure we show updated "wait for response" cursor blink state
+            m.updateContent()
+        }
     }
 
     var cmd tea.Cmd
@@ -348,6 +372,10 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
     if cmd != nil {
         cmds = append(cmds, cmd)
     }
+    m.replyCursor, cmd = m.replyCursor.Update(msg)
+    if cmd != nil {
+        cmds = append(cmds, cmd)
+    }
 
     prevInputLineCnt := m.input.LineCount()
     inputCaptured := false
@@ -442,9 +470,10 @@ func (m *chatModel) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
         }
         m.messages = m.messages[:m.selectedMessage+1]
         m.messageCache = m.messageCache[:m.selectedMessage+1]
+        cmd := m.promptLLM()
         m.updateContent()
         m.content.GotoBottom()
-        return true, m.promptLLM()
+        return true, cmd
     }
     return false, nil
 }
@@ -464,6 +493,10 @@ func (m *chatModel) handleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
         m.input.Blur()
         return true, nil
     case "ctrl+s":
+        if m.waitingForReply {
+            return false, nil
+        }
+
         input := strings.TrimSpace(m.input.Value())
         if input == "" {
             return true, nil
@@ -487,9 +520,10 @@
             }
         }
 
+        cmd := m.promptLLM()
         m.updateContent()
         m.content.GotoBottom()
-        return true, m.promptLLM()
+        return true, cmd
     case "ctrl+e":
         cmd := openTempfileEditor("message.*.md", m.input.Value(), "# Edit your input below\n")
         m.editorTarget = input
@@ -536,7 +570,10 @@ func (m *chatModel) renderMessageHeading(i int, message *models.Message) string {
     return messageHeadingStyle.Render(prefix + user + suffix)
 }
 
-func (m *chatModel) renderMessage(msg *models.Message) string {
+func (m *chatModel) renderMessage(i int) string {
+    msg := &m.messages[i]
+
+    // Write message contents
     sb := &strings.Builder{}
     sb.Grow(len(msg.Content) * 2)
     if msg.Content != "" {
@@ -547,6 +584,12 @@
         }
     }
 
+    // Show the assistant's cursor
+    if m.waitingForReply && i == len(m.messages)-1 {
+        sb.WriteString(m.replyCursor.View())
+    }
+
+    // Write tool call info
     var toolString string
     switch msg.Role {
     case models.MessageRoleToolCall:
@@ -636,10 +679,18 @@ func (m *chatModel) conversationMessagesView() string {
         sb.WriteString("\n")
         lineCnt += lipgloss.Height(heading)
 
-        cached := m.messageCache[i]
-        sb.WriteString(cached)
+        var rendered string
+        if m.waitingForReply && i == len(m.messages)-1 {
+            // render the final (assistant) message directly so its
+            // cursor reflects the current blink state
+            rendered = m.renderMessage(i)
+        } else {
+            rendered = m.messageCache[i]
+        }
+
+        sb.WriteString(rendered)
         sb.WriteString("\n")
-        lineCnt += lipgloss.Height(cached)
+        lineCnt += lipgloss.Height(rendered)
     }
 
     return sb.String()
@@ -712,12 +763,12 @@ func (m *chatModel) setMessage(i int, msg models.Message) {
         panic("i out of range")
     }
     m.messages[i] = msg
-    m.messageCache[i] = m.renderMessage(&msg)
+    m.messageCache[i] = m.renderMessage(i)
 }
 
 func (m *chatModel) addMessage(msg models.Message) {
     m.messages = append(m.messages, msg)
-    m.messageCache = append(m.messageCache, m.renderMessage(&msg))
+    m.messageCache = append(m.messageCache, m.renderMessage(len(m.messages)-1))
 }
 
 func (m *chatModel) setMessageContents(i int, content string) {
@@ -725,13 +776,13 @@
         panic("i out of range")
     }
     m.messages[i].Content = content
-    m.messageCache[i] = m.renderMessage(&m.messages[i])
+    m.messageCache[i] = m.renderMessage(i)
 }
 
 func (m *chatModel) rebuildMessageCache() {
     m.messageCache = make([]string, len(m.messages))
-    for i, msg := range m.messages {
-        m.messageCache[i] = m.renderMessage(&msg)
+    for i := range m.messages {
+        m.messageCache[i] = m.renderMessage(i)
     }
 }
@@ -792,7 +843,7 @@ func (m *chatModel) persistConversation() error {
             if err != nil {
                 return err
             }
-        } else if i > 0 {
+        } else if i > 0 { // message is new, so add it as a reply to the previous message
             saved, err := m.ctx.Store.Reply(&m.messages[i-1], m.messages[i])
             if err != nil {
@@ -833,8 +884,19 @@ func (m *chatModel) waitForChunk() tea.Cmd {
 
 func (m *chatModel) promptLLM() tea.Cmd {
     m.waitingForReply = true
+    m.replyCursor.Blink = false
     m.status = "Press ctrl+c to cancel"
 
+    toPrompt := m.messages
+
+    // Add response placeholder message
+    if m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant {
+        m.addMessage(models.Message{
+            Role:    models.MessageRoleAssistant,
+            Content: "",
+        })
+    }
+
     m.tokenCount = 0
     m.startTime = time.Now()
     m.elapsed = 0
@@ -868,7 +930,7 @@ func (m *chatModel) promptLLM() tea.Cmd {
     }()
 
     resp, err := completionProvider.CreateChatCompletionStream(
-        ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
+        ctx, requestParams, toPrompt, replyHandler, m.replyChunkChan,
     )
 
     if err != nil && !canceled {
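
Note (not part of the patch): the change leans entirely on the public bubbles cursor API — cursor.New, Focus, SetChar, the Blink field, BlinkCmd, Update, View, and the cursor.Blink command are all used above. What follows is a minimal, self-contained sketch of the same "typing cursor" pattern; the demoModel name is hypothetical and keystrokes stand in for streamed response chunks, so treat it as an illustration rather than lmcli code.

package main

import (
    "fmt"

    "github.com/charmbracelet/bubbles/cursor"
    tea "github.com/charmbracelet/bubbletea"
)

// demoModel (hypothetical) appends a blinking cursor to accumulated
// text, mirroring what the patch does with replyCursor.
type demoModel struct {
    text        string
    replyCursor cursor.Model
}

func newDemoModel() demoModel {
    m := demoModel{replyCursor: cursor.New()}
    m.replyCursor.SetChar(" ") // blank block cursor, as in the patch
    m.replyCursor.Focus()
    return m
}

func (m demoModel) Init() tea.Cmd {
    return cursor.Blink // kick off the blink cycle
}

func (m demoModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    var cmds []tea.Cmd
    switch msg := msg.(type) {
    case tea.KeyMsg:
        if msg.String() == "ctrl+c" {
            return m, tea.Quit
        }
        // Treat each keystroke as an incoming "chunk": show the cursor
        // solid, then restart its blink cycle (the typing effect).
        m.text += msg.String()
        m.replyCursor.Blink = false
        cmds = append(cmds, m.replyCursor.BlinkCmd())
    }
    // Forward every message so cursor.BlinkMsg toggles the blink state,
    // just as the patch forwards messages to m.replyCursor.Update.
    var cmd tea.Cmd
    m.replyCursor, cmd = m.replyCursor.Update(msg)
    cmds = append(cmds, cmd)
    return m, tea.Batch(cmds...)
}

func (m demoModel) View() string {
    return m.text + m.replyCursor.View() + "\n"
}

func main() {
    if _, err := tea.NewProgram(newDemoModel()).Run(); err != nil {
        fmt.Println(err)
    }
}

Setting Blink = false renders the cursor solid immediately, and BlinkCmd() restarts the blink timer, so a steady stream of chunks shows a solid cursor that only resumes blinking once output pauses — the effect the patch produces while the assistant responds. Re-rendering on cursor.BlinkMsg (as the patch does while waitingForReply) keeps the rendered view in sync with that blink state.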