Add cursor to indicate the assistant is responding

A better/more natural indication that the model is doing something
This commit is contained in:
Matt Low 2024-05-22 16:25:16 +00:00
parent f5ce970102
commit 3536438dd1

View File

@@ -9,6 +9,7 @@ import (
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util" cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model" models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/charmbracelet/bubbles/cursor"
"github.com/charmbracelet/bubbles/spinner" "github.com/charmbracelet/bubbles/spinner"
"github.com/charmbracelet/bubbles/textarea" "github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport" "github.com/charmbracelet/bubbles/viewport"
@@ -79,9 +80,10 @@ type chatModel struct {
messageOffsets []int messageOffsets []int
// ui elements // ui elements
content viewport.Model content viewport.Model
input textarea.Model input textarea.Model
spinner spinner.Model spinner spinner.Model
replyCursor cursor.Model // cursor to indicate incoming response
} }
func newChatModel(tui *model) chatModel { func newChatModel(tui *model) chatModel {
@@ -117,8 +119,12 @@ func newChatModel(tui *model) chatModel {
FPS: time.Second / 3, FPS: time.Second / 3,
}, },
)), )),
replyCursor: cursor.New(),
} }
m.replyCursor.SetChar(" ")
m.replyCursor.Focus()
system := tui.ctx.GetSystemPrompt() system := tui.ctx.GetSystemPrompt()
if system != "" { if system != "" {
m.messages = []models.Message{{ m.messages = []models.Message{{
@@ -220,8 +226,6 @@ func (m *chatModel) handleInput(msg tea.KeyMsg) (bool, tea.Cmd) {
func (m chatModel) Init() tea.Cmd { func (m chatModel) Init() tea.Cmd {
return tea.Batch( return tea.Batch(
textarea.Blink,
m.spinner.Tick,
m.waitForChunk(), m.waitForChunk(),
m.waitForReply(), m.waitForReply(),
) )
@@ -241,9 +245,13 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
var cmds []tea.Cmd var cmds []tea.Cmd
switch msg := msg.(type) { switch msg := msg.(type) {
case msgStateEnter: case msgStateEnter:
// wake up spinners and cursors
cmds = append(cmds, cursor.Blink, m.spinner.Tick)
if m.opts.convShortname != "" && m.conversation.ShortName.String != m.opts.convShortname { if m.opts.convShortname != "" && m.conversation.ShortName.String != m.opts.convShortname {
cmds = append(cmds, m.loadConversation(m.opts.convShortname)) cmds = append(cmds, m.loadConversation(m.opts.convShortname))
} }
m.rebuildMessageCache() m.rebuildMessageCache()
m.updateContent() m.updateContent()
case tea.WindowSizeMsg: case tea.WindowSizeMsg:
@@ -275,10 +283,16 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
m.content.GotoBottom() m.content.GotoBottom()
case msgResponseChunk: case msgResponseChunk:
chunk := string(msg) chunk := string(msg)
if chunk == "" {
break
}
last := len(m.messages) - 1 last := len(m.messages) - 1
if last >= 0 && m.messages[last].Role.IsAssistant() { if last >= 0 && m.messages[last].Role.IsAssistant() {
// append chunk to existing message
m.setMessageContents(last, m.messages[last].Content+chunk) m.setMessageContents(last, m.messages[last].Content+chunk)
} else { } else {
// use chunk in new message
m.addMessage(models.Message{ m.addMessage(models.Message{
Role: models.MessageRoleAssistant, Role: models.MessageRoleAssistant,
Content: chunk, Content: chunk,
@@ -287,6 +301,10 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
m.updateContent() m.updateContent()
cmds = append(cmds, m.waitForChunk()) // wait for the next chunk cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
// show cursor and reset blink interval (simulate typing)
m.replyCursor.Blink = false
cmds = append(cmds, m.replyCursor.BlinkCmd())
m.tokenCount++ m.tokenCount++
m.elapsed = time.Now().Sub(m.startTime) m.elapsed = time.Now().Sub(m.startTime)
case msgAssistantReply: case msgAssistantReply:
@@ -332,6 +350,7 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
m.waitingForReply = false m.waitingForReply = false
m.status = "Press ctrl+s to send" m.status = "Press ctrl+s to send"
m.err = error(msg) m.err = error(msg)
m.updateContent()
case msgConversationTitleChanged: case msgConversationTitleChanged:
title := string(msg) title := string(msg)
m.conversation.Title = title m.conversation.Title = title
@@ -341,6 +360,11 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
cmds = append(cmds, wrapError(err)) cmds = append(cmds, wrapError(err))
} }
} }
case cursor.BlinkMsg:
if m.waitingForReply {
// ensure we show updated "wait for response" cursor blink state
m.updateContent()
}
} }
var cmd tea.Cmd var cmd tea.Cmd
@@ -348,6 +372,10 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
if cmd != nil { if cmd != nil {
cmds = append(cmds, cmd) cmds = append(cmds, cmd)
} }
m.replyCursor, cmd = m.replyCursor.Update(msg)
if cmd != nil {
cmds = append(cmds, cmd)
}
prevInputLineCnt := m.input.LineCount() prevInputLineCnt := m.input.LineCount()
inputCaptured := false inputCaptured := false
@@ -442,9 +470,10 @@ func (m *chatModel) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
} }
m.messages = m.messages[:m.selectedMessage+1] m.messages = m.messages[:m.selectedMessage+1]
m.messageCache = m.messageCache[:m.selectedMessage+1] m.messageCache = m.messageCache[:m.selectedMessage+1]
cmd := m.promptLLM()
m.updateContent() m.updateContent()
m.content.GotoBottom() m.content.GotoBottom()
return true, m.promptLLM() return true, cmd
} }
return false, nil return false, nil
} }
@@ -464,6 +493,10 @@ func (m *chatModel) handleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
m.input.Blur() m.input.Blur()
return true, nil return true, nil
case "ctrl+s": case "ctrl+s":
if m.waitingForReply {
return false, nil
}
input := strings.TrimSpace(m.input.Value()) input := strings.TrimSpace(m.input.Value())
if input == "" { if input == "" {
return true, nil return true, nil
@@ -487,9 +520,10 @@ func (m *chatModel) handleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
} }
} }
cmd := m.promptLLM()
m.updateContent() m.updateContent()
m.content.GotoBottom() m.content.GotoBottom()
return true, m.promptLLM() return true, cmd
case "ctrl+e": case "ctrl+e":
cmd := openTempfileEditor("message.*.md", m.input.Value(), "# Edit your input below\n") cmd := openTempfileEditor("message.*.md", m.input.Value(), "# Edit your input below\n")
m.editorTarget = input m.editorTarget = input
@@ -536,7 +570,10 @@ func (m *chatModel) renderMessageHeading(i int, message *models.Message) string
return messageHeadingStyle.Render(prefix + user + suffix) return messageHeadingStyle.Render(prefix + user + suffix)
} }
func (m *chatModel) renderMessage(msg *models.Message) string { func (m *chatModel) renderMessage(i int) string {
msg := &m.messages[i]
// Write message contents
sb := &strings.Builder{} sb := &strings.Builder{}
sb.Grow(len(msg.Content) * 2) sb.Grow(len(msg.Content) * 2)
if msg.Content != "" { if msg.Content != "" {
@@ -547,6 +584,12 @@ func (m *chatModel) renderMessage(msg *models.Message) string {
} }
} }
// Show the assistant's cursor
if m.waitingForReply && i == len(m.messages)-1 {
sb.WriteString(m.replyCursor.View())
}
// Write tool call info
var toolString string var toolString string
switch msg.Role { switch msg.Role {
case models.MessageRoleToolCall: case models.MessageRoleToolCall:
@@ -636,10 +679,18 @@ func (m *chatModel) conversationMessagesView() string {
sb.WriteString("\n") sb.WriteString("\n")
lineCnt += lipgloss.Height(heading) lineCnt += lipgloss.Height(heading)
cached := m.messageCache[i] var rendered string
sb.WriteString(cached) if m.waitingForReply && i == len(m.messages)-1 {
// do a direct render of final (assistant) message to handle the
// assistant cursor blink
rendered = m.renderMessage(i)
} else {
rendered = m.messageCache[i]
}
sb.WriteString(rendered)
sb.WriteString("\n") sb.WriteString("\n")
lineCnt += lipgloss.Height(cached) lineCnt += lipgloss.Height(rendered)
} }
return sb.String() return sb.String()
@@ -712,12 +763,12 @@ func (m *chatModel) setMessage(i int, msg models.Message) {
panic("i out of range") panic("i out of range")
} }
m.messages[i] = msg m.messages[i] = msg
m.messageCache[i] = m.renderMessage(&msg) m.messageCache[i] = m.renderMessage(i)
} }
func (m *chatModel) addMessage(msg models.Message) { func (m *chatModel) addMessage(msg models.Message) {
m.messages = append(m.messages, msg) m.messages = append(m.messages, msg)
m.messageCache = append(m.messageCache, m.renderMessage(&msg)) m.messageCache = append(m.messageCache, m.renderMessage(len(m.messages)-1))
} }
func (m *chatModel) setMessageContents(i int, content string) { func (m *chatModel) setMessageContents(i int, content string) {
@@ -725,13 +776,13 @@ func (m *chatModel) setMessageContents(i int, content string) {
panic("i out of range") panic("i out of range")
} }
m.messages[i].Content = content m.messages[i].Content = content
m.messageCache[i] = m.renderMessage(&m.messages[i]) m.messageCache[i] = m.renderMessage(i)
} }
func (m *chatModel) rebuildMessageCache() { func (m *chatModel) rebuildMessageCache() {
m.messageCache = make([]string, len(m.messages)) m.messageCache = make([]string, len(m.messages))
for i, msg := range m.messages { for i := range m.messages {
m.messageCache[i] = m.renderMessage(&msg) m.messageCache[i] = m.renderMessage(i)
} }
} }
@@ -792,7 +843,7 @@ func (m *chatModel) persistConversation() error {
if err != nil { if err != nil {
return err return err
} }
} else if i > 0 { } else if i > 0 {
// messages is new, so add it as a reply to previous message // messages is new, so add it as a reply to previous message
saved, err := m.ctx.Store.Reply(&m.messages[i-1], m.messages[i]) saved, err := m.ctx.Store.Reply(&m.messages[i-1], m.messages[i])
if err != nil { if err != nil {
@@ -833,8 +884,19 @@ func (m *chatModel) waitForChunk() tea.Cmd {
func (m *chatModel) promptLLM() tea.Cmd { func (m *chatModel) promptLLM() tea.Cmd {
m.waitingForReply = true m.waitingForReply = true
m.replyCursor.Blink = false
m.status = "Press ctrl+c to cancel" m.status = "Press ctrl+c to cancel"
toPrompt := m.messages
// Add response placeholder message
if m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant {
m.addMessage(models.Message{
Role: models.MessageRoleAssistant,
Content: "",
})
}
m.tokenCount = 0 m.tokenCount = 0
m.startTime = time.Now() m.startTime = time.Now()
m.elapsed = 0 m.elapsed = 0
@@ -868,7 +930,7 @@ func (m *chatModel) promptLLM() tea.Cmd {
}() }()
resp, err := completionProvider.CreateChatCompletionStream( resp, err := completionProvider.CreateChatCompletionStream(
ctx, requestParams, m.messages, replyHandler, m.replyChunkChan, ctx, requestParams, toPrompt, replyHandler, m.replyChunkChan,
) )
if err != nil && !canceled { if err != nil && !canceled {