Compare commits


3 Commits

SHA1        Message                                             Date
437997872a  Improve message wrapping behavior                   2024-05-22 16:57:52 +00:00
3536438dd1  Add cursor to indicate the assistant is responding  2024-05-22 16:25:16 +00:00
            (A better/more natural indication that the model
            is doing something)
f5ce970102  Set default retry offset to 0                       2024-05-21 00:13:56 +00:00
2 changed files with 87 additions and 21 deletions
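The cursor feature below leans on the cursor component from charmbracelet's bubbles library. For orientation, a minimal sketch of the calls this diff uses (cursor.New, SetChar, Focus, the Blink field, BlinkCmd, and View); illustrative only, not code from the change:

package main

import (
	"fmt"

	"github.com/charmbracelet/bubbles/cursor"
)

func main() {
	c := cursor.New() // a cursor.Model with default styles
	c.SetChar(" ")    // glyph to render; a space reads as a solid block
	c.Focus()         // focused cursors are visible and may blink
	c.Blink = false   // false means the cursor is in its visible phase
	_ = c.BlinkCmd()  // tea.Cmd that schedules the next visibility toggle
	fmt.Println(c.View())
}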

View File

@@ -66,7 +66,7 @@ func RetryCmd(ctx *lmcli.Context) *cobra.Command {
 		},
 	}
-	cmd.Flags().Int("offset", 1, "Offset from the last message retry from.")
+	cmd.Flags().Int("offset", 0, "Offset from the last message to retry from.")
 	applyPromptFlags(ctx, cmd)
 	return cmd
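With the default now 0, a plain retry targets the conversation's last message rather than the one before it. A rough sketch of how an offset counted back from the last message maps to an index (a hypothetical helper, shown only to pin down the flag's semantics; the actual lookup lives outside this diff):

// retryFromIndex returns the index of the message to retry from, where
// offset 0 means the last message, 1 the one before it, and so on.
func retryFromIndex(msgCount, offset int) int {
	i := msgCount - 1 - offset
	if i < 0 {
		i = 0
	}
	return i
}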

View File

@@ -9,12 +9,14 @@ import (
 	cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
 	models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"

+	"github.com/charmbracelet/bubbles/cursor"
 	"github.com/charmbracelet/bubbles/spinner"
 	"github.com/charmbracelet/bubbles/textarea"
 	"github.com/charmbracelet/bubbles/viewport"
 	tea "github.com/charmbracelet/bubbletea"
 	"github.com/charmbracelet/lipgloss"
 	"github.com/muesli/reflow/wordwrap"
+	"github.com/muesli/reflow/wrap"
 	"gopkg.in/yaml.v2"
 )
@@ -82,6 +84,7 @@ type chatModel struct {
 	content     viewport.Model
 	input       textarea.Model
 	spinner     spinner.Model
+	replyCursor cursor.Model // cursor to indicate incoming response
 }

 func newChatModel(tui *model) chatModel {
@@ -117,8 +120,12 @@ func newChatModel(tui *model) chatModel {
 			FPS: time.Second / 3,
 		},
 	)),
+		replyCursor: cursor.New(),
 	}
+
+	m.replyCursor.SetChar(" ")
+	m.replyCursor.Focus()

 	system := tui.ctx.GetSystemPrompt()
 	if system != "" {
 		m.messages = []models.Message{{
@@ -220,8 +227,6 @@ func (m *chatModel) handleInput(msg tea.KeyMsg) (bool, tea.Cmd) {
 func (m chatModel) Init() tea.Cmd {
 	return tea.Batch(
-		textarea.Blink,
-		m.spinner.Tick,
 		m.waitForChunk(),
 		m.waitForReply(),
 	)
@@ -241,9 +246,13 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 	var cmds []tea.Cmd
 	switch msg := msg.(type) {
 	case msgStateEnter:
+		// wake up spinners and cursors
+		cmds = append(cmds, cursor.Blink, m.spinner.Tick)
+
 		if m.opts.convShortname != "" && m.conversation.ShortName.String != m.opts.convShortname {
 			cmds = append(cmds, m.loadConversation(m.opts.convShortname))
 		}
 		m.rebuildMessageCache()
 		m.updateContent()
 	case tea.WindowSizeMsg:
@@ -275,10 +284,16 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 		m.content.GotoBottom()
 	case msgResponseChunk:
 		chunk := string(msg)
+		if chunk == "" {
+			break
+		}
 		last := len(m.messages) - 1
 		if last >= 0 && m.messages[last].Role.IsAssistant() {
+			// append chunk to existing message
 			m.setMessageContents(last, m.messages[last].Content+chunk)
 		} else {
+			// use chunk in new message
 			m.addMessage(models.Message{
 				Role:    models.MessageRoleAssistant,
 				Content: chunk,
@@ -287,6 +302,10 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 		m.updateContent()
 		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
+
+		// show cursor and reset blink interval (simulate typing)
+		m.replyCursor.Blink = false
+		cmds = append(cmds, m.replyCursor.BlinkCmd())
 		m.tokenCount++
 		m.elapsed = time.Now().Sub(m.startTime)
 	case msgAssistantReply:
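The blink reset above is what makes the cursor read as typing: each chunk forces the cursor back into its visible phase and restarts the blink timer, so the cursor stays solid while tokens stream and only begins blinking once the stream goes quiet. The pattern in isolation (same field and method names as the bubbles cursor API used here):

// on every streamed chunk:
m.replyCursor.Blink = false                   // snap to the visible phase
cmds = append(cmds, m.replyCursor.BlinkCmd()) // push the next toggle out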
@@ -332,6 +351,7 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 		m.waitingForReply = false
 		m.status = "Press ctrl+s to send"
 		m.err = error(msg)
+		m.updateContent()
 	case msgConversationTitleChanged:
 		title := string(msg)
 		m.conversation.Title = title
@@ -341,6 +361,11 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 				cmds = append(cmds, wrapError(err))
 			}
 		}
+	case cursor.BlinkMsg:
+		if m.waitingForReply {
+			// ensure we show updated "wait for response" cursor blink state
+			m.updateContent()
+		}
 	}

 	var cmd tea.Cmd
@@ -348,6 +373,10 @@ func (m chatModel) Update(msg tea.Msg) (chatModel, tea.Cmd) {
 	if cmd != nil {
 		cmds = append(cmds, cmd)
 	}
+	m.replyCursor, cmd = m.replyCursor.Update(msg)
+	if cmd != nil {
+		cmds = append(cmds, cmd)
+	}

 	prevInputLineCnt := m.input.LineCount()
 	inputCaptured := false
@@ -442,9 +471,10 @@ func (m *chatModel) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
 		}
 		m.messages = m.messages[:m.selectedMessage+1]
 		m.messageCache = m.messageCache[:m.selectedMessage+1]
+		cmd := m.promptLLM()
 		m.updateContent()
 		m.content.GotoBottom()
-		return true, m.promptLLM()
+		return true, cmd
 	}
 	return false, nil
 }
@@ -464,6 +494,10 @@ func (m *chatModel) handleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
 		m.input.Blur()
 		return true, nil
 	case "ctrl+s":
+		if m.waitingForReply {
+			return false, nil
+		}
+
 		input := strings.TrimSpace(m.input.Value())
 		if input == "" {
 			return true, nil
@@ -487,9 +521,10 @@
 			}
 		}

+		cmd := m.promptLLM()
 		m.updateContent()
 		m.content.GotoBottom()
-		return true, m.promptLLM()
+		return true, cmd
 	case "ctrl+e":
 		cmd := openTempfileEditor("message.*.md", m.input.Value(), "# Edit your input below\n")
 		m.editorTarget = input
@@ -536,7 +571,10 @@ func (m *chatModel) renderMessageHeading(i int, message *models.Message) string
 	return messageHeadingStyle.Render(prefix + user + suffix)
 }

-func (m *chatModel) renderMessage(msg *models.Message) string {
+func (m *chatModel) renderMessage(i int) string {
+	msg := &m.messages[i]
+
+	// Write message contents
 	sb := &strings.Builder{}
 	sb.Grow(len(msg.Content) * 2)
 	if msg.Content != "" {
@@ -547,6 +585,12 @@ func (m *chatModel) renderMessage(msg *models.Message) string {
 		}
 	}

+	// Show the assistant's cursor
+	if m.waitingForReply && i == len(m.messages)-1 {
+		sb.WriteString(m.replyCursor.View())
+	}
+
+	// Write tool call info
 	var toolString string
 	switch msg.Role {
 	case models.MessageRoleToolCall:
@@ -604,8 +648,11 @@
 	content := strings.TrimRight(sb.String(), "\n")

 	if m.wrap {
-		wrapWidth := m.content.Width - messageStyle.GetHorizontalPadding() - 2
-		content = wordwrap.String(content, wrapWidth)
+		wrapWidth := m.content.Width - messageStyle.GetHorizontalPadding()
+		// first we word-wrap text to slightly less than desired width (since
+		// wordwrap seems to have an off-by-1 issue), then hard wrap at the
+		// desired width
+		content = wrap.String(wordwrap.String(content, wrapWidth-2), wrapWidth)
 	}

 	return messageStyle.Width(0).Render(content)
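The two-stage wrap combines both reflow packages: wordwrap breaks at word boundaries but can leave a line slightly over the limit, while wrap hard-cuts anything still too long (such as unbroken strings). A self-contained sketch of the same combination:

package main

import (
	"fmt"

	"github.com/muesli/reflow/wordwrap"
	"github.com/muesli/reflow/wrap"
)

// wrapToWidth soft-wraps at word boundaries slightly under the target
// width, then hard-wraps whatever still overflows it.
func wrapToWidth(s string, width int) string {
	return wrap.String(wordwrap.String(s, width-2), width)
}

func main() {
	fmt.Println(wrapToWidth("a veryverylongunbreakabletoken follows", 10))
}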
@@ -636,10 +683,18 @@ func (m *chatModel) conversationMessagesView() string {
 		sb.WriteString("\n")
 		lineCnt += lipgloss.Height(heading)

-		cached := m.messageCache[i]
-		sb.WriteString(cached)
+		var rendered string
+		if m.waitingForReply && i == len(m.messages)-1 {
+			// do a direct render of final (assistant) message to handle the
+			// assistant cursor blink
+			rendered = m.renderMessage(i)
+		} else {
+			rendered = m.messageCache[i]
+		}
+		sb.WriteString(rendered)
 		sb.WriteString("\n")
-		lineCnt += lipgloss.Height(cached)
+		lineCnt += lipgloss.Height(rendered)
 	}

 	return sb.String()
@@ -712,12 +767,12 @@ func (m *chatModel) setMessage(i int, msg models.Message) {
 		panic("i out of range")
 	}
 	m.messages[i] = msg
-	m.messageCache[i] = m.renderMessage(&msg)
+	m.messageCache[i] = m.renderMessage(i)
 }

 func (m *chatModel) addMessage(msg models.Message) {
 	m.messages = append(m.messages, msg)
-	m.messageCache = append(m.messageCache, m.renderMessage(&msg))
+	m.messageCache = append(m.messageCache, m.renderMessage(len(m.messages)-1))
 }

 func (m *chatModel) setMessageContents(i int, content string) {
@@ -725,13 +780,13 @@ func (m *chatModel) setMessageContents(i int, content string) {
 		panic("i out of range")
 	}
 	m.messages[i].Content = content
-	m.messageCache[i] = m.renderMessage(&m.messages[i])
+	m.messageCache[i] = m.renderMessage(i)
 }

 func (m *chatModel) rebuildMessageCache() {
 	m.messageCache = make([]string, len(m.messages))
-	for i, msg := range m.messages {
-		m.messageCache[i] = m.renderMessage(&msg)
+	for i := range m.messages {
+		m.messageCache[i] = m.renderMessage(i)
 	}
 }
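Switching renderMessage to take an index serves two purposes: the renderer can now tell whether it is drawing the final message (where the reply cursor goes), and the rebuild loop no longer renders through a pointer to the range variable, which is a per-iteration copy rather than the slice element. A quick illustration of that copy behavior in Go (illustrative only):

package main

import "fmt"

func main() {
	xs := []int{1, 2, 3}
	var p *int
	for _, x := range xs {
		p = &x // address of the loop variable's copy, not of xs[i]
	}
	xs[2] = 99
	fmt.Println(*p) // prints 3, unaffected by the write to xs[2]
}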
@@ -833,8 +888,19 @@ func (m *chatModel) waitForChunk() tea.Cmd {
 func (m *chatModel) promptLLM() tea.Cmd {
 	m.waitingForReply = true
+	m.replyCursor.Blink = false
 	m.status = "Press ctrl+c to cancel"

+	toPrompt := m.messages
+
+	// Add response placeholder message
+	if m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant {
+		m.addMessage(models.Message{
+			Role:    models.MessageRoleAssistant,
+			Content: "",
+		})
+	}
+
 	m.tokenCount = 0
 	m.startTime = time.Now()
 	m.elapsed = 0
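Capturing toPrompt before appending the placeholder matters because of Go's slice semantics: the copied slice header keeps the pre-append length, so the empty assistant message appears in the UI but is never sent to the provider. A small demonstration of the idea (illustrative values, not lmcli types):

package main

import "fmt"

func main() {
	msgs := []string{"user: hello"}
	toPrompt := msgs                   // slice header copy: len 1
	msgs = append(msgs, "assistant: ") // placeholder; msgs now has len 2
	fmt.Println(len(toPrompt), len(msgs)) // 1 2
}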
@@ -868,7 +934,7 @@
 		}()

 		resp, err := completionProvider.CreateChatCompletionStream(
-			ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
+			ctx, requestParams, toPrompt, replyHandler, m.replyChunkChan,
 		)

 		if err != nil && !canceled {