Matt Low
38fed741af
We were sending an empty string to the output channel whenever a `ping` message was received from Anthropic's API. This was breaking the TUI, since we check for empty chunks (and were mistakenly not waiting for future chunks once one was received). This commit makes it so we no longer send an empty string on Anthropic's ping messages, and updates the handling of msgAssistantChunk and msgAssistantReply to make it less likely that we forget to wait for the next chunk/reply.
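In short, the corrected handler re-subscribes before inspecting the chunk (condensed from the Update handler in the file below):

    case msgResponseChunk:
        // wait for the next chunk *before* inspecting this one, so an empty
        // chunk can never leave the stream without a listener
        cmds = append(cmds, m.waitForChunk())
        chunk := string(msg)
        if chunk == "" {
            break
        }
        // ...append chunk to the in-progress assistant message...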
1092 lines
27 KiB
Go
package chat

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "time"

    cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
    models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
    "git.mlow.ca/mlow/lmcli/pkg/tui/shared"
    "git.mlow.ca/mlow/lmcli/pkg/tui/styles"
    tuiutil "git.mlow.ca/mlow/lmcli/pkg/tui/util"
    "github.com/charmbracelet/bubbles/cursor"
    "github.com/charmbracelet/bubbles/spinner"
    "github.com/charmbracelet/bubbles/textarea"
    "github.com/charmbracelet/bubbles/viewport"
    tea "github.com/charmbracelet/bubbletea"
    "github.com/charmbracelet/lipgloss"
    "github.com/muesli/reflow/wordwrap"
    "github.com/muesli/reflow/wrap"
    "gopkg.in/yaml.v2"
)

type focusState int

const (
    focusInput focusState = iota
    focusMessages
)

type editorTarget int

const (
    input editorTarget = iota
    selectedMessage
)

// custom tea.Msg types
type (
    // sent on each chunk received from LLM
    msgResponseChunk string
    // sent when response is finished being received
    msgResponseEnd string
    // a special case of common.MsgError that stops the response waiting animation
    msgResponseError error
    // sent on each completed reply
    msgAssistantReply models.Message
    // sent when a conversation is (re)loaded
    msgConversationLoaded *models.Conversation
    // sent when a new conversation title is set
    msgConversationTitleChanged string
    // sent when a conversation's messages are loaded
    msgMessagesLoaded []models.Message
)

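// Model holds the chat view's state: the active conversation and its
// messages, the streaming channels, and the Bubble Tea UI components.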
type Model struct {
    shared.State
    shared.Sections

    // app state
    conversation    *models.Conversation
    rootMessages    []models.Message
    messages        []models.Message
    selectedMessage int
    waitingForReply bool
    editorTarget    editorTarget
    stopSignal      chan struct{}
    replyChan       chan models.Message
    replyChunkChan  chan string
    persistence     bool // whether we will save new messages in the conversation

    // ui state
    focus           focusState
    wrap            bool     // whether message content is wrapped to viewport width
    status          string   // a general status message
    showToolResults bool     // whether tool calls and results are shown
    messageCache    []string // cache of syntax highlighted and wrapped message content
    messageOffsets  []int

    tokenCount uint
    startTime  time.Time
    elapsed    time.Duration

    // ui elements
    content     viewport.Model
    input       textarea.Model
    spinner     spinner.Model
    replyCursor cursor.Model // cursor to indicate incoming response
}

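// Chat constructs the chat view model, wiring up the streaming channels,
// the input textarea, spinner, and reply cursor.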
func Chat(state shared.State) Model {
    m := Model{
        State: state,

        conversation: &models.Conversation{},
        persistence:  true,

        stopSignal:     make(chan struct{}),
        replyChan:      make(chan models.Message),
        replyChunkChan: make(chan string),

        wrap:            true,
        selectedMessage: -1,

        content: viewport.New(0, 0),
        input:   textarea.New(),
        spinner: spinner.New(spinner.WithSpinner(
            spinner.Spinner{
                Frames: []string{
                    ". ",
                    ".. ",
                    "...",
                    ".. ",
                    ". ",
                    " ",
                },
                FPS: time.Second / 3,
            },
        )),
        replyCursor: cursor.New(),
    }

    m.replyCursor.SetChar(" ")
    m.replyCursor.Focus()

    system := state.Ctx.GetSystemPrompt()
    if system != "" {
        m.messages = []models.Message{{
            Role:    models.MessageRoleSystem,
            Content: system,
        }}
    }

    m.input.Focus()
    m.input.MaxHeight = 0
    m.input.CharLimit = 0
    m.input.ShowLineNumbers = false
    m.input.Placeholder = "Enter a message"

    m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
    m.input.FocusedStyle.Base = inputFocusedStyle
    m.input.BlurredStyle.Base = inputBlurredStyle

    m.waitingForReply = false
    m.status = "Press ctrl+s to send"
    return m
}

// styles
var (
    messageHeadingStyle = lipgloss.NewStyle().
        MarginTop(1).
        MarginBottom(1).
        PaddingLeft(1).
        Bold(true)

    userStyle = lipgloss.NewStyle().Faint(true).Foreground(lipgloss.Color("10"))

    assistantStyle = lipgloss.NewStyle().Faint(true).Foreground(lipgloss.Color("12"))

    messageStyle = lipgloss.NewStyle().
        PaddingLeft(2).
        PaddingRight(2)

    inputFocusedStyle = lipgloss.NewStyle().
        Border(lipgloss.RoundedBorder(), true, true, true, false)

    inputBlurredStyle = lipgloss.NewStyle().
        Faint(true).
        Border(lipgloss.RoundedBorder(), true, true, true, false)

    footerStyle = lipgloss.NewStyle()
)

func (m *Model) HandleInput(msg tea.KeyMsg) (bool, tea.Cmd) {
    switch m.focus {
    case focusInput:
        consumed, cmd := m.HandleInputKey(msg)
        if consumed {
            return true, cmd
        }
    case focusMessages:
        consumed, cmd := m.handleMessagesKey(msg)
        if consumed {
            return true, cmd
        }
    }

    switch msg.String() {
    case "esc":
        if m.waitingForReply {
            m.stopSignal <- struct{}{}
            return true, nil
        }
        return true, func() tea.Msg {
            return shared.MsgViewChange(shared.StateConversations)
        }
    case "ctrl+c":
        if m.waitingForReply {
            m.stopSignal <- struct{}{}
            return true, nil
        }
    case "ctrl+p":
        m.persistence = !m.persistence
        return true, nil
    case "ctrl+t":
        m.showToolResults = !m.showToolResults
        m.rebuildMessageCache()
        m.updateContent()
        return true, nil
    case "ctrl+w":
        m.wrap = !m.wrap
        m.rebuildMessageCache()
        m.updateContent()
        return true, nil
    }
    return false, nil
}

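// Init starts the long-lived listeners for reply chunks and completed
// replies. Each returned command delivers a single message, so Update must
// re-issue it every time one is received.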
func (m Model) Init() tea.Cmd {
    return tea.Batch(
        m.waitForChunk(),
        m.waitForReply(),
    )
}

func (m *Model) HandleResize(width, height int) {
    m.Width, m.Height = width, height
    m.content.Width = width
    m.input.SetWidth(width - m.input.FocusedStyle.Base.GetHorizontalFrameSize())
    if len(m.messages) > 0 {
        m.rebuildMessageCache()
        m.updateContent()
    }
}

func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
    var cmds []tea.Cmd
    switch msg := msg.(type) {
    case shared.MsgViewEnter:
        // wake up spinners and cursors
        cmds = append(cmds, cursor.Blink, m.spinner.Tick)

        if m.State.Values.ConvShortname != "" && m.conversation.ShortName.String != m.State.Values.ConvShortname {
            cmds = append(cmds, m.loadConversation(m.State.Values.ConvShortname))
        }

        m.rebuildMessageCache()
        m.updateContent()
    case tea.WindowSizeMsg:
        m.HandleResize(msg.Width, msg.Height)
    case tuiutil.MsgTempfileEditorClosed:
        contents := string(msg)
        switch m.editorTarget {
        case input:
            m.input.SetValue(contents)
        case selectedMessage:
            m.setMessageContents(m.selectedMessage, contents)
            if m.persistence && m.messages[m.selectedMessage].ID > 0 {
                // update persisted message
                err := m.State.Ctx.Store.UpdateMessage(&m.messages[m.selectedMessage])
                if err != nil {
                    cmds = append(cmds, shared.WrapError(fmt.Errorf("Could not save edited message: %v", err)))
                }
            }
            m.updateContent()
        }
    case msgConversationLoaded:
        m.conversation = (*models.Conversation)(msg)
        m.rootMessages, _ = m.State.Ctx.Store.RootMessages(m.conversation.ID)
        cmds = append(cmds, m.loadMessages(m.conversation))
    case msgMessagesLoaded:
        m.selectedMessage = len(msg) - 1
        m.messages = msg
        m.rebuildMessageCache()
        m.updateContent()
        m.content.GotoBottom()
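    // Always re-issue waitForChunk before examining the chunk, so an empty
    // chunk (e.g. from an Anthropic "ping" event) can no longer leave the
    // stream without a listener.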
    case msgResponseChunk:
        cmds = append(cmds, m.waitForChunk()) // wait for the next chunk

        chunk := string(msg)
        if chunk == "" {
            break
        }

        last := len(m.messages) - 1
        if last >= 0 && m.messages[last].Role.IsAssistant() {
            // append chunk to existing message
            m.setMessageContents(last, m.messages[last].Content+chunk)
        } else {
            // use chunk in new message
            m.addMessage(models.Message{
                Role:    models.MessageRoleAssistant,
                Content: chunk,
            })
        }
        m.updateContent()

        // show cursor and reset blink interval (simulate typing)
        m.replyCursor.Blink = false
        cmds = append(cmds, m.replyCursor.BlinkCmd())

        m.tokenCount++
        m.elapsed = time.Since(m.startTime)
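    // Same pattern: re-issue waitForReply first, so the next completed reply
    // is always received.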
    case msgAssistantReply:
        cmds = append(cmds, m.waitForReply()) // wait for the next reply

        reply := models.Message(msg)
        reply.Content = strings.TrimSpace(reply.Content)

        last := len(m.messages) - 1
        if last < 0 {
            panic("Unexpected empty messages handling msgAssistantReply")
        }

        if reply.Role.IsAssistant() && m.messages[last].Role.IsAssistant() {
            // this was a continuation, so replace the previous message with the completed reply
            m.setMessage(last, reply)
        } else {
            m.addMessage(reply)
        }

        if m.persistence {
            err := m.persistConversation()
            if err != nil {
                cmds = append(cmds, shared.WrapError(err))
            }
        }

        if m.conversation.Title == "" {
            cmds = append(cmds, m.generateConversationTitle())
        }

        m.updateContent()
    case msgResponseEnd:
        m.waitingForReply = false
        last := len(m.messages) - 1
        if last < 0 {
            panic("Unexpected empty messages handling msgResponseEnd")
        }
        m.setMessageContents(last, strings.TrimSpace(m.messages[last].Content))
        m.updateContent()
        m.status = "Press ctrl+s to send"
    case msgResponseError:
        m.waitingForReply = false
        m.status = "Press ctrl+s to send"
        m.State.Err = error(msg)
        m.updateContent()
    case msgConversationTitleChanged:
        title := string(msg)
        m.conversation.Title = title
        if m.persistence {
            err := m.State.Ctx.Store.UpdateConversation(m.conversation)
            if err != nil {
                cmds = append(cmds, shared.WrapError(err))
            }
        }
    case cursor.BlinkMsg:
        if m.waitingForReply {
            // ensure we show updated "wait for response" cursor blink state
            m.updateContent()
        }
    }

    var cmd tea.Cmd
    m.spinner, cmd = m.spinner.Update(msg)
    if cmd != nil {
        cmds = append(cmds, cmd)
    }
    m.replyCursor, cmd = m.replyCursor.Update(msg)
    if cmd != nil {
        cmds = append(cmds, cmd)
    }

    prevInputLineCnt := m.input.LineCount()
    inputCaptured := false
    m.input, cmd = m.input.Update(msg)
    if cmd != nil {
        inputCaptured = true
        cmds = append(cmds, cmd)
    }

    if !inputCaptured {
        m.content, cmd = m.content.Update(msg)
        if cmd != nil {
            cmds = append(cmds, cmd)
        }
    }

    // update views once window dimensions are known
    if m.Width > 0 {
        m.Header = m.headerView()
        m.Footer = m.footerView()
        m.Error = tuiutil.ErrorBanner(m.Err, m.Width)
        fixedHeight := tuiutil.Height(m.Header) + tuiutil.Height(m.Error) + tuiutil.Height(m.Footer)

        // calculate clamped input height to accommodate the input text:
        // minimum 4 lines, maximum half of the content area
        newHeight := max(4, min((m.Height-fixedHeight-1)/2, m.input.LineCount()))
        m.input.SetHeight(newHeight)
        m.Input = m.input.View()

        // remaining height goes to the content viewport
        m.content.Height = m.Height - fixedHeight - tuiutil.Height(m.Input)
        m.Content = m.content.View()
    }

    // this is a pretty nasty hack to ensure the input area viewport doesn't
    // scroll below its content, which can happen when the input viewport
    // height has grown, or previously entered lines have been deleted
    if prevInputLineCnt != m.input.LineCount() {
        // dist is the distance we'd need to scroll up from the current cursor
        // position to position the last input line at the bottom of the
        // viewport. if negative, we're already scrolled above the bottom
        dist := m.input.Line() - (m.input.LineCount() - m.input.Height())
        if dist > 0 {
            for i := 0; i < dist; i++ {
                // move cursor up until content reaches the bottom of the viewport
                m.input.CursorUp()
            }
            m.input, cmd = m.input.Update(nil)
            for i := 0; i < dist; i++ {
                // move cursor back down to its previous position
                m.input.CursorDown()
            }
            m.input, cmd = m.input.Update(nil)
        }
    }

    return m, tea.Batch(cmds...)
}

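// handleMessagesKey handles keys while the message list is focused:
// selection (ctrl+j/k), editing (e), branch cycling (ctrl+h/l), and
// resubmission (ctrl+r).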
func (m *Model) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
    switch msg.String() {
    case "tab", "enter":
        m.focus = focusInput
        m.updateContent()
        m.input.Focus()
        return true, nil
    case "e":
        message := m.messages[m.selectedMessage]
        cmd := tuiutil.OpenTempfileEditor("message.*.md", message.Content, "# Edit the message below\n")
        m.editorTarget = selectedMessage
        return true, cmd
    case "ctrl+k":
        if m.selectedMessage > 0 && len(m.messages) == len(m.messageOffsets) {
            m.selectedMessage--
            m.updateContent()
            offset := m.messageOffsets[m.selectedMessage]
            tuiutil.ScrollIntoView(&m.content, offset, m.content.Height/2)
        }
        return true, nil
    case "ctrl+j":
        if m.selectedMessage < len(m.messages)-1 && len(m.messages) == len(m.messageOffsets) {
            m.selectedMessage++
            m.updateContent()
            offset := m.messageOffsets[m.selectedMessage]
            tuiutil.ScrollIntoView(&m.content, offset, m.content.Height/2)
        }
        return true, nil
    case "ctrl+h", "ctrl+l":
        dir := CyclePrev
        if msg.String() == "ctrl+l" {
            dir = CycleNext
        }

        var err error
        var selected *models.Message
        if m.selectedMessage == 0 {
            selected, err = m.cycleSelectedRoot(m.conversation, dir)
            if err != nil {
                return true, shared.WrapError(fmt.Errorf("Could not cycle conversation root: %v", err))
            }
        } else if m.selectedMessage > 0 {
            selected, err = m.cycleSelectedReply(&m.messages[m.selectedMessage-1], dir)
            if err != nil {
                return true, shared.WrapError(fmt.Errorf("Could not cycle reply: %v", err))
            }
        }

        if selected == nil {
            return false, nil
        }

        // Retrieve updated view at this point
        newPath, err := m.State.Ctx.Store.PathToLeaf(selected)
        if err != nil {
            m.State.Err = fmt.Errorf("Could not fetch messages: %v", err)
        }

        m.messages = append(m.messages[:m.selectedMessage], newPath...)
        m.rebuildMessageCache()
        m.updateContent()
        return true, nil
    case "ctrl+r":
        // resubmit the conversation with all messages up to and including the selected message
        if m.waitingForReply || len(m.messages) == 0 {
            return true, nil
        }
        m.messages = m.messages[:m.selectedMessage+1]
        m.messageCache = m.messageCache[:m.selectedMessage+1]
        cmd := m.promptLLM()
        m.updateContent()
        m.content.GotoBottom()
        return true, cmd
    }
    return false, nil
}

type CycleDirection int

const (
    CycleNext CycleDirection = 1
    CyclePrev CycleDirection = -1
)

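// cycleMessages returns the message before or after m within msgs,
// wrapping around at either end.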
func cycleMessages(m *models.Message, msgs []models.Message, dir CycleDirection) (*models.Message, error) {
    currentIndex := -1
    for i, reply := range msgs {
        if reply.ID == m.ID {
            currentIndex = i
            break
        }
    }

    if currentIndex < 0 {
        return nil, fmt.Errorf("message not found")
    }

    var next int
    if dir == CyclePrev {
        // Wrap around to the last reply if at the beginning
        next = (currentIndex - 1 + len(msgs)) % len(msgs)
    } else {
        // Wrap around to the first reply if at the end
        next = (currentIndex + 1) % len(msgs)
    }
    return &msgs[next], nil
}

func (m *Model) cycleSelectedRoot(conv *models.Conversation, dir CycleDirection) (*models.Message, error) {
    if len(m.rootMessages) < 2 {
        return nil, nil
    }

    nextRoot, err := cycleMessages(conv.SelectedRoot, m.rootMessages, dir)
    if err != nil {
        return nil, err
    }

    conv.SelectedRoot = nextRoot
    err = m.State.Ctx.Store.UpdateConversation(conv)
    if err != nil {
        return nil, fmt.Errorf("Could not update conversation: %v", err)
    }
    return nextRoot, nil
}

func (m *Model) cycleSelectedReply(message *models.Message, dir CycleDirection) (*models.Message, error) {
    if len(message.Replies) < 2 {
        return nil, nil
    }

    nextReply, err := cycleMessages(message.SelectedReply, message.Replies, dir)
    if err != nil {
        return nil, err
    }

    message.SelectedReply = nextReply
    err = m.State.Ctx.Store.UpdateMessage(message)
    if err != nil {
        return nil, fmt.Errorf("Could not update message: %v", err)
    }
    return nextReply, nil
}

func (m *Model) HandleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
    switch msg.String() {
    case "esc":
        m.focus = focusMessages
        if len(m.messages) > 0 {
            if m.selectedMessage < 0 || m.selectedMessage >= len(m.messages) {
                m.selectedMessage = len(m.messages) - 1
            }
            offset := m.messageOffsets[m.selectedMessage]
            tuiutil.ScrollIntoView(&m.content, offset, m.content.Height/2)
        }
        m.updateContent()
        m.input.Blur()
        return true, nil
    case "ctrl+s":
        if m.waitingForReply {
            return false, nil
        }

        input := strings.TrimSpace(m.input.Value())
        if input == "" {
            return true, nil
        }

        if len(m.messages) > 0 && m.messages[len(m.messages)-1].Role == models.MessageRoleUser {
            return true, shared.WrapError(fmt.Errorf("Can't reply to a user message"))
        }

        m.addMessage(models.Message{
            Role:    models.MessageRoleUser,
            Content: input,
        })

        m.input.SetValue("")

        if m.persistence {
            err := m.persistConversation()
            if err != nil {
                return true, shared.WrapError(err)
            }
        }

        cmd := m.promptLLM()
        m.updateContent()
        m.content.GotoBottom()
        return true, cmd
    case "ctrl+e":
        cmd := tuiutil.OpenTempfileEditor("message.*.md", m.input.Value(), "# Edit your input below\n")
        m.editorTarget = input
        return true, cmd
    }
    return false, nil
}

func (m Model) View() string {
    if m.Width == 0 {
        return ""
    }
    sections := make([]string, 0, 6)

    if m.Header != "" {
        sections = append(sections, m.Header)
    }

    sections = append(sections, m.Content)
    if m.Error != "" {
        sections = append(sections, m.Error)
    }
    sections = append(sections, m.Input)

    if m.Footer != "" {
        sections = append(sections, m.Footer)
    }

    return lipgloss.JoinVertical(lipgloss.Left, sections...)
}

func (m *Model) renderMessageHeading(i int, message *models.Message) string {
    icon := ""
    friendly := message.Role.FriendlyRole()
    style := lipgloss.NewStyle().Faint(true).Bold(true)

    switch message.Role {
    case models.MessageRoleSystem:
        icon = "⚙️"
    case models.MessageRoleUser:
        style = userStyle
    case models.MessageRoleAssistant:
        style = assistantStyle
    case models.MessageRoleToolCall:
        style = assistantStyle
        friendly = models.MessageRoleAssistant.FriendlyRole()
    case models.MessageRoleToolResult:
        icon = "🔧"
    }

    user := style.Render(icon + friendly)

    var prefix string
    var suffix string

    faint := lipgloss.NewStyle().Faint(true)

    if i == 0 && len(m.rootMessages) > 0 {
        selectedRootIndex := 0
        for j, reply := range m.rootMessages {
            if reply.ID == *m.conversation.SelectedRootID {
                selectedRootIndex = j
                break
            }
        }
        suffix += faint.Render(fmt.Sprintf(" <%d/%d>", selectedRootIndex+1, len(m.rootMessages)))
    }
    if i > 0 && len(m.messages[i-1].Replies) > 1 {
        // Find the selected reply index
        selectedReplyIndex := 0
        for j, reply := range m.messages[i-1].Replies {
            if reply.ID == *m.messages[i-1].SelectedReplyID {
                selectedReplyIndex = j
                break
            }
        }
        suffix += faint.Render(fmt.Sprintf(" <%d/%d>", selectedReplyIndex+1, len(m.messages[i-1].Replies)))
    }

    if m.focus == focusMessages {
        if i == m.selectedMessage {
            prefix = "> "
        }
    }

    if message.ID == 0 {
        suffix += faint.Render(" (not saved)")
    }

    return messageHeadingStyle.Render(prefix + user + suffix)
}

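// renderMessage renders message i: syntax-highlighted content, any tool
// calls/results serialized as YAML, and optional word wrapping.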
func (m *Model) renderMessage(i int) string {
    msg := &m.messages[i]

    // Write message contents
    sb := &strings.Builder{}
    sb.Grow(len(msg.Content) * 2)
    if msg.Content != "" {
        err := m.State.Ctx.Chroma.Highlight(sb, msg.Content)
        if err != nil {
            sb.Reset()
            sb.WriteString(msg.Content)
        }
    }

    // Show the assistant's cursor
    if m.waitingForReply && i == len(m.messages)-1 {
        sb.WriteString(m.replyCursor.View())
    }

    // Write tool call info
    var toolString string
    switch msg.Role {
    case models.MessageRoleToolCall:
        bytes, err := yaml.Marshal(msg.ToolCalls)
        if err != nil {
            toolString = "Could not serialize ToolCalls"
        } else {
            toolString = "tool_calls:\n" + string(bytes)
        }
    case models.MessageRoleToolResult:
        if !m.showToolResults {
            break
        }

        type renderedResult struct {
            ToolName string `yaml:"tool"`
            Result   any
        }

        var toolResults []renderedResult
        for _, result := range msg.ToolResults {
            var jsonResult interface{}
            err := json.Unmarshal([]byte(result.Result), &jsonResult)
            if err != nil {
                // If parsing as JSON fails, treat Result as a plain string
                toolResults = append(toolResults, renderedResult{
                    ToolName: result.ToolName,
                    Result:   result.Result,
                })
            } else {
                // If parsing as JSON succeeds, marshal the parsed JSON into YAML
                toolResults = append(toolResults, renderedResult{
                    ToolName: result.ToolName,
                    Result:   &jsonResult,
                })
            }
        }

        bytes, err := yaml.Marshal(toolResults)
        if err != nil {
            toolString = "Could not serialize ToolResults"
        } else {
            toolString = "tool_results:\n" + string(bytes)
        }
    }

    if toolString != "" {
        toolString = strings.TrimRight(toolString, "\n")
        if msg.Content != "" {
            sb.WriteString("\n\n")
        }
        _ = m.State.Ctx.Chroma.HighlightLang(sb, toolString, "yaml")
    }

    content := strings.TrimRight(sb.String(), "\n")

    if m.wrap {
        wrapWidth := m.content.Width - messageStyle.GetHorizontalPadding()
        // first we word-wrap text to slightly less than the desired width
        // (since wordwrap seems to have an off-by-one issue), then hard wrap
        // at the desired width
        content = wrap.String(wordwrap.String(content, wrapWidth-2), wrapWidth)
    }

    return messageStyle.Width(0).Render(content)
}

// render the conversation into a string
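// as a side effect, records each message's starting line in
// m.messageOffsets, which ScrollIntoView uses to position the viewport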
func (m *Model) conversationMessagesView() string {
    sb := strings.Builder{}

    m.messageOffsets = make([]int, len(m.messages))
    lineCnt := 1
    for i, message := range m.messages {
        m.messageOffsets[i] = lineCnt

        switch message.Role {
        case models.MessageRoleToolCall:
            if !m.showToolResults && message.Content == "" {
                continue
            }
        case models.MessageRoleToolResult:
            if !m.showToolResults {
                continue
            }
        }

        heading := m.renderMessageHeading(i, &message)
        sb.WriteString(heading)
        sb.WriteString("\n")
        lineCnt += lipgloss.Height(heading)

        var rendered string
        if m.waitingForReply && i == len(m.messages)-1 {
            // do a direct render of the final (assistant) message to handle
            // the assistant cursor blink
            rendered = m.renderMessage(i)
        } else {
            rendered = m.messageCache[i]
        }

        sb.WriteString(rendered)
        sb.WriteString("\n")
        lineCnt += lipgloss.Height(rendered)
    }

    return sb.String()
}

func (m *Model) headerView() string {
    titleStyle := lipgloss.NewStyle().Bold(true)
    var title string
    if m.conversation != nil && m.conversation.Title != "" {
        title = m.conversation.Title
    } else {
        title = "Untitled"
    }
    title = tuiutil.TruncateToCellWidth(title, m.Width-styles.Header.GetHorizontalPadding(), "...")
    header := titleStyle.Render(title)
    return styles.Header.Width(m.Width).Render(header)
}

func (m *Model) footerView() string {
    segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1).Faint(true)
    segmentSeparator := "|"

    savingStyle := segmentStyle.Copy().Bold(true)
    saving := ""
    if m.persistence {
        saving = savingStyle.Foreground(lipgloss.Color("2")).Render("✅💾")
    } else {
        saving = savingStyle.Foreground(lipgloss.Color("1")).Render("❌💾")
    }

    status := m.status
    if m.waitingForReply {
        status += m.spinner.View()
    }

    leftSegments := []string{
        saving,
        segmentStyle.Render(status),
    }
    rightSegments := []string{}

    if m.elapsed > 0 && m.tokenCount > 0 {
        throughput := fmt.Sprintf("%.0f t/sec", float64(m.tokenCount)/m.elapsed.Seconds())
        rightSegments = append(rightSegments, segmentStyle.Render(throughput))
    }

    model := fmt.Sprintf("Model: %s", *m.State.Ctx.Config.Defaults.Model)
    rightSegments = append(rightSegments, segmentStyle.Render(model))

    left := strings.Join(leftSegments, segmentSeparator)
    right := strings.Join(rightSegments, segmentSeparator)

    totalWidth := lipgloss.Width(left) + lipgloss.Width(right)
    remaining := m.Width - totalWidth

    var padding string
    if remaining > 0 {
        padding = strings.Repeat(" ", remaining)
    }

    footer := left + padding + right
    if remaining < 0 {
        footer = tuiutil.TruncateToCellWidth(footer, m.Width, "...")
    }
    return footerStyle.Width(m.Width).Render(footer)
}

func (m *Model) setMessage(i int, msg models.Message) {
    if i >= len(m.messages) {
        panic("i out of range")
    }
    m.messages[i] = msg
    m.messageCache[i] = m.renderMessage(i)
}

func (m *Model) addMessage(msg models.Message) {
    m.messages = append(m.messages, msg)
    m.messageCache = append(m.messageCache, m.renderMessage(len(m.messages)-1))
}

func (m *Model) setMessageContents(i int, content string) {
    if i >= len(m.messages) {
        panic("i out of range")
    }
    m.messages[i].Content = content
    m.messageCache[i] = m.renderMessage(i)
}

func (m *Model) rebuildMessageCache() {
    m.messageCache = make([]string, len(m.messages))
    for i := range m.messages {
        m.messageCache[i] = m.renderMessage(i)
    }
}

func (m *Model) updateContent() {
    atBottom := m.content.AtBottom()
    m.content.SetContent(m.conversationMessagesView())
    if atBottom {
        // if we were at bottom before the update, scroll with the output
        m.content.GotoBottom()
    }
}

func (m *Model) loadConversation(shortname string) tea.Cmd {
    return func() tea.Msg {
        if shortname == "" {
            return nil
        }
        c, err := m.State.Ctx.Store.ConversationByShortName(shortname)
        if err != nil {
            return shared.MsgError(fmt.Errorf("Could not lookup conversation: %v", err))
        }
        if c.ID == 0 {
            return shared.MsgError(fmt.Errorf("Conversation not found: %s", shortname))
        }
        return msgConversationLoaded(c)
    }
}

func (m *Model) loadMessages(c *models.Conversation) tea.Cmd {
    return func() tea.Msg {
        messages, err := m.State.Ctx.Store.PathToLeaf(c.SelectedRoot)
        if err != nil {
            return shared.MsgError(fmt.Errorf("Could not load conversation messages: %v", err))
        }
        return msgMessagesLoaded(messages)
    }
}

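// persistConversation saves the in-memory messages: it starts a new
// conversation when none exists yet, otherwise it updates saved messages
// in place and appends unsaved ones as replies to their predecessors.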
func (m *Model) persistConversation() error {
    if m.conversation.ID == 0 {
        // Start a new conversation with all messages so far
        c, messages, err := m.State.Ctx.Store.StartConversation(m.messages...)
        if err != nil {
            return err
        }
        m.conversation = c
        m.messages = messages

        return nil
    }

    // else, we'll handle updating an existing conversation's messages
    for i := 0; i < len(m.messages); i++ {
        if m.messages[i].ID > 0 {
            // message has an ID, update its contents
            // TODO: check for content/tool equality before updating?
            err := m.State.Ctx.Store.UpdateMessage(&m.messages[i])
            if err != nil {
                return err
            }
        } else if i > 0 {
            // message is new, so add it as a reply to the previous message
            saved, err := m.State.Ctx.Store.Reply(&m.messages[i-1], m.messages[i])
            if err != nil {
                return err
            }
            m.messages[i-1].Replies = append(m.messages[i-1].Replies, saved[0])
            m.messages[i] = saved[0]
        } else {
            // message has no ID and no previous message to reply to;
            // this shouldn't happen
            return fmt.Errorf("Error: no messages to reply to")
        }
    }

    return nil
}

func (m *Model) generateConversationTitle() tea.Cmd {
    return func() tea.Msg {
        title, err := cmdutil.GenerateTitle(m.State.Ctx, m.messages)
        if err != nil {
            return shared.MsgError(err)
        }
        return msgConversationTitleChanged(title)
    }
}

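// waitForReply and waitForChunk each return a command that blocks until a
// single message arrives on its channel. Because a Bubble Tea command
// delivers exactly one tea.Msg, the Update handlers above must re-issue
// these commands after every message, or streaming would silently stop.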
func (m *Model) waitForReply() tea.Cmd {
    return func() tea.Msg {
        return msgAssistantReply(<-m.replyChan)
    }
}

func (m *Model) waitForChunk() tea.Cmd {
    return func() tea.Msg {
        return msgResponseChunk(<-m.replyChunkChan)
    }
}

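// promptLLM synchronously marks the UI as waiting and appends an assistant
// placeholder, then returns a command that runs the streaming completion:
// chunks arrive over replyChunkChan, completed replies over replyChan.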
func (m *Model) promptLLM() tea.Cmd {
    m.waitingForReply = true
    m.replyCursor.Blink = false
    m.status = "Press ctrl+c to cancel"

    toPrompt := m.messages

    // Add response placeholder message
    if m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant {
        m.addMessage(models.Message{
            Role:    models.MessageRoleAssistant,
            Content: "",
        })
    }

    m.tokenCount = 0
    m.startTime = time.Now()
    m.elapsed = 0

    return func() tea.Msg {
        completionProvider, err := m.State.Ctx.GetCompletionProvider(*m.State.Ctx.Config.Defaults.Model)
        if err != nil {
            return shared.MsgError(err)
        }

        requestParams := models.RequestParameters{
            Model:       *m.State.Ctx.Config.Defaults.Model,
            MaxTokens:   *m.State.Ctx.Config.Defaults.MaxTokens,
            Temperature: *m.State.Ctx.Config.Defaults.Temperature,
            ToolBag:     m.State.Ctx.EnabledTools,
        }

        replyHandler := func(msg models.Message) {
            m.replyChan <- msg
        }

        ctx, cancel := context.WithCancel(context.Background())

        canceled := false
        go func() {
            // cancel the in-flight request when the user hits esc/ctrl+c
            <-m.stopSignal
            canceled = true
            cancel()
        }()

        resp, err := completionProvider.CreateChatCompletionStream(
            ctx, requestParams, toPrompt, replyHandler, m.replyChunkChan,
        )

        if err != nil && !canceled {
            return msgResponseError(err)
        }

        return msgResponseEnd(resp)
    }
}