tui: cache highlighted messages

Syntax highlighting is fairly expensive; caching the highlighted output
means we no longer need to re-highlight the entire conversation each time
a new message chunk is received.
This commit is contained in:
Matt Low 2024-03-13 16:29:06 +00:00
parent 51e6f6ebf6
commit ccf2353a0b

View File

@ -41,8 +41,9 @@ type model struct {
err error
// ui state
focus focusState
status string // a general status message
focus focusState
status string // a general status message
highlightCache []string // a cache of syntax highlighted message content
// ui elements
content viewport.Model
@ -132,15 +133,15 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.conversation = (*models.Conversation)(msg)
cmd = m.loadMessages(m.conversation)
case msgMessagesLoaded:
m.messages = []models.Message(msg)
m.setMessages(msg)
m.updateContent()
case msgResponseChunk:
chunk := string(msg)
last := len(m.messages) - 1
if last >= 0 && m.messages[last].Role == models.MessageRoleAssistant {
m.messages[last].Content += chunk
m.setMessageContents(last, m.messages[last].Content+chunk)
} else {
m.messages = append(m.messages, models.Message{
m.addMessage(models.Message{
Role: models.MessageRoleAssistant,
Content: chunk,
})
@ -155,9 +156,9 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
panic("Unexpected messages length handling msgReply")
}
if reply.Role == models.MessageRoleToolCall && m.messages[last].Role == models.MessageRoleAssistant {
m.messages[last] = reply
m.setMessage(last, reply)
} else if reply.Role != models.MessageRoleAssistant {
m.messages = append(m.messages, reply)
m.addMessage(reply)
}
m.updateContent()
cmd = m.waitForReply()
@ -239,7 +240,7 @@ func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
return nil
}
m.input.SetValue("")
m.messages = append(m.messages, models.Message{
m.addMessage(models.Message{
Role: models.MessageRoleUser,
Content: userInput,
})
@ -322,6 +323,40 @@ func (m *model) promptLLM() tea.Cmd {
}
}
// setMessages replaces the conversation's message list and rebuilds the
// syntax-highlight cache so each cache entry lines up with its message.
func (m *model) setMessages(messages []models.Message) {
	m.messages = messages
	cache := make([]string, len(messages))
	for idx := range messages {
		// A highlight error is deliberately ignored: it leaves an empty
		// cache entry, and the renderer falls back to the raw content.
		rendered, _ := m.ctx.Chroma.HighlightS(messages[idx].Content)
		cache[idx] = rendered
	}
	m.highlightCache = cache
}
// setMessage overwrites the message at index i and refreshes its cached
// highlighted rendering. Panics if i is past the end of the message list.
func (m *model) setMessage(i int, msg models.Message) {
	if i >= len(m.messages) {
		panic("i out of range")
	}
	m.messages[i] = msg
	// A highlight error yields an empty cache entry, which the renderer
	// treats as "show the raw content instead".
	rendered, _ := m.ctx.Chroma.HighlightS(msg.Content)
	m.highlightCache[i] = rendered
}
// addMessage appends msg to the conversation, extending the highlight
// cache in lockstep so the two slices stay index-aligned.
func (m *model) addMessage(msg models.Message) {
	// Highlight errors are ignored; an empty cache entry makes the
	// renderer fall back to the unhighlighted content.
	rendered, _ := m.ctx.Chroma.HighlightS(msg.Content)
	m.messages = append(m.messages, msg)
	m.highlightCache = append(m.highlightCache, rendered)
}
// setMessageContents replaces the content of the message at index i and
// re-highlights it. Panics if i is past the end of the message list.
func (m *model) setMessageContents(i int, content string) {
	if i >= len(m.messages) {
		panic("i out of range")
	}
	m.messages[i].Content = content
	// As elsewhere, a highlight error leaves an empty cache entry and the
	// renderer falls back to the raw content.
	rendered, _ := m.ctx.Chroma.HighlightS(content)
	m.highlightCache[i] = rendered
}
// render the conversation into the main viewport
func (m *model) updateContent() {
sb := strings.Builder{}
msgCnt := len(m.messages)
@ -342,10 +377,22 @@ func (m *model) updateContent() {
icon = "🔧"
}
sb.WriteString(fmt.Sprintf("%s\n\n", style.Render(icon + friendly)))
// write message heading with space for content
header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly))
sb.WriteString(header)
// TODO: render something for tool calls/results?
// write message contents
var highlighted string
if m.highlightCache[i] == "" {
highlighted = message.Content
} else {
highlighted = m.highlightCache[i]
}
contents := messageStyle.Width(m.content.Width - 5).Render(highlighted)
sb.WriteString(contents)
highlighted, _ := m.ctx.Chroma.HighlightS(message.Content)
sb.WriteString(messageStyle.Width(m.content.Width - 5).Render(highlighted))
if i < msgCnt-1 {
sb.WriteString("\n\n")
}
@ -389,7 +436,6 @@ func (m *model) footerView() string {
}
footer := lipgloss.JoinHorizontal(lipgloss.Center, left, padding, right)
return footerStyle.Width(m.content.Width).Render(footer)
}