Compare commits

..

27 Commits

Author SHA1 Message Date
cf46088762 tui: update todos 2024-03-14 06:01:42 +00:00
c4b78aa0c6 tui: add response waiting spinner 2024-03-14 06:01:42 +00:00
377a4f1dfa tui: add LLM response error handling
+ various other small tweaks
2024-03-14 06:01:42 +00:00
000a2ec6f2 tui: add a "scroll bar" and error view 2024-03-14 06:01:42 +00:00
387dd7534c tui: generate titles for conversations 2024-03-14 06:01:42 +00:00
c14541577e tui: persist new conversations as well 2024-03-14 06:01:42 +00:00
213e36f652 tui: add reply persistence 2024-03-14 06:01:42 +00:00
9e02277ee7 tui: improve footer rendering
Made it easier to add segments later, better handling of padding
2024-03-14 06:01:42 +00:00
a96eac91b3 tui: slight function order change 2024-03-14 06:01:42 +00:00
ccf2353a0b tui: cache highlighted messages
Syntax highlighting is fairly expensive, and this means we no longer
need to do syntax highlighting on the entire conversation each time a new
message chunk is received
2024-03-14 06:01:42 +00:00
51e6f6ebf6 tui: adjust message header styling 2024-03-14 06:01:42 +00:00
6cb8d03c5b tui: style tweaks 2024-03-14 06:01:42 +00:00
50ad7d9ec6 tui: add contentStyle, applied to overall viewport content 2024-03-14 06:01:42 +00:00
5e26ee3373 tui: update TODO 2024-03-14 06:01:42 +00:00
8bc2523c17 tui: fix conversation loading 2024-03-14 06:01:42 +00:00
a06ac694c6 tui: use EnabledTools from lmcli.Context 2024-03-14 06:01:42 +00:00
00eb57820f tui: styling tweak 2024-03-14 06:01:42 +00:00
d1f10d2cfc tui: add header with title 2024-03-14 06:01:42 +00:00
1bd6baa837 tui: handle multi part responses 2024-03-14 06:01:42 +00:00
8613719b58 tui: scroll content view with output
clean up msgResponseChunk handling
2024-03-14 06:01:42 +00:00
51de2b7079 tui: ability to cancel request in flight 2024-03-14 06:01:42 +00:00
fe5baf58e3 tui: add focus switching between input/messages view 2024-03-14 06:01:42 +00:00
0ebfd39297 tui: removed confirm before send, dynamic footer
footer now rendered based on model data, instead of being set to a fixed
string
2024-03-14 06:01:42 +00:00
780c34a7ef tui: use ctx chroma highlighter 2024-03-14 06:01:42 +00:00
6bf2f1bb43 Add initial TUI 2024-03-14 06:01:42 +00:00
ec1f326c2a Add store.AddReply 2024-03-14 06:01:42 +00:00
db116660a5 Removed tool usage logging to stdout 2024-03-14 06:01:42 +00:00
3 changed files with 428 additions and 96 deletions

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
"time"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model" "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
sqids "github.com/sqids/sqids-go" sqids "github.com/sqids/sqids-go"
@ -26,6 +27,7 @@ type ConversationStore interface {
SaveMessage(message *model.Message) error SaveMessage(message *model.Message) error
DeleteMessage(message *model.Message) error DeleteMessage(message *model.Message) error
UpdateMessage(message *model.Message) error UpdateMessage(message *model.Message) error
AddReply(conversation *model.Conversation, message model.Message) (*model.Message, error)
} }
type SQLStore struct { type SQLStore struct {
@ -119,3 +121,12 @@ func (s *SQLStore) LastMessage(conversation *model.Conversation) (*model.Message
err := s.db.Where("conversation_id = ?", conversation.ID).Last(&message).Error err := s.db.Where("conversation_id = ?", conversation.ID).Last(&message).Error
return &message, err return &message, err
} }
// AddReply adds the given messages as a reply to the given conversation, can be
// used to easily copy a message associated with one conversation, to another
func (s *SQLStore) AddReply(c *model.Conversation, m model.Message) (*model.Message, error) {
m.ConversationID = c.ID
m.ID = 0
m.CreatedAt = time.Time{}
return &m, s.SaveMessage(&m)
}

View File

@ -2,7 +2,6 @@ package tools
import ( import (
"fmt" "fmt"
"os"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model" "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
) )
@ -29,9 +28,6 @@ func ExecuteToolCalls(toolCalls []model.ToolCall, toolBag []model.Tool) ([]model
return nil, fmt.Errorf("Requested tool '%s' does not exist. Hallucination?", toolCall.Name) return nil, fmt.Errorf("Requested tool '%s' does not exist. Hallucination?", toolCall.Name)
} }
// TODO: ability to silence this
fmt.Fprintf(os.Stderr, "\nINFO: Executing tool '%s' with args %s\n", toolCall.Name, toolCall.Parameters)
// Execute the tool // Execute the tool
result, err := tool.Impl(tool, toolCall.Parameters) result, err := tool.Impl(tool, toolCall.Parameters)
if err != nil { if err != nil {

View File

@ -3,15 +3,23 @@ package tui
// The terminal UI for lmcli, launched from the `lmcli chat` command // The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO: // TODO:
// - binding to open selected message/input in $EDITOR // - binding to open selected message/input in $EDITOR
// - ability to continue or retry previous response
// - conversation list view
// - change model
// - rename conversation
// - set system prompt
// - system prompt library?
import ( import (
"context" "context"
"fmt" "fmt"
"strings" "strings"
"time"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli" "git.mlow.ca/mlow/lmcli/pkg/lmcli"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model" models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools" "github.com/charmbracelet/bubbles/spinner"
"github.com/charmbracelet/bubbles/textarea" "github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport" "github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea" tea "github.com/charmbracelet/bubbletea"
@ -26,6 +34,9 @@ const (
) )
type model struct { type model struct {
width int
height int
ctx *lmcli.Context ctx *lmcli.Context
convShortname string convShortname string
@ -33,17 +44,21 @@ type model struct {
conversation *models.Conversation conversation *models.Conversation
messages []models.Message messages []models.Message
waitingForReply bool waitingForReply bool
replyChan chan string replyChan chan models.Message
replyChunkChan chan string
replyCancelFunc context.CancelFunc replyCancelFunc context.CancelFunc
err error err error
persistence bool // whether we will save new messages in the conversation
// ui state // ui state
focus focusState focus focusState
status string // a general status message status string // a general status message
highlightCache []string // a cache of syntax highlighted message content
// ui elements // ui elements
content viewport.Model content viewport.Model
input textarea.Model input textarea.Model
spinner spinner.Model
} }
type message struct { type message struct {
@ -57,8 +72,14 @@ type (
msgResponseChunk string msgResponseChunk string
// sent when response is finished being received // sent when response is finished being received
msgResponseEnd string msgResponseEnd string
// a special case of msgError that stops the response waiting animation
msgResponseError error
// sent on each completed reply
msgAssistantReply models.Message
// sent when a conversation is (re)loaded // sent when a conversation is (re)loaded
msgConversationLoaded *models.Conversation msgConversationLoaded *models.Conversation
// sent when a new conversation title is set
msgConversationTitleChanged string
// sent when a conversation's messages are loaded // sent when a conversation's messages are loaded
msgMessagesLoaded []models.Message msgMessagesLoaded []models.Message
// sent when an error occurs // sent when an error occurs
@ -67,10 +88,13 @@ type (
// styles // styles
var ( var (
inputStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#ff0000")) userStyle = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("10"))
contentStyle = lipgloss.NewStyle().PaddingLeft(2) assistantStyle = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("12"))
userStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("10")) messageStyle = lipgloss.NewStyle().PaddingLeft(1)
assistantStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12")) headerStyle = lipgloss.NewStyle().
Background(lipgloss.Color("0"))
contentStyle = lipgloss.NewStyle().
Padding(1)
footerStyle = lipgloss.NewStyle(). footerStyle = lipgloss.NewStyle().
BorderTop(true). BorderTop(true).
BorderStyle(lipgloss.NormalBorder()) BorderStyle(lipgloss.NormalBorder())
@ -79,13 +103,21 @@ var (
func (m model) Init() tea.Cmd { func (m model) Init() tea.Cmd {
return tea.Batch( return tea.Batch(
textarea.Blink, textarea.Blink,
m.spinner.Tick,
m.loadConversation(m.convShortname), m.loadConversation(m.convShortname),
waitForChunk(m.replyChan), m.waitForChunk(),
m.waitForReply(),
) )
} }
func wrapError(err error) tea.Cmd {
return func() tea.Msg {
return msgError(err)
}
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd var cmds []tea.Cmd
switch msg := msg.(type) { switch msg := msg.(type) {
case tea.KeyMsg: case tea.KeyMsg:
@ -96,6 +128,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
} else { } else {
return m, tea.Quit return m, tea.Quit
} }
case "ctrl+p":
m.persistence = !m.persistence
case "q": case "q":
if m.focus != focusInput { if m.focus != focusInput {
return m, tea.Quit return m, tea.Quit
@ -113,70 +147,256 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
} }
} }
case tea.WindowSizeMsg: case tea.WindowSizeMsg:
m.width = msg.Width
m.height = msg.Height
m.content.Width = msg.Width m.content.Width = msg.Width
m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView()) m.content.Height = msg.Height - m.getFixedComponentHeight()
m.input.SetWidth(msg.Width - 1) m.input.SetWidth(msg.Width - 1)
m.updateContent() m.updateContent()
case msgConversationLoaded: case msgConversationLoaded:
c := (*models.Conversation)(msg) m.conversation = (*models.Conversation)(msg)
cmd = m.loadMessages(c) cmds = append(cmds, m.loadMessages(m.conversation))
case msgMessagesLoaded: case msgMessagesLoaded:
m.messages = []models.Message(msg) m.setMessages(msg)
m.updateContent() m.updateContent()
case msgResponseChunk: case msgResponseChunk:
chunk := string(msg) chunk := string(msg)
if len(m.messages) > 0 { last := len(m.messages) - 1
i := len(m.messages) - 1 if last >= 0 && m.messages[last].Role == models.MessageRoleAssistant {
switch m.messages[i].Role { m.setMessageContents(last, m.messages[last].Content+chunk)
case models.MessageRoleAssistant: } else {
m.messages[i].Content += chunk m.addMessage(models.Message{
default:
m.messages = append(m.messages, models.Message{
Role: models.MessageRoleAssistant, Role: models.MessageRoleAssistant,
Content: chunk, Content: chunk,
}) })
} }
m.updateContent() m.updateContent()
cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
case msgAssistantReply:
// the last reply that was being worked on is finished
reply := models.Message(msg)
last := len(m.messages) - 1
if last < 0 {
panic("Unexpected messages length handling msgReply")
} }
cmd = waitForChunk(m.replyChan) // wait for the next chunk if reply.Role == models.MessageRoleToolCall && m.messages[last].Role == models.MessageRoleAssistant {
m.setMessage(last, reply)
} else if reply.Role != models.MessageRoleAssistant {
m.addMessage(reply)
}
if m.persistence {
var err error
if m.conversation.ID == 0 {
err = m.ctx.Store.SaveConversation(m.conversation)
}
if err != nil {
cmds = append(cmds, wrapError(err))
} else {
cmds = append(cmds, m.persistRecentMessages())
}
}
if m.conversation.Title == "" {
cmds = append(cmds, m.generateConversationTitle())
}
m.updateContent()
cmds = append(cmds, m.waitForReply())
case msgResponseEnd: case msgResponseEnd:
m.replyCancelFunc = nil m.replyCancelFunc = nil
m.waitingForReply = false m.waitingForReply = false
m.status = "Press ctrl+s to send" m.status = "Press ctrl+s to send"
case msgResponseError:
m.replyCancelFunc = nil
m.waitingForReply = false
m.status = "Press ctrl+s to send"
m.err = error(msg)
case msgConversationTitleChanged:
title := string(msg)
m.conversation.Title = title
if m.persistence {
err := m.ctx.Store.SaveConversation(m.conversation)
if err != nil {
cmds = append(cmds, wrapError(err))
}
}
case msgError:
m.err = error(msg)
} }
var cmd tea.Cmd
m.spinner, cmd = m.spinner.Update(msg)
if cmd != nil { if cmd != nil {
return m, cmd cmds = append(cmds, cmd)
} }
inputCaptured := false
m.input, cmd = m.input.Update(msg) m.input, cmd = m.input.Update(msg)
if cmd != nil { if cmd != nil {
return m, cmd inputCaptured = true
cmds = append(cmds, cmd)
} }
if !inputCaptured {
m.content, cmd = m.content.Update(msg) m.content, cmd = m.content.Update(msg)
if cmd != nil { if cmd != nil {
return m, cmd cmds = append(cmds, cmd)
}
} }
return m, cmd return m, tea.Batch(cmds...)
} }
func (m model) View() string { func (m model) View() string {
if m.content.Width == 0 {
// this is the case upon initial startup, but it's also a safe bet that
// we can just skip rendering if the terminal is really 0 width...
// without this, the below view functions may do weird things
return ""
}
m.content.Height = m.height - m.getFixedComponentHeight()
sections := make([]string, 0, 6)
error := m.errorView()
scrollbar := m.scrollbarView()
sections = append(sections, m.headerView())
if scrollbar != "" {
sections = append(sections, scrollbar)
}
sections = append(sections, m.contentView())
if error != "" {
sections = append(sections, error)
}
sections = append(sections, m.inputView())
sections = append(sections, m.footerView())
return lipgloss.JoinVertical( return lipgloss.JoinVertical(
lipgloss.Left, lipgloss.Left,
m.content.View(), sections...,
m.inputView(),
m.footerView(),
) )
} }
func (m *model) getFixedComponentHeight() int {
h := 0
h += m.input.Height()
h += lipgloss.Height(m.headerView())
h += lipgloss.Height(m.footerView())
scrollbar := m.scrollbarView()
if scrollbar != "" {
h += lipgloss.Height(scrollbar)
}
errorView := m.errorView()
if errorView != "" {
h += lipgloss.Height(errorView)
}
return h
}
func (m *model) headerView() string {
titleStyle := lipgloss.NewStyle().
PaddingLeft(1).
PaddingRight(1).
Bold(true)
var title string
if m.conversation != nil && m.conversation.Title != "" {
title = m.conversation.Title
} else {
title = "Untitled"
}
part := titleStyle.Render(title)
return headerStyle.Width(m.width).Render(part)
}
func (m *model) contentView() string {
return m.content.View()
}
func (m *model) errorView() string {
if m.err == nil {
return ""
}
return lipgloss.NewStyle().
Width(m.width).
AlignHorizontal(lipgloss.Center).
Bold(true).
Foreground(lipgloss.Color("1")).
Render(fmt.Sprintf("%s", m.err))
}
func (m *model) scrollbarView() string {
if m.content.AtTop() {
return ""
}
count := int(m.content.ScrollPercent() * float64(m.width-2))
fill := strings.Repeat("-", count)
return lipgloss.NewStyle().
Width(m.width).
PaddingLeft(1).
PaddingRight(1).
Render(fill)
}
func (m *model) inputView() string {
return m.input.View()
}
func (m *model) footerView() string {
segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1).Faint(true)
segmentSeparator := "|"
saving := ""
if m.persistence {
saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("2")).Render("✅💾")
} else {
saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("1")).Render("❌💾")
}
status := m.status
if m.waitingForReply {
status += m.spinner.View()
}
leftSegments := []string{
saving,
segmentStyle.Render(status),
}
rightSegments := []string{
segmentStyle.Render(fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)),
}
left := strings.Join(leftSegments, segmentSeparator)
right := strings.Join(rightSegments, segmentSeparator)
totalWidth := lipgloss.Width(left) + lipgloss.Width(right)
remaining := m.width - totalWidth
var padding string
if remaining > 0 {
padding = strings.Repeat(" ", remaining)
}
footer := left + padding + right
if remaining < 0 {
ellipses := "... "
footer = footer[:m.width-len(ellipses)] + ellipses
}
return footerStyle.Render(footer)
}
func initialModel(ctx *lmcli.Context, convShortname string) model { func initialModel(ctx *lmcli.Context, convShortname string) model {
m := model{ m := model{
ctx: ctx, ctx: ctx,
convShortname: convShortname, convShortname: convShortname,
conversation: &models.Conversation{},
persistence: true,
replyChan: make(chan string), replyChan: make(chan models.Message),
replyChunkChan: make(chan string),
} }
m.content = viewport.New(0, 0) m.content = viewport.New(0, 0)
@ -186,9 +406,22 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle() m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
m.input.ShowLineNumbers = false m.input.ShowLineNumbers = false
m.input.SetHeight(4)
m.input.Focus() m.input.Focus()
m.updateContent() m.spinner = spinner.New(spinner.WithSpinner(
spinner.Spinner{
Frames: []string{
". ",
".. ",
"...",
".. ",
". ",
" ",
},
FPS: time.Second / 3,
},
))
m.waitingForReply = false m.waitingForReply = false
m.status = "Press ctrl+s to send" m.status = "Press ctrl+s to send"
@ -214,17 +447,43 @@ func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
if strings.TrimSpace(userInput) == "" { if strings.TrimSpace(userInput) == "" {
return nil return nil
} }
m.input.SetValue("")
m.messages = append(m.messages, models.Message{ reply := models.Message{
Role: models.MessageRoleUser, Role: models.MessageRoleUser,
Content: userInput, Content: userInput,
}) }
if m.persistence {
var err error
if m.conversation.ID == 0 {
err = m.ctx.Store.SaveConversation(m.conversation)
}
if err != nil {
return wrapError(err)
}
// ensure all messages up to the one we're about to add are
// persistent
cmd := m.persistRecentMessages()
if cmd != nil {
return cmd
}
// persist our new message, returning with any possible errors
savedReply, err := m.ctx.Store.AddReply(m.conversation, reply)
if err != nil {
return wrapError(err)
}
reply = *savedReply
}
m.input.SetValue("")
m.addMessage(reply)
m.updateContent() m.updateContent()
m.content.GotoBottom() m.content.GotoBottom()
m.waitingForReply = true m.waitingForReply = true
m.status = "Waiting for response, press ctrl+c to cancel..." m.status = "Press ctrl+c to cancel"
return m.promptLLM() return m.promptLLM()
} }
return nil return nil
@ -237,10 +496,10 @@ func (m *model) loadConversation(shortname string) tea.Cmd {
} }
c, err := m.ctx.Store.ConversationByShortName(shortname) c, err := m.ctx.Store.ConversationByShortName(shortname)
if err != nil { if err != nil {
return msgError(fmt.Errorf("Could not lookup conversation: %v\n", err)) return msgError(fmt.Errorf("Could not lookup conversation: %v", err))
} }
if c.ID == 0 { if c.ID == 0 {
return msgError(fmt.Errorf("Conversation not found with short name: %s\n", shortname)) return msgError(fmt.Errorf("Conversation not found: %s", shortname))
} }
return msgConversationLoaded(c) return msgConversationLoaded(c)
} }
@ -256,9 +515,25 @@ func (m *model) loadMessages(c *models.Conversation) tea.Cmd {
} }
} }
func waitForChunk(ch chan string) tea.Cmd { func (m *model) waitForReply() tea.Cmd {
return func() tea.Msg { return func() tea.Msg {
return msgResponseChunk(<-ch) return msgAssistantReply(<-m.replyChan)
}
}
func (m *model) waitForChunk() tea.Cmd {
return func() tea.Msg {
return msgResponseChunk(<-m.replyChunkChan)
}
}
func (m *model) generateConversationTitle() tea.Cmd {
return func() tea.Msg {
title, err := cmdutil.GenerateTitle(m.ctx, m.conversation)
if err != nil {
return msgError(err)
}
return msgConversationTitleChanged(title)
} }
} }
@ -269,80 +544,130 @@ func (m *model) promptLLM() tea.Cmd {
return msgError(err) return msgError(err)
} }
var toolBag []models.Tool
for _, toolName := range *m.ctx.Config.Tools.EnabledTools {
tool, ok := tools.AvailableTools[toolName]
if ok {
toolBag = append(toolBag, tool)
}
}
requestParams := models.RequestParameters{ requestParams := models.RequestParameters{
Model: *m.ctx.Config.Defaults.Model, Model: *m.ctx.Config.Defaults.Model,
MaxTokens: *m.ctx.Config.Defaults.MaxTokens, MaxTokens: *m.ctx.Config.Defaults.MaxTokens,
Temperature: *m.ctx.Config.Defaults.Temperature, Temperature: *m.ctx.Config.Defaults.Temperature,
ToolBag: toolBag, ToolBag: m.ctx.EnabledTools,
}
replyHandler := func(msg models.Message) {
m.replyChan <- msg
} }
ctx, replyCancelFunc := context.WithCancel(context.Background()) ctx, replyCancelFunc := context.WithCancel(context.Background())
m.replyCancelFunc = replyCancelFunc m.replyCancelFunc = replyCancelFunc
// TODO: supply a reply callback and handle error resp, err := completionProvider.CreateChatCompletionStream(
resp, _ := completionProvider.CreateChatCompletionStream( ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
ctx, requestParams, m.messages, nil, m.replyChan,
) )
if err != nil {
return msgResponseError(err)
}
return msgResponseEnd(resp) return msgResponseEnd(resp)
} }
} }
func (m *model) persistRecentMessages() tea.Cmd {
for i, msg := range m.messages {
if msg.ID > 0 {
continue
}
newMessage, err := m.ctx.Store.AddReply(m.conversation, msg)
if err != nil {
return wrapError(err)
}
m.setMessage(i, *newMessage)
}
return nil
}
func (m *model) setMessages(messages []models.Message) {
m.messages = messages
m.highlightCache = make([]string, len(messages))
for i, msg := range m.messages {
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.highlightCache[i] = highlighted
}
}
func (m *model) setMessage(i int, msg models.Message) {
if i >= len(m.messages) {
panic("i out of range")
}
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.messages[i] = msg
m.highlightCache[i] = highlighted
}
func (m *model) addMessage(msg models.Message) {
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.messages = append(m.messages, msg)
m.highlightCache = append(m.highlightCache, highlighted)
}
func (m *model) setMessageContents(i int, content string) {
if i >= len(m.messages) {
panic("i out of range")
}
highlighted, _ := m.ctx.Chroma.HighlightS(content)
m.messages[i].Content = content
m.highlightCache[i] = highlighted
}
// render the conversation into the main viewport
func (m *model) updateContent() { func (m *model) updateContent() {
sb := strings.Builder{} sb := strings.Builder{}
msgCnt := len(m.messages) msgCnt := len(m.messages)
for i, message := range m.messages { for i, message := range m.messages {
var style lipgloss.Style icon := "⚙️"
if message.Role == models.MessageRoleUser { friendly := message.Role.FriendlyRole()
style := lipgloss.NewStyle().Bold(true).Faint(true)
switch message.Role {
case models.MessageRoleUser:
icon = ""
style = userStyle style = userStyle
} else { case models.MessageRoleAssistant:
icon = ""
style = assistantStyle style = assistantStyle
case models.MessageRoleToolCall:
case models.MessageRoleToolResult:
icon = "🔧"
} }
sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role.FriendlyRole())))) var saved string
if message.ID == 0 {
saved = lipgloss.NewStyle().Faint(true).Render(" (not saved)")
}
// write message heading with space for content
header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly)+saved)
sb.WriteString(header)
// TODO: special rendering for tool calls/results?
// write message contents
var highlighted string
if m.highlightCache[i] == "" {
highlighted = message.Content
} else {
highlighted = m.highlightCache[i]
}
contents := messageStyle.Width(m.content.Width - 5).Render(highlighted)
sb.WriteString(contents)
highlighted, _ := m.ctx.Chroma.HighlightS(message.Content)
sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted))
if i < msgCnt-1 { if i < msgCnt-1 {
sb.WriteString("\n\n") sb.WriteString("\n\n")
} }
} }
m.content.SetContent(sb.String()) atBottom := m.content.AtBottom()
m.content.SetContent(contentStyle.Render(sb.String()))
if atBottom {
// if we were at bottom before the update, scroll with the output
m.content.GotoBottom()
} }
func (m model) inputView() string {
var inputView string
if m.waitingForReply {
inputView = inputStyle.Faint(true).Render(m.input.View())
} else {
inputView = inputStyle.Render(m.input.View())
}
return inputView
}
func (m model) footerView() string {
left := m.status
right := fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)
totalWidth := lipgloss.Width(left + right)
var padding string
if m.content.Width-totalWidth > 0 {
padding = strings.Repeat(" ", m.content.Width-totalWidth)
} else {
padding = ""
}
footer := lipgloss.JoinHorizontal(lipgloss.Center, left, padding, right)
return footerStyle.Width(m.content.Width).Render(footer)
} }
func Launch(ctx *lmcli.Context, convShortname string) error { func Launch(ctx *lmcli.Context, convShortname string) error {