package tui

// The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO:
// - binding to open selected message/input in $EDITOR
// - ability to continue or retry previous response
// - conversation list view
// - change model
// - rename conversation
// - set system prompt
// - system prompt library?

import (
	"context"
	"fmt"
	"strings"
	"time"

	cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
	"git.mlow.ca/mlow/lmcli/pkg/lmcli"
	models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"

	"github.com/charmbracelet/bubbles/spinner"
	"github.com/charmbracelet/bubbles/textarea"
	"github.com/charmbracelet/bubbles/viewport"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)

// focusState tracks which UI element currently receives key input.
type focusState int

const (
	focusInput focusState = iota
	focusMessages
)

// model is the Bubble Tea model holding all TUI state.
type model struct {
	width         int
	height        int
	ctx           *lmcli.Context
	convShortname string

	// application state
	conversation    *models.Conversation
	messages        []models.Message
	waitingForReply bool
	replyChan       chan models.Message
	replyChunkChan  chan string
	replyCancelFunc context.CancelFunc
	err             error
	persistence     bool // whether we will save new messages in the conversation

	// ui state
	focus          focusState
	status         string   // a general status message
	highlightCache []string // a cache of syntax highlighted message content

	// ui elements
	content viewport.Model
	input   textarea.Model
	spinner spinner.Model
}

type message struct {
	role    string
	content string
}

// custom tea.Msg types
type (
	// sent on each chunk received from LLM
	msgResponseChunk string
	// sent when response is finished being received
	msgResponseEnd string
	// a special case of msgError that stops the response waiting animation
	msgResponseError error
	// sent on each completed reply
	msgAssistantReply models.Message
	// sent when a conversation is (re)loaded
	msgConversationLoaded *models.Conversation
	// sent when a new conversation title is set
	msgConversationTitleChanged string
	// sent when a conversation's messages are loaded
	msgMessagesLoaded []models.Message
	// sent when an error occurs
	msgError error
)

// styles
var (
	userStyle      = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("10"))
	assistantStyle = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("12"))
	messageStyle   = lipgloss.NewStyle().PaddingLeft(1)
	headerStyle    = lipgloss.NewStyle().
			Background(lipgloss.Color("0"))
	contentStyle = lipgloss.NewStyle().
			Padding(1)
	footerStyle = lipgloss.NewStyle().
			BorderTop(true).
			BorderStyle(lipgloss.NormalBorder())
)

// Init starts the cursor blink and spinner, kicks off the conversation load,
// and begins listening on the reply/chunk channels.
func (m model) Init() tea.Cmd {
	return tea.Batch(
		textarea.Blink,
		m.spinner.Tick,
		m.loadConversation(m.convShortname),
		m.waitForChunk(),
		m.waitForReply(),
	)
}

// wrapError converts an error into a tea.Cmd that delivers it as a msgError.
func wrapError(err error) tea.Cmd {
	return func() tea.Msg {
		return msgError(err)
	}
}

// Update is the central message handler: it dispatches key input, window
// resizes, and all custom messages, then forwards remaining messages to the
// spinner, input, and viewport sub-models.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c":
			if m.waitingForReply {
				// cancel the in-flight completion rather than quitting
				m.replyCancelFunc()
			} else {
				return m, tea.Quit
			}
		case "ctrl+p":
			m.persistence = !m.persistence
		case "q":
			if m.focus != focusInput {
				return m, tea.Quit
			}
		default:
			var inputHandled tea.Cmd
			switch m.focus {
			case focusInput:
				inputHandled = m.handleInputKey(msg)
			case focusMessages:
				inputHandled = m.handleMessagesKey(msg)
			}
			if inputHandled != nil {
				return m, inputHandled
			}
		}
	case tea.WindowSizeMsg:
		m.width = msg.Width
		m.height = msg.Height
		m.content.Width = msg.Width
		m.content.Height = msg.Height - m.getFixedComponentHeight()
		m.input.SetWidth(msg.Width - 1)
		m.updateContent()
	case msgConversationLoaded:
		m.conversation = (*models.Conversation)(msg)
		cmds = append(cmds, m.loadMessages(m.conversation))
	case msgMessagesLoaded:
		m.setMessages(msg)
		m.updateContent()
	case msgResponseChunk:
		// append the chunk to the in-progress assistant message, or start a
		// new assistant message if the last message isn't one
		chunk := string(msg)
		last := len(m.messages) - 1
		if last >= 0 && m.messages[last].Role == models.MessageRoleAssistant {
			m.setMessageContents(last, m.messages[last].Content+chunk)
		} else {
			m.addMessage(models.Message{
				Role:    models.MessageRoleAssistant,
				Content: chunk,
			})
		}
		m.updateContent()
		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
	case msgAssistantReply:
		// the last reply that was being worked on is finished
		reply := models.Message(msg)
		last := len(m.messages) - 1
		if last < 0 {
			panic("Unexpected messages length handling msgAssistantReply")
		}
		if reply.Role == models.MessageRoleToolCall && m.messages[last].Role == models.MessageRoleAssistant {
			m.setMessage(last, reply)
		} else if reply.Role != models.MessageRoleAssistant {
			m.addMessage(reply)
		}

		if m.persistence {
			var err error
			if m.conversation.ID == 0 {
				err = m.ctx.Store.SaveConversation(m.conversation)
			}
			if err != nil {
				cmds = append(cmds, wrapError(err))
			} else {
				cmds = append(cmds, m.persistRecentMessages())
			}
		}

		if m.conversation.Title == "" {
			cmds = append(cmds, m.generateConversationTitle())
		}

		m.updateContent()
		cmds = append(cmds, m.waitForReply())
	case msgResponseEnd:
		m.replyCancelFunc = nil
		m.waitingForReply = false
		m.status = "Press ctrl+s to send"
	case msgResponseError:
		m.replyCancelFunc = nil
		m.waitingForReply = false
		m.status = "Press ctrl+s to send"
		m.err = error(msg)
	case msgConversationTitleChanged:
		title := string(msg)
		m.conversation.Title = title
		if m.persistence {
			err := m.ctx.Store.SaveConversation(m.conversation)
			if err != nil {
				cmds = append(cmds, wrapError(err))
			}
		}
	case msgError:
		m.err = error(msg)
	}

	var cmd tea.Cmd
	m.spinner, cmd = m.spinner.Update(msg)
	if cmd != nil {
		cmds = append(cmds, cmd)
	}

	// let the textarea see the message first; only pass it on to the
	// viewport if the textarea didn't produce a command for it
	inputCaptured := false
	m.input, cmd = m.input.Update(msg)
	if cmd != nil {
		inputCaptured = true
		cmds = append(cmds, cmd)
	}
	if !inputCaptured {
		m.content, cmd = m.content.Update(msg)
		if cmd != nil {
			cmds = append(cmds, cmd)
		}
	}

	return m, tea.Batch(cmds...)
}

// View renders the full UI: header, content, optional error, input, footer.
func (m model) View() string {
	if m.width == 0 {
		// this is the case upon initial startup, but it's also a safe bet that
		// we can just skip rendering if the terminal is really 0 width...
		// without this, the m.*View() functions may crash
		return ""
	}
	sections := make([]string, 0, 6)

	sections = append(sections, m.headerView())
	sections = append(sections, m.contentView())
	errView := m.errorView()
	if errView != "" {
		sections = append(sections, errView)
	}
	sections = append(sections, m.inputView())
	sections = append(sections, m.footerView())

	return lipgloss.JoinVertical(
		lipgloss.Left,
		sections...,
	)
}

// returns the total height of "fixed" components, which are those which don't
// change height dependent on window size.
func (m *model) getFixedComponentHeight() int {
	h := 0
	h += m.input.Height()
	h += lipgloss.Height(m.headerView())
	h += lipgloss.Height(m.footerView())
	errorView := m.errorView()
	if errorView != "" {
		h += lipgloss.Height(errorView)
	}
	return h
}

// headerView renders the conversation title bar.
func (m *model) headerView() string {
	titleStyle := lipgloss.NewStyle().
		PaddingLeft(1).
		PaddingRight(1).
		Bold(true)
	var title string
	if m.conversation != nil && m.conversation.Title != "" {
		title = m.conversation.Title
	} else {
		title = "Untitled"
	}
	part := titleStyle.Render(title)
	return headerStyle.Width(m.width).Render(part)
}

// contentView renders the scrollable message viewport.
func (m *model) contentView() string {
	return m.content.View()
}

// errorView renders the current error centered in red, or "" if there is none.
func (m *model) errorView() string {
	if m.err == nil {
		return ""
	}
	return lipgloss.NewStyle().
		Width(m.width).
		AlignHorizontal(lipgloss.Center).
		Bold(true).
		Foreground(lipgloss.Color("1")).
		Render(m.err.Error())
}

// inputView renders the message entry textarea.
func (m *model) inputView() string {
	return m.input.View()
}

// footerView renders the status line: persistence indicator and status on the
// left, the active model on the right, padded or truncated to the full width.
func (m *model) footerView() string {
	segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1).Faint(true)
	segmentSeparator := "|"

	savingStyle := segmentStyle.Copy().Bold(true)
	saving := ""
	if m.persistence {
		saving = savingStyle.Foreground(lipgloss.Color("2")).Render("✅💾")
	} else {
		saving = savingStyle.Foreground(lipgloss.Color("1")).Render("❌💾")
	}

	status := m.status
	if m.waitingForReply {
		status += m.spinner.View()
	}

	leftSegments := []string{
		saving,
		segmentStyle.Render(status),
	}
	rightSegments := []string{
		segmentStyle.Render(fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)),
	}

	left := strings.Join(leftSegments, segmentSeparator)
	right := strings.Join(rightSegments, segmentSeparator)

	totalWidth := lipgloss.Width(left) + lipgloss.Width(right)
	remaining := m.width - totalWidth

	var padding string
	if remaining > 0 {
		padding = strings.Repeat(" ", remaining)
	}

	footer := left + padding + right
	if remaining < 0 {
		ellipses := "... "
		// this doesn't work very well, due to trying to trim a string with
		// ANSI escape codes already in it
		footer = footer[:(len(footer)+remaining)-len(ellipses)-3] + ellipses
	}
	return footerStyle.Width(m.width).Render(footer)
}

// initialModel builds the starting model: persistence on, empty conversation,
// focused input, and a simple dots spinner.
func initialModel(ctx *lmcli.Context, convShortname string) model {
	m := model{
		ctx:           ctx,
		convShortname: convShortname,
		conversation:  &models.Conversation{},
		persistence:   true,

		replyChan:      make(chan models.Message),
		replyChunkChan: make(chan string),
	}

	m.content = viewport.New(0, 0)

	m.input = textarea.New()
	m.input.Placeholder = "Enter a message"
	m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
	m.input.ShowLineNumbers = false
	m.input.SetHeight(4)
	m.input.Focus()

	m.spinner = spinner.New(spinner.WithSpinner(
		spinner.Spinner{
			Frames: []string{
				". ",
				".. ",
				"...",
				".. ",
				". ",
				"   ",
			},
			FPS: time.Second / 3,
		},
	))

	m.waitingForReply = false
	m.status = "Press ctrl+s to send"

	return m
}

// handleMessagesKey handles key input while the message viewport is focused.
func (m *model) handleMessagesKey(msg tea.KeyMsg) tea.Cmd {
	switch msg.String() {
	case "tab":
		m.focus = focusInput
		m.input.Focus()
	}
	return nil
}

// handleInputKey handles key input while the textarea is focused: esc blurs,
// ctrl+s sends the drafted message, ctrl+r retries the last response.
func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
	switch msg.String() {
	case "esc":
		m.focus = focusMessages
		m.input.Blur()
	case "ctrl+s":
		userInput := strings.TrimSpace(m.input.Value())
		if userInput == "" {
			return nil
		}

		if len(m.messages) > 0 && m.messages[len(m.messages)-1].Role == models.MessageRoleUser {
			return wrapError(fmt.Errorf("Can't reply to a user message"))
		}

		reply := models.Message{
			Role:    models.MessageRoleUser,
			Content: userInput,
		}

		if m.persistence {
			var err error
			if m.conversation.ID == 0 {
				err = m.ctx.Store.SaveConversation(m.conversation)
			}
			if err != nil {
				return wrapError(err)
			}

			// ensure all messages up to the one we're about to add are
			// persistent
			cmd := m.persistRecentMessages()
			if cmd != nil {
				return cmd
			}

			// persist our new message, returning with any possible errors
			savedReply, err := m.ctx.Store.AddReply(m.conversation, reply)
			if err != nil {
				return wrapError(err)
			}
			reply = *savedReply
		}

		m.input.SetValue("")
		m.addMessage(reply)

		m.updateContent()
		m.content.GotoBottom()

		return m.promptLLM()
	case "ctrl+r":
		if len(m.messages) == 0 {
			return nil
		}
		// TODO: retry from selected message
		if m.messages[len(m.messages)-1].Role != models.MessageRoleUser {
			m.messages = m.messages[:len(m.messages)-1]
			m.updateContent()
		}
		m.content.GotoBottom()
		return m.promptLLM()
	}
	return nil
}

// loadConversation looks up a conversation by its short name, delivering it
// as a msgConversationLoaded (or a msgError on failure/not found).
func (m *model) loadConversation(shortname string) tea.Cmd {
	return func() tea.Msg {
		if shortname == "" {
			return nil
		}
		c, err := m.ctx.Store.ConversationByShortName(shortname)
		if err != nil {
			return msgError(fmt.Errorf("Could not lookup conversation: %v", err))
		}
		if c.ID == 0 {
			return msgError(fmt.Errorf("Conversation not found: %s", shortname))
		}
		return msgConversationLoaded(c)
	}
}

// loadMessages fetches a conversation's messages from the store.
func (m *model) loadMessages(c *models.Conversation) tea.Cmd {
	return func() tea.Msg {
		messages, err := m.ctx.Store.Messages(c)
		if err != nil {
			return msgError(fmt.Errorf("Could not load conversation messages: %v\n", err))
		}
		return msgMessagesLoaded(messages)
	}
}

// waitForReply blocks on the reply channel, re-issued after each reply.
func (m *model) waitForReply() tea.Cmd {
	return func() tea.Msg {
		return msgAssistantReply(<-m.replyChan)
	}
}

// waitForChunk blocks on the chunk channel, re-issued after each chunk.
func (m *model) waitForChunk() tea.Cmd {
	return func() tea.Msg {
		return msgResponseChunk(<-m.replyChunkChan)
	}
}

// generateConversationTitle asks the LLM to title the current conversation.
func (m *model) generateConversationTitle() tea.Cmd {
	return func() tea.Msg {
		title, err := cmdutil.GenerateTitle(m.ctx, m.conversation)
		if err != nil {
			return msgError(err)
		}
		return msgConversationTitleChanged(title)
	}
}

// promptLLM starts a streaming completion for the current messages. Replies
// and chunks arrive over replyChan/replyChunkChan; the returned command
// resolves when the stream ends (msgResponseEnd) or fails (msgResponseError).
func (m *model) promptLLM() tea.Cmd {
	m.waitingForReply = true
	m.status = "Press ctrl+c to cancel"

	return func() tea.Msg {
		completionProvider, err := m.ctx.GetCompletionProvider(*m.ctx.Config.Defaults.Model)
		if err != nil {
			return msgError(err)
		}

		requestParams := models.RequestParameters{
			Model:       *m.ctx.Config.Defaults.Model,
			MaxTokens:   *m.ctx.Config.Defaults.MaxTokens,
			Temperature: *m.ctx.Config.Defaults.Temperature,
			ToolBag:     m.ctx.EnabledTools,
		}

		replyHandler := func(msg models.Message) {
			m.replyChan <- msg
		}

		ctx, replyCancelFunc := context.WithCancel(context.Background())
		// NOTE(review): this write happens on the command goroutine while
		// Update reads it on the main loop — looks racy; confirm intent
		m.replyCancelFunc = replyCancelFunc

		resp, err := completionProvider.CreateChatCompletionStream(
			ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
		)

		if err != nil {
			return msgResponseError(err)
		}

		return msgResponseEnd(resp)
	}
}

// persistRecentMessages saves any not-yet-persisted messages (ID == 0) to the
// store, updating them in place with their saved counterparts.
func (m *model) persistRecentMessages() tea.Cmd {
	for i, msg := range m.messages {
		if msg.ID > 0 {
			continue
		}
		newMessage, err := m.ctx.Store.AddReply(m.conversation, msg)
		if err != nil {
			return wrapError(err)
		}
		m.setMessage(i, *newMessage)
	}
	return nil
}

// setMessages replaces the message list and rebuilds the highlight cache.
func (m *model) setMessages(messages []models.Message) {
	m.messages = messages
	m.highlightCache = make([]string, len(messages))
	for i, msg := range m.messages {
		highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
		m.highlightCache[i] = highlighted
	}
}

// setMessage replaces message i and refreshes its highlight cache entry.
func (m *model) setMessage(i int, msg models.Message) {
	if i >= len(m.messages) {
		panic("i out of range")
	}
	highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
	m.messages[i] = msg
	m.highlightCache[i] = highlighted
}

// addMessage appends a message and its highlighted form to the caches.
func (m *model) addMessage(msg models.Message) {
	highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
	m.messages = append(m.messages, msg)
	m.highlightCache = append(m.highlightCache, highlighted)
}

// setMessageContents updates message i's content and its highlight cache.
func (m *model) setMessageContents(i int, content string) {
	if i >= len(m.messages) {
		panic("i out of range")
	}
	highlighted, _ := m.ctx.Chroma.HighlightS(content)
	m.messages[i].Content = content
	m.highlightCache[i] = highlighted
}

// render the conversation into the main viewport
func (m *model) updateContent() {
	sb := strings.Builder{}
	msgCnt := len(m.messages)
	for i, message := range m.messages {
		icon := "⚙️"
		friendly := message.Role.FriendlyRole()
		style := lipgloss.NewStyle().Bold(true).Faint(true)

		switch message.Role {
		case models.MessageRoleUser:
			icon = ""
			style = userStyle
		case models.MessageRoleAssistant:
			icon = ""
			style = assistantStyle
		case models.MessageRoleToolCall, models.MessageRoleToolResult:
			icon = "🔧"
		}

		var saved string
		if message.ID == 0 {
			saved = lipgloss.NewStyle().Faint(true).Render(" (not saved)")
		}

		// write message heading with space for content
		header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly)+saved)
		sb.WriteString(header)

		// TODO: special rendering for tool calls/results?

		// write message contents
		var highlighted string
		if m.highlightCache[i] == "" {
			highlighted = message.Content
		} else {
			highlighted = m.highlightCache[i]
		}
		contents := messageStyle.Width(m.content.Width - 5).Render(highlighted)
		sb.WriteString(contents)

		if i < msgCnt-1 {
			sb.WriteString("\n\n")
		}
	}
	atBottom := m.content.AtBottom()
	m.content.SetContent(contentStyle.Render(sb.String()))
	if atBottom {
		// if we were at bottom before the update, scroll with the output
		m.content.GotoBottom()
	}
}

// Launch starts the chat TUI, optionally resuming the conversation named by
// convShortname, and blocks until the program exits.
func Launch(ctx *lmcli.Context, convShortname string) error {
	p := tea.NewProgram(initialModel(ctx, convShortname), tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		return fmt.Errorf("Error running program: %v", err)
	}
	return nil
}