lmcli/pkg/tui/tui.go

package tui
// The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO:
// - binding to open selected message/input in $EDITOR
// - ability to continue or retry previous response
// - save messages to store
// - conversation list view
import (
"context"
"fmt"
"strings"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
type focusState int
const (
focusInput focusState = iota
focusMessages
)
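// model holds the state of the chat TUI: the loaded conversation and its
// messages, the channels used to stream an in-progress reply, and the
// viewport/textarea elements that make up the interface.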
type model struct {
ctx *lmcli.Context
convShortname string
// application state
conversation *models.Conversation
messages []models.Message
waitingForReply bool
replyChan chan models.Message
replyChunkChan chan string
replyCancelFunc context.CancelFunc
err error
persistence bool // whether we will save new messages in the conversation
// ui state
focus focusState
status string // a general status message
highlightCache []string // a cache of syntax highlighted message content
// ui elements
content viewport.Model
input textarea.Model
}
type message struct {
role string
content string
}
// custom tea.Msg types
type (
// sent on each chunk received from LLM
msgResponseChunk string
// sent when response is finished being received
msgResponseEnd string
// sent on each completed reply
msgReply models.Message
// sent when a conversation is (re)loaded
msgConversationLoaded *models.Conversation
// sent when a conversation's messages are loaded
msgMessagesLoaded []models.Message
// sent when an error occurs
msgError error
)
// styles
var (
userStyle = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("10"))
assistantStyle = lipgloss.NewStyle().Faint(true).Bold(true).Foreground(lipgloss.Color("12"))
messageStyle = lipgloss.NewStyle().PaddingLeft(1)
headerStyle = lipgloss.NewStyle().
Background(lipgloss.Color("0"))
contentStyle = lipgloss.NewStyle().
Padding(1)
footerStyle = lipgloss.NewStyle().
BorderTop(true).
BorderStyle(lipgloss.NormalBorder())
)
func (m model) Init() tea.Cmd {
return tea.Batch(
textarea.Blink,
m.loadConversation(m.convShortname),
m.waitForChunk(),
m.waitForReply(),
)
}
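// wrapError wraps an error in a tea.Cmd so it can be delivered to Update as
// a msgError.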
func wrapError(err error) tea.Cmd {
return func() tea.Msg {
return msgError(err)
}
}
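// Update is the main message handler: it processes key presses, window
// resizes, and the custom messages sent while loading a conversation or
// streaming a reply, then falls through to the input and viewport elements.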
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmds []tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c":
if m.waitingForReply {
m.replyCancelFunc()
} else {
return m, tea.Quit
}
case "ctrl+p":
m.persistence = !m.persistence
case "q":
if m.focus != focusInput {
return m, tea.Quit
}
default:
var inputHandled tea.Cmd
switch m.focus {
case focusInput:
inputHandled = m.handleInputKey(msg)
case focusMessages:
inputHandled = m.handleMessagesKey(msg)
}
if inputHandled != nil {
return m, inputHandled
}
}
case tea.WindowSizeMsg:
m.content.Width = msg.Width
m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView()) - lipgloss.Height(m.headerView())
m.input.SetWidth(msg.Width - 1)
m.updateContent()
case msgConversationLoaded:
m.conversation = (*models.Conversation)(msg)
cmds = append(cmds, m.loadMessages(m.conversation))
case msgMessagesLoaded:
m.setMessages(msg)
m.updateContent()
case msgResponseChunk:
chunk := string(msg)
last := len(m.messages) - 1
if last >= 0 && m.messages[last].Role == models.MessageRoleAssistant {
m.setMessageContents(last, m.messages[last].Content+chunk)
} else {
m.addMessage(models.Message{
Role: models.MessageRoleAssistant,
Content: chunk,
})
}
m.updateContent()
cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
case msgReply:
// the last reply that was being worked on is finished
reply := models.Message(msg)
last := len(m.messages) - 1
if last < 0 {
panic("Unexpected messages length handling msgReply")
}
if reply.Role == models.MessageRoleToolCall && m.messages[last].Role == models.MessageRoleAssistant {
m.setMessage(last, reply)
} else if reply.Role != models.MessageRoleAssistant {
m.addMessage(reply)
}
if m.persistence && m.conversation != nil && m.conversation.ID > 0 {
cmds = append(cmds, m.persistRecentMessages())
}
m.updateContent()
cmds = append(cmds, m.waitForReply())
case msgResponseEnd:
m.replyCancelFunc = nil
m.waitingForReply = false
m.status = "Press ctrl+s to send"
}
if len(cmds) > 0 {
return m, tea.Batch(cmds...)
}
var cmd tea.Cmd
m.input, cmd = m.input.Update(msg)
if cmd != nil {
return m, cmd
}
m.content, cmd = m.content.Update(msg)
if cmd != nil {
return m, cmd
}
return m, cmd
}
func (m model) View() string {
if m.content.Width == 0 {
// this is the case upon initial startup, but it's also a safe bet that
// we can just skip rendering if the terminal is really 0 width...
// without this, the below view functions may do weird things
return ""
}
return lipgloss.JoinVertical(
lipgloss.Left,
m.headerView(),
m.contentView(),
m.inputView(),
m.footerView(),
)
}
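// headerView renders the conversation title (or "Untitled") as the header bar.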
func (m *model) headerView() string {
titleStyle := lipgloss.NewStyle().
PaddingLeft(1).
PaddingRight(1).
Bold(true)
var title string
if m.conversation != nil && m.conversation.Title != "" {
title = m.conversation.Title
} else {
title = "Untitled"
}
part := titleStyle.Render(title)
return headerStyle.Width(m.content.Width).Render(part)
}
func (m *model) contentView() string {
return m.content.View()
}
func (m *model) inputView() string {
return m.input.View()
}
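// footerView renders the status bar: the persistence indicator, the current
// status message, and the configured model.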
func (m *model) footerView() string {
segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1).Faint(true)
segmentSeparator := "|"
saving := ""
if m.persistence {
saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("2")).Render("✅💾")
} else {
saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("1")).Render("❌💾")
}
leftSegments := []string{
saving,
segmentStyle.Render(m.status),
}
rightSegments := []string{
segmentStyle.Render(fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)),
}
left := strings.Join(leftSegments, segmentSeparator)
right := strings.Join(rightSegments, segmentSeparator)
totalWidth := lipgloss.Width(left + right)
remaining := m.content.Width - totalWidth
var padding string
if remaining > 0 {
padding = strings.Repeat(" ", remaining)
}
footer := left + padding + right
if remaining < 0 {
ellipses := "... "
footer = footer[:m.content.Width-len(ellipses)] + ellipses
}
return footerStyle.Render(footer)
}
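// initialModel constructs the initial TUI model with persistence enabled,
// the reply channels allocated, and the input focused.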
func initialModel(ctx *lmcli.Context, convShortname string) model {
m := model{
ctx: ctx,
convShortname: convShortname,
persistence: true,
replyChan: make(chan models.Message),
replyChunkChan: make(chan string),
}
m.content = viewport.New(0, 0)
m.input = textarea.New()
m.input.Placeholder = "Enter a message"
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
m.input.ShowLineNumbers = false
m.input.Focus()
m.updateContent()
m.waitingForReply = false
m.status = "Press ctrl+s to send"
return m
}
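// handleMessagesKey handles key presses while the message view is focused.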
func (m *model) handleMessagesKey(msg tea.KeyMsg) tea.Cmd {
switch msg.String() {
case "tab":
m.focus = focusInput
m.input.Focus()
}
return nil
}
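// handleInputKey handles key presses while the input is focused: esc returns
// focus to the message view, ctrl+s submits the input as a new user message
// and requests a reply from the LLM.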
func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
switch msg.String() {
case "esc":
m.focus = focusMessages
m.input.Blur()
case "ctrl+s":
userInput := strings.TrimSpace(m.input.Value())
if userInput == "" {
return nil
}
reply := models.Message{
Role: models.MessageRoleUser,
Content: userInput,
}
if m.persistence && m.conversation != nil && m.conversation.ID > 0 {
// ensure all messages up to the one we're about to add have been
// persisted
cmd := m.persistRecentMessages()
if cmd != nil {
return cmd
}
// persist our new message, returning any error
savedReply, err := m.ctx.Store.AddReply(m.conversation, reply)
if err != nil {
return wrapError(err)
}
reply = *savedReply
}
m.input.SetValue("")
m.addMessage(reply)
m.updateContent()
m.content.GotoBottom()
m.waitingForReply = true
m.status = "Waiting for response, press ctrl+c to cancel..."
return m.promptLLM()
}
return nil
}
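// loadConversation looks up a conversation by its short name and delivers it
// as a msgConversationLoaded.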
func (m *model) loadConversation(shortname string) tea.Cmd {
return func() tea.Msg {
if shortname == "" {
return nil
}
c, err := m.ctx.Store.ConversationByShortName(shortname)
if err != nil {
return msgError(fmt.Errorf("could not look up conversation: %v", err))
}
if c.ID == 0 {
return msgError(fmt.Errorf("conversation not found with short name: %s", shortname))
}
return msgConversationLoaded(c)
}
}
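// loadMessages loads the given conversation's messages from the store and
// delivers them as a msgMessagesLoaded.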
func (m *model) loadMessages(c *models.Conversation) tea.Cmd {
return func() tea.Msg {
messages, err := m.ctx.Store.Messages(c)
if err != nil {
return msgError(fmt.Errorf("could not load conversation messages: %v", err))
}
return msgMessagesLoaded(messages)
}
}
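// waitForReply blocks until the next completed reply arrives on replyChan.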
func (m *model) waitForReply() tea.Cmd {
return func() tea.Msg {
return msgReply(<-m.replyChan)
}
}
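// waitForChunk blocks until the next response chunk arrives on replyChunkChan.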
func (m *model) waitForChunk() tea.Cmd {
return func() tea.Msg {
return msgResponseChunk(<-m.replyChunkChan)
}
}
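// promptLLM starts a cancellable streaming completion request using the
// configured default model, sending chunks to replyChunkChan and completed
// replies to replyChan, and returns msgResponseEnd once the stream finishes.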
func (m *model) promptLLM() tea.Cmd {
return func() tea.Msg {
completionProvider, err := m.ctx.GetCompletionProvider(*m.ctx.Config.Defaults.Model)
if err != nil {
return msgError(err)
}
requestParams := models.RequestParameters{
Model: *m.ctx.Config.Defaults.Model,
MaxTokens: *m.ctx.Config.Defaults.MaxTokens,
Temperature: *m.ctx.Config.Defaults.Temperature,
ToolBag: m.ctx.EnabledTools,
2024-03-12 01:10:54 -06:00
}
replyHandler := func(msg models.Message) {
m.replyChan <- msg
}
ctx, replyCancelFunc := context.WithCancel(context.Background())
m.replyCancelFunc = replyCancelFunc
// TODO: handle error
resp, _ := completionProvider.CreateChatCompletionStream(
ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
)
return msgResponseEnd(resp)
}
}
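// persistRecentMessages saves any messages that have not yet been stored
// (ID == 0) to the current conversation.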
func (m *model) persistRecentMessages() tea.Cmd {
for i, msg := range m.messages {
if msg.ID > 0 {
continue
}
newMessage, err := m.ctx.Store.AddReply(m.conversation, msg)
if err != nil {
return wrapError(err)
}
m.setMessage(i, *newMessage)
}
return nil
}
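// setMessages replaces the message list and rebuilds the syntax highlight cache.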
func (m *model) setMessages(messages []models.Message) {
m.messages = messages
m.highlightCache = make([]string, len(messages))
for i, msg := range m.messages {
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.highlightCache[i] = highlighted
}
}
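// setMessage replaces the message at index i and refreshes its cached highlighting.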
func (m *model) setMessage(i int, msg models.Message) {
if i >= len(m.messages) {
panic("i out of range")
}
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.messages[i] = msg
m.highlightCache[i] = highlighted
}
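// addMessage appends a message and caches its highlighted content.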
func (m *model) addMessage(msg models.Message) {
highlighted, _ := m.ctx.Chroma.HighlightS(msg.Content)
m.messages = append(m.messages, msg)
m.highlightCache = append(m.highlightCache, highlighted)
}
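// setMessageContents updates the content of the message at index i and
// refreshes its cached highlighting.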
func (m *model) setMessageContents(i int, content string) {
if i >= len(m.messages) {
panic("i out of range")
}
highlighted, _ := m.ctx.Chroma.HighlightS(content)
m.messages[i].Content = content
m.highlightCache[i] = highlighted
}
// render the conversation into the main viewport
func (m *model) updateContent() {
sb := strings.Builder{}
msgCnt := len(m.messages)
for i, message := range m.messages {
icon := "⚙️"
friendly := message.Role.FriendlyRole()
style := lipgloss.NewStyle().Bold(true).Faint(true)
switch message.Role {
case models.MessageRoleUser:
icon = ""
style = userStyle
case models.MessageRoleAssistant:
icon = ""
style = assistantStyle
case models.MessageRoleToolCall:
case models.MessageRoleToolResult:
icon = "🔧"
}
var saved string
if message.ID == 0 {
saved = lipgloss.NewStyle().Faint(true).Render(" (not saved)")
}
// write message heading with space for content
header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly)+saved)
sb.WriteString(header)
// TODO: special rendering for tool calls/results?
// write message contents
var highlighted string
if m.highlightCache[i] == "" {
highlighted = message.Content
} else {
highlighted = m.highlightCache[i]
}
contents := messageStyle.Width(m.content.Width - 5).Render(highlighted)
sb.WriteString(contents)
if i < msgCnt-1 {
sb.WriteString("\n\n")
}
}
atBottom := m.content.AtBottom()
m.content.SetContent(contentStyle.Render(sb.String()))
if atBottom {
// if we were at bottom before the update, scroll with the output
m.content.GotoBottom()
}
}
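// Launch starts the chat TUI, optionally loading the conversation identified
// by convShortname.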
func Launch(ctx *lmcli.Context, convShortname string) error {
p := tea.NewProgram(initialModel(ctx, convShortname), tea.WithAltScreen())
if _, err := p.Run(); err != nil {
return fmt.Errorf("error running program: %v", err)
}
return nil
}