lmcli/pkg/tui/tui.go
package tui
// The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO:
// - mode/focus changing between input and message selection
// - binding to open selected message/input in $EDITOR
import (
"fmt"
"strings"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
"git.mlow.ca/mlow/lmcli/pkg/util/tty"
"github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
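// model is the Bubble Tea model holding the application and UI state for the
// chat view.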
type model struct {
ctx *lmcli.Context
convShortname string
highlighter *tty.ChromaHighlighter
// application state
conversation *models.Conversation
messages []models.Message
replyChan chan string
err error
// ui state
isWaiting bool
confirmPrompt bool
// ui elements
content viewport.Model
input textarea.Model
footer string
}
type message struct {
role string
content string
}
// custom tea.Msg types
type (
// sent on each chunk received from LLM
msgResponseChunk string
// sent when response is finished being received
msgResponseEnd string
// sent when a conversation is (re)loaded
msgConversationLoaded *models.Conversation
// sent when a conversation's messages are loaded
msgMessagesLoaded []models.Message
// sent when an error occurs
msgError error
)
// styles
var (
inputStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#ff0000"))
contentStyle = lipgloss.NewStyle().PaddingLeft(2)
userStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("10"))
assistantStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12"))
footerStyle = lipgloss.NewStyle().
BorderTop(true).
BorderStyle(lipgloss.NormalBorder())
)
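// Init returns the initial commands: blink the textarea cursor, load the
// conversation identified by convShortname, and start waiting for response
// chunks on replyChan.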
func (m model) Init() tea.Cmd {
return tea.Batch(
textarea.Blink,
m.loadConversation(m.convShortname),
waitForChunk(m.replyChan),
)
}
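// Update dispatches incoming messages: key presses, window resizes, loaded
// conversations and messages, streamed response chunks, end-of-response, and
// errors. Messages not handled here fall through to the textarea and viewport.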
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
cmd = m.handleKeyMsg(msg)
case tea.WindowSizeMsg:
m.content.Width = msg.Width
m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView())
m.input.SetWidth(msg.Width - 1)
m.updateContent()
case msgConversationLoaded:
c := (*models.Conversation)(msg)
cmd = m.loadMessages(c)
case msgMessagesLoaded:
m.messages = []models.Message(msg)
m.updateContent()
case msgResponseChunk:
chunk := string(msg)
if len(m.messages) > 0 {
i := len(m.messages) - 1
switch m.messages[i].Role {
case models.MessageRoleAssistant:
m.messages[i].Content += chunk
default:
m.messages = append(m.messages, models.Message{
Role: models.MessageRoleAssistant,
Content: chunk,
})
}
m.updateContent()
}
cmd = waitForChunk(m.replyChan) // wait for the next chunk
case msgResponseEnd:
m.isWaiting = false
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
}
if cmd != nil {
return m, cmd
}
m.input, cmd = m.input.Update(msg)
if cmd != nil {
return m, cmd
}
m.content, cmd = m.content.Update(msg)
if cmd != nil {
return m, cmd
}
return m, cmd
}
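// View stacks the viewport, input area, and footer into the full-screen layout.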
func (m model) View() string {
return lipgloss.JoinVertical(
lipgloss.Left,
m.content.View(),
m.inputView(),
m.footerView(),
)
}
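// initialModel builds the starting model: a markdown highlighter configured
// from the Chroma settings, an empty viewport, and a focused textarea.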
func initialModel(ctx *lmcli.Context, convShortname string) model {
m := model{
ctx: ctx,
convShortname: convShortname,
replyChan: make(chan string),
isWaiting: false,
confirmPrompt: false,
}
m.highlighter = tty.NewChromaHighlighter(
"markdown", // we're highlighting markdown
*ctx.Config.Chroma.Formatter,
*ctx.Config.Chroma.Style,
)
m.content = viewport.New(0, 0)
m.input = textarea.New()
m.input.Placeholder = "Enter a message"
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
m.input.ShowLineNumbers = false
m.input.Focus()
m.updateContent()
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
return m
}
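// handleKeyMsg maps key presses to actions: quitting, the Ctrl+S send
// confirmation flow (confirmed with 'y', cancelled with 'n'), and stopping
// response generation with 's' while waiting.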
func (m *model) handleKeyMsg(msg tea.KeyMsg) tea.Cmd {
switch msg.String() {
case "ctrl+c", "q":
return tea.Quit
case "ctrl+s":
if !m.isWaiting && !m.confirmPrompt {
m.confirmPrompt = true
m.footer = "Press 'y' to confirm sending the message, 'n' to cancel"
return nil
}
case "y":
if m.confirmPrompt {
userInput := m.input.Value()
m.input.SetValue("")
m.messages = append(m.messages, models.Message{
Role: models.MessageRoleUser,
Content: userInput,
})
m.updateContent()
m.content.GotoBottom()
m.isWaiting = true
m.confirmPrompt = false
m.footer = "Waiting for response... (Press 's' to stop)"
return m.promptLLM()
}
case "n":
if m.confirmPrompt {
m.confirmPrompt = false
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
return nil
}
case "s":
if m.isWaiting {
m.isWaiting = false
m.footer = "Response generation stopped. Press Ctrl+S to send, Ctrl+C or 'q' to quit"
return nil
}
}
return nil
}
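// loadConversation returns a command that looks up a conversation by its
// short name in the store, yielding msgConversationLoaded on success.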
func (m *model) loadConversation(shortname string) tea.Cmd {
return func() tea.Msg {
if shortname == "" {
return nil
}
c, err := m.ctx.Store.ConversationByShortName(shortname)
if err != nil {
return msgError(fmt.Errorf("Could not lookup conversation: %v\n", err))
}
if c.ID == 0 {
return msgError(fmt.Errorf("Conversation not found with short name: %s\n", shortname))
}
return msgConversationLoaded(c)
}
}
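// loadMessages returns a command that fetches the conversation's messages
// from the store, yielding msgMessagesLoaded.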
func (m *model) loadMessages(c *models.Conversation) tea.Cmd {
return func() tea.Msg {
messages, err := m.ctx.Store.Messages(c)
if err != nil {
return msgError(fmt.Errorf("Could not load conversation messages: %v\n", err))
}
return msgMessagesLoaded(messages)
}
}
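// waitForChunk returns a command that blocks until the next chunk arrives on
// ch. Update re-issues it after each chunk so streaming continues.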
func waitForChunk(ch chan string) tea.Cmd {
return func() tea.Msg {
return msgResponseChunk(<-ch)
}
}
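// promptLLM returns a command that assembles the enabled tools and request
// parameters from config, then streams a completion for the current messages,
// sending chunks over replyChan and the finished response as msgResponseEnd.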
func (m *model) promptLLM() tea.Cmd {
return func() tea.Msg {
completionProvider, err := m.ctx.GetCompletionProvider(*m.ctx.Config.Defaults.Model)
if err != nil {
return msgError(err)
}
var toolBag []models.Tool
for _, toolName := range *m.ctx.Config.Tools.EnabledTools {
tool, ok := tools.AvailableTools[toolName]
if ok {
toolBag = append(toolBag, tool)
}
}
requestParams := models.RequestParameters{
Model: *m.ctx.Config.Defaults.Model,
MaxTokens: *m.ctx.Config.Defaults.MaxTokens,
Temperature: *m.ctx.Config.Defaults.Temperature,
ToolBag: toolBag,
}
var apiReplies []models.Message
// propagate stream errors instead of silently discarding them
resp, err := completionProvider.CreateChatCompletionStream(
requestParams, m.messages, &apiReplies, m.replyChan,
)
if err != nil {
return msgError(err)
}
return msgResponseEnd(resp)
}
}
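// updateContent re-renders every message (role heading plus syntax-highlighted
// body) into the viewport.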
func (m *model) updateContent() {
sb := strings.Builder{}
msgCnt := len(m.messages)
for i, message := range m.messages {
var style lipgloss.Style
if message.Role == models.MessageRoleUser {
style = userStyle
} else {
style = assistantStyle
}
sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role))))
highlighted, err := m.highlighter.HighlightS(message.Content)
if err != nil {
// fall back to the raw content if highlighting fails
highlighted = message.Content
}
sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted))
if i < msgCnt-1 {
sb.WriteString("\n\n")
}
}
m.content.SetContent(sb.String())
}
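// inputView renders the textarea, dimmed while a response is pending.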
func (m model) inputView() string {
var inputView string
if m.isWaiting {
inputView = inputStyle.Faint(true).Render(m.input.View())
} else {
inputView = inputStyle.Render(m.input.View())
}
return inputView
}
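// footerView renders the footer text centered under a top border, sized to the
// viewport width.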
func (m model) footerView() string {
return footerStyle.
Width(m.content.Width).
Align(lipgloss.Center).
Render(m.footer)
}
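// Launch starts the chat TUI for the given conversation short name and blocks
// until the program exits.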
func Launch(ctx *lmcli.Context, convShortname string) error {
p := tea.NewProgram(initialModel(ctx, convShortname), tea.WithAltScreen())
if _, err := p.Run(); err != nil {
return fmt.Errorf("Error running program: %v", err)
}
return nil
}