// lmcli/pkg/tui/tui.go
package tui
// The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO:
// - binding to open selected message/input in $EDITOR
import (
"context"
"fmt"
"strings"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
"github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// focusState identifies which UI element currently receives key input.
type focusState int

const (
	focusInput    focusState = iota // the message input textarea
	focusMessages                   // the scrollable message viewport
)
2024-03-12 01:10:54 -06:00
type model struct {
ctx *lmcli.Context
convShortname string
// application state
conversation *models.Conversation
messages []models.Message
replyChan chan string
err error
// ui state
focus focusState
isWaiting bool
status string // a general status message
2024-03-12 01:10:54 -06:00
// ui elements
content viewport.Model
input textarea.Model
}
// message is a minimal role/content pair.
// NOTE(review): this type appears unused within this file — confirm it is
// referenced elsewhere before removing.
type message struct {
	role    string
	content string
}
// custom tea.Msg types
type (
	// msgResponseChunk is sent on each chunk received from the LLM
	msgResponseChunk string
	// msgResponseEnd is sent when the response is finished being received
	msgResponseEnd string
	// msgConversationLoaded is sent when a conversation is (re)loaded
	msgConversationLoaded *models.Conversation
	// msgMessagesLoaded is sent when a conversation's messages are loaded
	msgMessagesLoaded []models.Message
	// msgError is sent when an error occurs
	msgError error
)
// styles
var (
	inputStyle     = lipgloss.NewStyle().Foreground(lipgloss.Color("#ff0000"))
	contentStyle   = lipgloss.NewStyle().PaddingLeft(2)
	userStyle      = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("10"))
	assistantStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12"))
	// footerStyle draws a horizontal rule above the status line
	footerStyle = lipgloss.NewStyle().
			BorderTop(true).
			BorderStyle(lipgloss.NormalBorder())
)
// Init implements tea.Model. It starts cursor blinking, begins loading the
// conversation identified by convShortname, and starts listening for
// streamed response chunks.
func (m model) Init() tea.Cmd {
	cmds := []tea.Cmd{
		textarea.Blink,
		m.loadConversation(m.convShortname),
		waitForChunk(m.replyChan),
	}
	return tea.Batch(cmds...)
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c":
return m, tea.Quit
case "q":
if m.focus != focusInput {
return m, tea.Quit
}
default:
var inputHandled tea.Cmd
switch m.focus {
case focusInput:
inputHandled = m.handleInputKey(msg)
case focusMessages:
inputHandled = m.handleMessagesKey(msg)
}
if inputHandled != nil {
return m, inputHandled
}
}
2024-03-12 01:10:54 -06:00
case tea.WindowSizeMsg:
m.content.Width = msg.Width
m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView())
m.input.SetWidth(msg.Width - 1)
m.updateContent()
case msgConversationLoaded:
c := (*models.Conversation)(msg)
cmd = m.loadMessages(c)
case msgMessagesLoaded:
m.messages = []models.Message(msg)
m.updateContent()
case msgResponseChunk:
chunk := string(msg)
if len(m.messages) > 0 {
i := len(m.messages) - 1
switch m.messages[i].Role {
case models.MessageRoleAssistant:
m.messages[i].Content += chunk
default:
m.messages = append(m.messages, models.Message{
Role: models.MessageRoleAssistant,
2024-03-12 01:10:54 -06:00
Content: chunk,
})
}
m.updateContent()
}
cmd = waitForChunk(m.replyChan) // wait for the next chunk
case msgResponseEnd:
m.isWaiting = false
m.status = "Press ctrl+s to send"
2024-03-12 01:10:54 -06:00
}
if cmd != nil {
return m, cmd
}
m.input, cmd = m.input.Update(msg)
if cmd != nil {
return m, cmd
}
m.content, cmd = m.content.Update(msg)
if cmd != nil {
return m, cmd
}
return m, cmd
}
// View implements tea.Model, stacking the message viewport, the input box,
// and the footer vertically.
func (m model) View() string {
	sections := []string{
		m.content.View(),
		m.inputView(),
		m.footerView(),
	}
	return lipgloss.JoinVertical(lipgloss.Left, sections...)
}
func initialModel(ctx *lmcli.Context, convShortname string) model {
m := model{
ctx: ctx,
convShortname: convShortname,
replyChan: make(chan string),
}
m.content = viewport.New(0, 0)
m.input = textarea.New()
m.input.Placeholder = "Enter a message"
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
m.input.ShowLineNumbers = false
m.input.Focus()
m.updateContent()
m.isWaiting = false
m.status = "Press ctrl+s to send"
2024-03-12 01:10:54 -06:00
return m
}
func (m *model) handleMessagesKey(msg tea.KeyMsg) tea.Cmd {
2024-03-12 01:10:54 -06:00
switch msg.String() {
case "tab":
m.focus = focusInput
m.input.Focus()
}
return nil
}
func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
switch msg.String() {
case "esc":
m.focus = focusMessages
m.input.Blur()
2024-03-12 01:10:54 -06:00
case "ctrl+s":
userInput := strings.TrimSpace(m.input.Value())
if strings.TrimSpace(userInput) == "" {
2024-03-12 01:10:54 -06:00
return nil
}
m.input.SetValue("")
m.messages = append(m.messages, models.Message{
Role: models.MessageRoleUser,
Content: userInput,
})
m.updateContent()
m.content.GotoBottom()
m.isWaiting = true
m.status = "Waiting for response... (Press 's' to stop)"
return m.promptLLM()
2024-03-12 01:10:54 -06:00
}
return nil
}
// loadConversation returns a command that looks up a conversation by its
// short name, yielding msgConversationLoaded on success, msgError on
// failure, or nil when shortname is empty.
func (m *model) loadConversation(shortname string) tea.Cmd {
	return func() tea.Msg {
		if shortname == "" {
			return nil
		}
		c, err := m.ctx.Store.ConversationByShortName(shortname)
		if err != nil {
			return msgError(fmt.Errorf("could not lookup conversation: %w", err))
		}
		// the store signals "not found" with a zero ID rather than an error
		if c.ID == 0 {
			return msgError(fmt.Errorf("conversation not found with short name: %s", shortname))
		}
		return msgConversationLoaded(c)
	}
}
// loadMessages returns a command that loads all messages belonging to the
// given conversation, yielding msgMessagesLoaded or msgError.
func (m *model) loadMessages(c *models.Conversation) tea.Cmd {
	return func() tea.Msg {
		messages, err := m.ctx.Store.Messages(c)
		if err != nil {
			return msgError(fmt.Errorf("could not load conversation messages: %w", err))
		}
		return msgMessagesLoaded(messages)
	}
}
// waitForChunk returns a command that blocks until the next streamed
// response chunk arrives on ch, delivering it as a msgResponseChunk.
func waitForChunk(ch chan string) tea.Cmd {
	return func() tea.Msg {
		chunk := <-ch
		return msgResponseChunk(chunk)
	}
}
// promptLLM returns a command that requests a streamed chat completion for
// the current message history. Chunks are delivered over m.replyChan; the
// final response (or an error) is reported when the stream ends.
func (m *model) promptLLM() tea.Cmd {
	return func() tea.Msg {
		completionProvider, err := m.ctx.GetCompletionProvider(*m.ctx.Config.Defaults.Model)
		if err != nil {
			return msgError(err)
		}

		// collect the enabled tools into the request's tool bag, skipping
		// any configured names that are not available
		var toolBag []models.Tool
		for _, toolName := range *m.ctx.Config.Tools.EnabledTools {
			if tool, ok := tools.AvailableTools[toolName]; ok {
				toolBag = append(toolBag, tool)
			}
		}

		requestParams := models.RequestParameters{
			Model:       *m.ctx.Config.Defaults.Model,
			MaxTokens:   *m.ctx.Config.Defaults.MaxTokens,
			Temperature: *m.ctx.Config.Defaults.Temperature,
			ToolBag:     toolBag,
		}

		// TODO: supply a reply callback
		resp, err := completionProvider.CreateChatCompletionStream(
			context.Background(), requestParams, m.messages, nil, m.replyChan,
		)
		if err != nil {
			// previously ignored; surface stream errors to the UI
			return msgError(err)
		}
		return msgResponseEnd(resp)
	}
}
func (m *model) updateContent() {
sb := strings.Builder{}
msgCnt := len(m.messages)
for i, message := range m.messages {
var style lipgloss.Style
if message.Role == models.MessageRoleUser {
style = userStyle
} else {
style = assistantStyle
}
sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role.FriendlyRole()))))
2024-03-12 01:10:54 -06:00
2024-03-12 02:12:12 -06:00
highlighted, _ := m.ctx.Chroma.HighlightS(message.Content)
2024-03-12 01:10:54 -06:00
sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted))
if i < msgCnt-1 {
sb.WriteString("\n\n")
}
}
m.content.SetContent(sb.String())
}
// inputView renders the input textarea, dimmed while a response is pending.
func (m model) inputView() string {
	if m.isWaiting {
		return inputStyle.Faint(true).Render(m.input.View())
	}
	return inputStyle.Render(m.input.View())
}
func (m model) footerView() string {
left := m.status
right := fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)
totalWidth := lipgloss.Width(left + right)
var padding string
if m.content.Width-totalWidth > 0 {
padding = strings.Repeat(" ", m.content.Width-totalWidth)
} else {
padding = ""
}
footer := lipgloss.JoinHorizontal(lipgloss.Center, left, padding, right)
return footerStyle.Width(m.content.Width).Render(footer)
2024-03-12 01:10:54 -06:00
}
// Launch starts the chat TUI in the terminal's alternate screen, blocking
// until the program exits. It returns any error from running the bubbletea
// program.
func Launch(ctx *lmcli.Context, convShortname string) error {
	p := tea.NewProgram(initialModel(ctx, convShortname), tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		return fmt.Errorf("error running program: %w", err)
	}
	return nil
}