lmcli/pkg/tui/views/chat/chat.go

package chat

import (
	"time"

	models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
	"github.com/charmbracelet/bubbles/cursor"
	"github.com/charmbracelet/bubbles/spinner"
	"github.com/charmbracelet/bubbles/textarea"
	"github.com/charmbracelet/bubbles/viewport"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)
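
// focusState identifies which part of the chat view currently receives key
// events: the message input or the messages view.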
type focusState int

const (
	focusInput focusState = iota
	focusMessages
)
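
// editorTarget indicates what is being edited: the input textarea or the
// currently selected message.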
type editorTarget int

const (
	input editorTarget = iota
	selectedMessage
)
// custom tea.Msg types
type (
	// sent on each chunk received from the LLM
	msgResponseChunk string
	// sent when the response has finished being received
	msgResponseEnd string
	// a special case of common.MsgError that stops the response waiting animation
	msgResponseError error
	// sent on each completed reply
	msgResponse models.Message
	// sent when a conversation is (re)loaded
	msgConversationLoaded struct {
		conversation *models.Conversation
		rootMessages []models.Message
	}
	// sent when a new conversation title is generated
	msgConversationTitleGenerated string
	// sent when a conversation's messages are loaded
	msgMessagesLoaded []models.Message
	// sent when the conversation has been persisted, triggers a reload of contents
	msgConversationPersisted struct {
		conversation *models.Conversation
		messages []models.Message
	}
	// sent when the given message is made the new selected reply of its parent
	msgSelectedReplyCycled *models.Message
	// sent when the given message is made the new selected root of the current conversation
	msgSelectedRootCycled *models.Message
	// sent when a message's contents are updated and saved
	msgMessageUpdated *models.Message
	// sent when a message is cloned, with the cloned message
	msgMessageCloned *models.Message
)
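
// Model holds the state of the chat view: the loaded conversation and its
// messages, the channels used while a reply is streamed in, and the UI
// elements and metrics used to render the view.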
type Model struct {
	shared.State
	shared.Sections

	// app state
	conversation *models.Conversation
	rootMessages []models.Message
	messages []models.Message
	selectedMessage int
	waitingForReply bool
	editorTarget editorTarget
	stopSignal chan struct{}
	replyChan chan models.Message
	replyChunkChan chan string
	persistence bool // whether we will save new messages in the conversation

	// ui state
	focus focusState
	wrap bool // whether message content is wrapped to viewport width
	status string // a general status message
	showToolResults bool // whether tool calls and results are shown
	messageCache []string // cache of syntax highlighted and wrapped message content
	messageOffsets []int

	// ui elements
	content viewport.Model
	input textarea.Model
	spinner spinner.Model
	replyCursor cursor.Model // cursor to indicate incoming response

	// metrics
	tokenCount uint
	startTime time.Time
	elapsed time.Duration
}
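
// Chat constructs the chat view model from the given shared state, creating
// the reply channels and UI components and applying default settings such as
// persistence and message wrapping.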
func Chat(state shared.State) Model {
	m := Model{
		State: state,
		conversation: &models.Conversation{},
		persistence: true,
		stopSignal: make(chan struct{}),
		replyChan: make(chan models.Message),
		replyChunkChan: make(chan string),
		wrap: true,
		selectedMessage: -1,
		content: viewport.New(0, 0),
		input: textarea.New(),
		// animated "..." spinner used as the response waiting indicator;
		// FPS here is the frame interval, i.e. three frames per second
		spinner: spinner.New(spinner.WithSpinner(
			spinner.Spinner{
				Frames: []string{
					". ",
					".. ",
					"...",
					".. ",
					". ",
					" ",
				},
				FPS: time.Second / 3,
			},
		)),
		replyCursor: cursor.New(),
	}
	m.replyCursor.SetChar(" ")
	m.replyCursor.Focus()

	// seed the conversation with the configured system prompt, if any
	system := state.Ctx.GetSystemPrompt()
	if system != "" {
		m.messages = []models.Message{{
			Role: models.MessageRoleSystem,
			Content: system,
		}}
	}

	// configure the message input textarea
	m.input.Focus()
	m.input.MaxHeight = 0
	m.input.CharLimit = 0
	m.input.ShowLineNumbers = false
	m.input.Placeholder = "Enter a message"
	m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
	m.input.FocusedStyle.Base = inputFocusedStyle
	m.input.BlurredStyle.Base = inputBlurredStyle

	m.waitingForReply = false
	m.status = "Press ctrl+s to send"
	return m
}
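
// Init starts the commands that wait for streamed response chunks and for
// completed response messages.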
func (m Model) Init() tea.Cmd {
	return tea.Batch(
		m.waitForResponseChunk(),
		m.waitForResponse(),
	)
}