Matt Low
0384c7cb66
This refactor splits out all conversation concerns into a new `conversation` package. There is now a split between `conversation`'s and `api`'s representations of `Message`, the latter storing the minimum information required for interaction with LLM providers. There is necessary conversion between the two when making LLM calls.
171 lines
4.2 KiB
Go
171 lines
4.2 KiB
Go
package chat
|
|
|
|
import (
|
|
"time"
|
|
|
|
"git.mlow.ca/mlow/lmcli/pkg/api"
|
|
"git.mlow.ca/mlow/lmcli/pkg/provider"
|
|
"git.mlow.ca/mlow/lmcli/pkg/conversation"
|
|
"git.mlow.ca/mlow/lmcli/pkg/tui/model"
|
|
"github.com/charmbracelet/bubbles/cursor"
|
|
"github.com/charmbracelet/bubbles/spinner"
|
|
"github.com/charmbracelet/bubbles/textarea"
|
|
"github.com/charmbracelet/bubbles/viewport"
|
|
tea "github.com/charmbracelet/bubbletea"
|
|
"github.com/charmbracelet/lipgloss"
|
|
)
|
|
|
|
// custom tea.Msg types delivered to this view's Update loop
type (
	// sent when a new conversation title has been generated
	msgConversationTitleGenerated string

	// sent when the conversation has been persisted, triggers a reload of contents
	msgConversationPersisted struct {
		conversation *conversation.Conversation
		messages     []conversation.Message
	}

	// sent when a conversation's messages are loaded
	msgConversationMessagesLoaded struct {
		messages []conversation.Message
	}

	// a special case of common.MsgError that stops the response waiting animation
	msgChatResponseError struct {
		Err error
	}

	// sent on each chunk received from LLM
	msgChatResponseChunk provider.Chunk

	// sent on each completed reply
	msgChatResponse *conversation.Message

	// sent when the response is canceled
	msgChatResponseCanceled struct{}

	// sent when results from a tool call are returned
	msgToolResults []api.ToolResult

	// sent when the given message is made the new selected reply of its parent
	msgSelectedReplyCycled *conversation.Message

	// sent when the given message is made the new selected root of the current conversation
	msgSelectedRootCycled *conversation.Message

	// sent when a message's contents are updated and saved
	msgMessageUpdated *conversation.Message

	// sent when a message is cloned, with the cloned message
	msgMessageCloned *conversation.Message
)
|
|
|
|
// focusState identifies which pane of the chat view currently receives
// keyboard input.
type focusState int

const (
	focusInput    focusState = iota // the message input textarea has focus
	focusMessages                   // the message list has focus
)
|
|
|
|
// editorTarget identifies what content an editing session applies to.
type editorTarget int

const (
	input           editorTarget = iota // editing the input buffer
	selectedMessage                     // editing the currently selected message
)
|
|
|
|
// state is the overall status of the chat view.
type state int

const (
	idle            state = iota // no background activity; awaiting user action
	loading                      // conversation contents are being loaded
	pendingResponse              // an LLM response is pending or streaming in
)
|
|
|
|
// Model is the bubbletea model for the chat view. It holds everything
// needed to render and interact with a single conversation.
type Model struct {
	// App state
	App    *model.AppModel
	Height int // available terminal height
	Width  int // available terminal width

	// Chat view state
	state           state        // current overall status of the view
	selectedMessage int          // index of the selected message; -1 when none (see Chat)
	editorTarget    editorTarget // what an external edit applies to
	stopSignal      chan struct{} // NOTE(review): presumably signaled to cancel a pending response — confirm against the response loop
	replyChan       chan conversation.Message // receives completed reply messages
	chatReplyChunks chan provider.Chunk       // receives streamed response chunks
	persistence     bool // whether we will save new messages in the conversation

	// UI state
	focus          focusState
	showDetails    bool     // whether various details are shown in the UI (e.g. system prompt, tool calls/results, message metadata)
	wrap           bool     // whether message content is wrapped to viewport width
	messageCache   []string // cache of syntax highlighted and wrapped message content
	messageOffsets []int    // NOTE(review): appears to hold per-message offsets into the rendered content — confirm at use sites

	// ui elements
	content     viewport.Model
	input       textarea.Model
	spinner     spinner.Model
	replyCursor cursor.Model // cursor to indicate incoming response

	// metrics (for the in-progress/most recent response)
	tokenCount uint
	startTime  time.Time
	elapsed    time.Duration
}
|
|
|
|
func getSpinner() spinner.Model {
|
|
return spinner.New(spinner.WithSpinner(
|
|
spinner.Spinner{
|
|
Frames: []string{
|
|
"∙∙∙",
|
|
"●∙∙",
|
|
"●●∙",
|
|
"●●●",
|
|
"∙●●",
|
|
"∙∙●",
|
|
"∙∙∙",
|
|
"∙∙●",
|
|
"∙●●",
|
|
"●●●",
|
|
"●●∙",
|
|
"●∙∙",
|
|
},
|
|
FPS: 440 * time.Millisecond,
|
|
},
|
|
))
|
|
}
|
|
|
|
func Chat(app *model.AppModel) *Model {
|
|
m := Model{
|
|
App: app,
|
|
|
|
state: idle,
|
|
persistence: true,
|
|
|
|
stopSignal: make(chan struct{}),
|
|
replyChan: make(chan conversation.Message),
|
|
chatReplyChunks: make(chan provider.Chunk),
|
|
|
|
wrap: true,
|
|
selectedMessage: -1,
|
|
|
|
content: viewport.New(0, 0),
|
|
input: textarea.New(),
|
|
spinner: getSpinner(),
|
|
replyCursor: cursor.New(),
|
|
}
|
|
|
|
m.replyCursor.SetChar(" ")
|
|
m.replyCursor.Focus()
|
|
|
|
m.input.Focus()
|
|
m.input.MaxHeight = 0
|
|
m.input.CharLimit = 0
|
|
m.input.ShowLineNumbers = false
|
|
m.input.Placeholder = "Enter a message"
|
|
|
|
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
|
|
m.input.FocusedStyle.Base = inputFocusedStyle
|
|
m.input.BlurredStyle.Base = inputBlurredStyle
|
|
return &m
|
|
}
|
|
|
|
func (m *Model) Init() tea.Cmd {
|
|
return tea.Batch(
|
|
m.waitForResponseChunk(),
|
|
)
|
|
}
|