Matt Low
bb48bc9abd
Adjusted `ctrl+t` in chat view to toggle `showDetails`, which shows or hides system messages, message metadata (generation model), and tool call details. Modified the message selection update logic to skip messages that aren't shown.
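The selection logic described above can be sketched in isolation. The snippet below is not code from this commit; `message`, `isHidden`, `nextVisible`, and the role names are simplified stand-ins for the real `api.Message` handling, and the `ctrl+t` binding itself presumably just flips `showDetails` and rebuilds the rendered message cache.

package main

import "fmt"

// message is a simplified stand-in for api.Message; only the role matters here.
type message struct {
	role string // e.g. "system", "user", "assistant", "tool_call", "tool_result"
}

// isHidden reports whether a message would be hidden while showDetails is off.
func isHidden(m message, showDetails bool) bool {
	if showDetails {
		return false
	}
	return m.role == "system" || m.role == "tool_call" || m.role == "tool_result"
}

// nextVisible returns the index of the next shown message in the given
// direction (+1 = down, -1 = up), or cur when nothing further is shown.
func nextVisible(msgs []message, cur, dir int, showDetails bool) int {
	for i := cur + dir; i >= 0 && i < len(msgs); i += dir {
		if !isHidden(msgs[i], showDetails) {
			return i
		}
	}
	return cur
}

func main() {
	msgs := []message{{"system"}, {"user"}, {"tool_call"}, {"assistant"}}
	fmt.Println(nextVisible(msgs, 1, +1, false)) // 3: the hidden tool call is skipped
	fmt.Println(nextVisible(msgs, 1, -1, false)) // 1: nothing shown above, selection stays put
}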
162 lines
3.9 KiB
Go
package chat

import (
	"time"

	"git.mlow.ca/mlow/lmcli/pkg/api"
	"git.mlow.ca/mlow/lmcli/pkg/api/provider"
	"git.mlow.ca/mlow/lmcli/pkg/tui/model"
	"github.com/charmbracelet/bubbles/cursor"
	"github.com/charmbracelet/bubbles/spinner"
	"github.com/charmbracelet/bubbles/textarea"
	"github.com/charmbracelet/bubbles/viewport"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)

// custom tea.Msg types
type (
	// sent when a new conversation title is generated
	msgConversationTitleGenerated string
	// sent when the conversation has been persisted, triggers a reload of contents
	msgConversationPersisted struct {
		isNew        bool
		conversation *api.Conversation
		messages     []api.Message
	}
	// sent when a conversation's messages are loaded
	msgConversationMessagesLoaded struct {
		messages     []api.Message
		rootMessages []api.Message
	}
	// a special case of common.MsgError that stops the response waiting animation
	msgChatResponseError struct {
		Err error
	}
	// sent on each chunk received from the LLM
	msgChatResponseChunk provider.Chunk
	// sent on each completed reply
	msgChatResponse *api.Message
	// sent when the response is canceled
	msgChatResponseCanceled struct{}
	// sent when results from a tool call are returned
	msgToolResults []api.ToolResult
	// sent when the given message is made the new selected reply of its parent
	msgSelectedReplyCycled *api.Message
	// sent when the given message is made the new selected root of the current conversation
	msgSelectedRootCycled *api.Message
	// sent when a message's contents are updated and saved
	msgMessageUpdated *api.Message
	// sent when a message is cloned, with the cloned message
	msgMessageCloned *api.Message
)

type focusState int

const (
	focusInput focusState = iota
	focusMessages
)

type editorTarget int

const (
	input editorTarget = iota
	selectedMessage
)

type state int

const (
	idle state = iota
	loading
	pendingResponse
)

type Model struct {
	// App state
	App    *model.AppModel
	Height int
	Width  int

	// Chat view state
	state           state // current overall status of the view
	selectedMessage int
	editorTarget    editorTarget
	stopSignal      chan struct{}
	replyChan       chan api.Message
	chatReplyChunks chan provider.Chunk
	persistence     bool // whether we will save new messages in the conversation

	// UI state
	focus          focusState
	showDetails    bool     // whether various details are shown in the UI (e.g. system prompt, tool calls/results, message metadata)
	wrap           bool     // whether message content is wrapped to viewport width
	messageCache   []string // cache of syntax highlighted and wrapped message content
	messageOffsets []int

	// ui elements
	content     viewport.Model
	input       textarea.Model
	spinner     spinner.Model
	replyCursor cursor.Model // cursor to indicate incoming response

	// metrics
	tokenCount uint
	startTime  time.Time
	elapsed    time.Duration
}

func Chat(app *model.AppModel) *Model {
	m := Model{
		App: app,

		state:       idle,
		persistence: true,

		stopSignal:      make(chan struct{}),
		replyChan:       make(chan api.Message),
		chatReplyChunks: make(chan provider.Chunk),

		wrap:            true,
		selectedMessage: -1,

		content: viewport.New(0, 0),
		input:   textarea.New(),
		spinner: spinner.New(spinner.WithSpinner(
			spinner.Spinner{
				Frames: []string{
					". ",
					".. ",
					"...",
					".. ",
					". ",
					" ",
				},
				FPS: time.Second / 3,
			},
		)),
		replyCursor: cursor.New(),
	}

	m.replyCursor.SetChar(" ")
	m.replyCursor.Focus()

	m.input.Focus()
	m.input.MaxHeight = 0
	m.input.CharLimit = 0
	m.input.ShowLineNumbers = false
	m.input.Placeholder = "Enter a message"

	m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
	m.input.FocusedStyle.Base = inputFocusedStyle
	m.input.BlurredStyle.Base = inputBlurredStyle
	return &m
}

func (m *Model) Init() tea.Cmd {
	return tea.Batch(
		m.waitForResponseChunk(),
	)
}
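`waitForResponseChunk` is defined elsewhere in this package. In Bubble Tea, a command that bridges a channel into the update loop usually just blocks on a receive and returns the value as a message; a minimal sketch assuming that pattern (not necessarily the actual implementation):

// Sketch only: converts the next streamed chunk into a tea.Msg. Update would
// re-issue this command after handling each msgChatResponseChunk so the view
// keeps listening for the rest of the response.
func (m *Model) waitForResponseChunk() tea.Cmd {
	return func() tea.Msg {
		return msgChatResponseChunk(<-m.chatReplyChunks)
	}
}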