// lmcli/pkg/tui/views/chat/chat.go
package chat
import (
"time"
"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
"github.com/charmbracelet/bubbles/cursor"
"github.com/charmbracelet/bubbles/spinner"
"github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// custom tea.Msg types
type (
	// msgConversationLoaded is sent when a conversation is (re)loaded
	msgConversationLoaded struct {
		conversation *api.Conversation
		rootMessages []api.Message
	}
	// msgConversationTitleGenerated is sent when a new conversation title is generated
	msgConversationTitleGenerated string
	// msgConversationPersisted is sent when the conversation has been
	// persisted; triggers a reload of contents
	msgConversationPersisted struct {
		isNew        bool
		conversation *api.Conversation
		messages     []api.Message
	}
	// msgMessagesLoaded is sent when a conversation's messages are loaded
	msgMessagesLoaded []api.Message
	// msgChatResponseError is a special case of common.MsgError that stops
	// the response waiting animation
	msgChatResponseError error
	// msgChatResponseChunk is sent on each chunk received from the LLM
	msgChatResponseChunk api.Chunk
	// msgChatResponse is sent on each completed reply
	msgChatResponse *api.Message
	// msgChatResponseCanceled is sent when the response is canceled
	msgChatResponseCanceled struct{}
	// msgToolResults is sent when results from a tool call are returned
	msgToolResults []api.ToolResult
	// msgSelectedReplyCycled is sent when the given message is made the new
	// selected reply of its parent
	msgSelectedReplyCycled *api.Message
	// msgSelectedRootCycled is sent when the given message is made the new
	// selected root of the current conversation
	msgSelectedRootCycled *api.Message
	// msgMessageUpdated is sent when a message's contents are updated and saved
	msgMessageUpdated *api.Message
	// msgMessageCloned is sent when a message is cloned, with the cloned message
	msgMessageCloned *api.Message
)
// focusState identifies which element of the chat view currently has
// keyboard focus.
type focusState int

const (
	focusInput    focusState = iota // the message input textarea
	focusMessages                   // the message content area
)
// editorTarget identifies the destination of edited text: the input box
// or the currently selected message.
type editorTarget int

const (
	input editorTarget = iota
	selectedMessage
)
// state describes the overall status of the chat view.
type state int

const (
	idle            state = iota // nothing in progress
	loading                      // conversation contents are being loaded
	pendingResponse              // waiting on an in-progress LLM response
)
type Model struct {
2024-06-08 16:01:16 -06:00
shared.Shared
2024-05-30 01:04:55 -06:00
shared.Sections
// app state
state state // current overall status of the view
conversation *api.Conversation
rootMessages []api.Message
messages []api.Message
selectedMessage int
editorTarget editorTarget
stopSignal chan struct{}
replyChan chan api.Message
chatReplyChunks chan api.Chunk
persistence bool // whether we will save new messages in the conversation
// ui state
focus focusState
wrap bool // whether message content is wrapped to viewport width
showToolResults bool // whether tool calls and results are shown
messageCache []string // cache of syntax highlighted and wrapped message content
messageOffsets []int
// ui elements
content viewport.Model
input textarea.Model
spinner spinner.Model
replyCursor cursor.Model // cursor to indicate incoming response
2024-06-02 16:40:46 -06:00
// metrics
tokenCount uint
startTime time.Time
elapsed time.Duration
}
2024-06-08 16:01:16 -06:00
func Chat(shared shared.Shared) Model {
m := Model{
2024-06-08 16:01:16 -06:00
Shared: shared,
state: idle,
conversation: &api.Conversation{},
persistence: true,
stopSignal: make(chan struct{}),
replyChan: make(chan api.Message),
chatReplyChunks: make(chan api.Chunk),
wrap: true,
selectedMessage: -1,
content: viewport.New(0, 0),
input: textarea.New(),
spinner: spinner.New(spinner.WithSpinner(
spinner.Spinner{
Frames: []string{
". ",
".. ",
"...",
".. ",
". ",
" ",
},
FPS: time.Second / 3,
},
)),
replyCursor: cursor.New(),
}
m.replyCursor.SetChar(" ")
m.replyCursor.Focus()
system := shared.Ctx.Config.GetSystemPrompt()
if system != "" {
m.messages = []api.Message{{
Role: api.MessageRoleSystem,
Content: system,
}}
}
m.input.Focus()
m.input.MaxHeight = 0
m.input.CharLimit = 0
m.input.ShowLineNumbers = false
m.input.Placeholder = "Enter a message"
m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
m.input.FocusedStyle.Base = inputFocusedStyle
m.input.BlurredStyle.Base = inputBlurredStyle
return m
}
// Init implements tea.Model. It returns the chat view's initial command:
// begin listening for streamed response chunks.
func (m Model) Init() tea.Cmd {
	initialCmds := []tea.Cmd{
		m.waitForResponseChunk(),
	}
	return tea.Batch(initialCmds...)
}