package chat

import (
	"time"

	"git.mlow.ca/mlow/lmcli/pkg/api"
	"git.mlow.ca/mlow/lmcli/pkg/tui/model"
	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
	"github.com/charmbracelet/bubbles/cursor"
	"github.com/charmbracelet/bubbles/spinner"
	"github.com/charmbracelet/bubbles/textarea"
	"github.com/charmbracelet/bubbles/viewport"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)

// custom tea.Msg types
type (
	// sent when a new conversation title is generated
	msgConversationTitleGenerated string
	// sent when the conversation has been persisted; triggers a reload of contents
	msgConversationPersisted struct {
		isNew        bool
		conversation *api.Conversation
		messages     []api.Message
	}
	// sent when a conversation's messages are loaded
	msgConversationMessagesLoaded struct {
		messages     []api.Message
		rootMessages []api.Message
	}
	// a special case of common.MsgError that stops the response waiting animation
	msgChatResponseError error
	// sent on each chunk received from the LLM
	msgChatResponseChunk api.Chunk
	// sent on each completed reply
	msgChatResponse *api.Message
	// sent when the response is canceled
	msgChatResponseCanceled struct{}
	// sent when results from a tool call are returned
	msgToolResults []api.ToolResult
	// sent when the given message is made the new selected reply of its parent
	msgSelectedReplyCycled *api.Message
	// sent when the given message is made the new selected root of the current conversation
	msgSelectedRootCycled *api.Message
	// sent when a message's contents are updated and saved
	msgMessageUpdated *api.Message
	// sent when a message is cloned, with the cloned message
	msgMessageCloned *api.Message
)

type focusState int

const (
	focusInput focusState = iota
	focusMessages
)

type editorTarget int

const (
	input editorTarget = iota
	selectedMessage
)

type state int

const (
	idle state = iota
	loading
	pendingResponse
)

type Model struct {
	*shared.ViewState
	shared.Sections

	// App state
	App *model.AppModel

	// Chat view state
	state           state // current overall status of the view
	selectedMessage int
	editorTarget    editorTarget
	stopSignal      chan struct{}
	replyChan       chan api.Message
	chatReplyChunks chan api.Chunk
	persistence     bool // whether we will save new messages in the conversation

	// UI state
	focus           focusState
	wrap            bool     // whether message content is wrapped to viewport width
	showToolResults bool     // whether tool calls and results are shown
	messageCache    []string // cache of syntax highlighted and wrapped message content
	messageOffsets  []int

	// ui elements
	content     viewport.Model
	input       textarea.Model
	spinner     spinner.Model
	replyCursor cursor.Model // cursor to indicate incoming response

	// metrics
	tokenCount uint
	startTime  time.Time
	elapsed    time.Duration
}

func (m Model) Initialized() bool {
	return m.ViewState.Initialized
}

func Chat(app *model.AppModel, shared shared.ViewState) shared.ViewModel {
	m := Model{
		App:       app,
		ViewState: &shared,

		state:       idle,
		persistence: true,

		stopSignal:      make(chan struct{}),
		replyChan:       make(chan api.Message),
		chatReplyChunks: make(chan api.Chunk),

		wrap:            true,
		selectedMessage: -1,

		content: viewport.New(0, 0),
		input:   textarea.New(),
		spinner: spinner.New(spinner.WithSpinner(
			spinner.Spinner{
				Frames: []string{
					". ",
					".. ",
					"...",
					".. ",
					". ",
					"  ",
				},
				FPS: time.Second / 3,
			},
		)),
		replyCursor: cursor.New(),
	}

	m.replyCursor.SetChar(" ")
	m.replyCursor.Focus()

	system := app.Ctx.DefaultSystemPrompt()
	agent := app.Ctx.GetAgent(app.Ctx.Config.Defaults.Agent)
	if agent != nil && agent.SystemPrompt != "" {
		system = agent.SystemPrompt
	}
	if system != "" {
		m.App.Messages = api.ApplySystemPrompt(m.App.Messages, system, false)
	}

	m.input.Focus()
	m.input.MaxHeight = 0
	m.input.CharLimit = 0
	m.input.ShowLineNumbers = false
	m.input.Placeholder = "Enter a message"

	m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
	m.input.FocusedStyle.Base = inputFocusedStyle
	m.input.BlurredStyle.Base = inputBlurredStyle

	return m
}

func (m Model) Init() tea.Cmd {
	m.ViewState.Initialized = true
	return tea.Batch(
		m.waitForResponseChunk(),
	)
}