Matt Low
443c8096d3
- Clean up, improved startup logic, initial conversation load - Moved conversation/message business logic (mostly) into `model/tui`
164 lines
3.8 KiB
Go
164 lines
3.8 KiB
Go
package chat
|
|
|
|
import (
|
|
"time"
|
|
|
|
"git.mlow.ca/mlow/lmcli/pkg/api"
|
|
"git.mlow.ca/mlow/lmcli/pkg/tui/model"
|
|
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
|
|
tea "github.com/charmbracelet/bubbletea"
|
|
)
|
|
|
|
func (m *Model) setMessage(i int, msg api.Message) {
|
|
if i >= len(m.App.Messages) {
|
|
panic("i out of range")
|
|
}
|
|
m.App.Messages[i] = msg
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
|
|
func (m *Model) addMessage(msg api.Message) {
|
|
m.App.Messages = append(m.App.Messages, msg)
|
|
m.messageCache = append(m.messageCache, m.renderMessage(len(m.App.Messages)-1))
|
|
}
|
|
|
|
func (m *Model) setMessageContents(i int, content string) {
|
|
if i >= len(m.App.Messages) {
|
|
panic("i out of range")
|
|
}
|
|
m.App.Messages[i].Content = content
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
|
|
func (m *Model) rebuildMessageCache() {
|
|
m.messageCache = make([]string, len(m.App.Messages))
|
|
for i := range m.App.Messages {
|
|
m.messageCache[i] = m.renderMessage(i)
|
|
}
|
|
}
|
|
|
|
func (m *Model) updateContent() {
|
|
atBottom := m.content.AtBottom()
|
|
m.content.SetContent(m.conversationMessagesView())
|
|
if atBottom {
|
|
m.content.GotoBottom()
|
|
}
|
|
}
|
|
|
|
func (m *Model) loadConversationMessages() tea.Cmd {
|
|
return func() tea.Msg {
|
|
messages, err := m.App.LoadConversationMessages()
|
|
if err != nil {
|
|
return shared.MsgError(err)
|
|
}
|
|
return msgMessagesLoaded(messages)
|
|
}
|
|
}
|
|
|
|
func (m *Model) generateConversationTitle() tea.Cmd {
|
|
return func() tea.Msg {
|
|
title, err := m.App.GenerateConversationTitle(m.App.Messages)
|
|
if err != nil {
|
|
return shared.MsgError(err)
|
|
}
|
|
return msgConversationTitleGenerated(title)
|
|
}
|
|
}
|
|
|
|
func (m *Model) updateConversationTitle(conversation *api.Conversation) tea.Cmd {
|
|
return func() tea.Msg {
|
|
err := m.App.UpdateConversationTitle(conversation)
|
|
if err != nil {
|
|
return shared.WrapError(err)
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
func (m *Model) cloneMessage(message api.Message, selected bool) tea.Cmd {
|
|
return func() tea.Msg {
|
|
msg, err := m.App.CloneMessage(message, selected)
|
|
if err != nil {
|
|
return shared.WrapError(err)
|
|
}
|
|
return msgMessageCloned(msg)
|
|
}
|
|
}
|
|
|
|
func (m *Model) updateMessageContent(message *api.Message) tea.Cmd {
|
|
return func() tea.Msg {
|
|
err := m.App.UpdateMessageContent(message)
|
|
if err != nil {
|
|
return shared.WrapError(err)
|
|
}
|
|
return msgMessageUpdated(message)
|
|
}
|
|
}
|
|
|
|
func (m *Model) cycleSelectedRoot(conv *api.Conversation, dir model.MessageCycleDirection) tea.Cmd {
|
|
if len(m.App.RootMessages) < 2 {
|
|
return nil
|
|
}
|
|
|
|
return func() tea.Msg {
|
|
nextRoot, err := m.App.CycleSelectedRoot(conv, m.App.RootMessages, dir)
|
|
if err != nil {
|
|
return shared.WrapError(err)
|
|
}
|
|
return msgSelectedRootCycled(nextRoot)
|
|
}
|
|
}
|
|
|
|
func (m *Model) cycleSelectedReply(message *api.Message, dir model.MessageCycleDirection) tea.Cmd {
|
|
if len(message.Replies) < 2 {
|
|
return nil
|
|
}
|
|
|
|
return func() tea.Msg {
|
|
nextReply, err := m.App.CycleSelectedReply(message, dir)
|
|
if err != nil {
|
|
return shared.WrapError(err)
|
|
}
|
|
return msgSelectedReplyCycled(nextReply)
|
|
}
|
|
}
|
|
|
|
func (m *Model) persistConversation() tea.Cmd {
|
|
return func() tea.Msg {
|
|
conversation, messages, err := m.App.PersistConversation(m.App.Conversation, m.App.Messages)
|
|
if err != nil {
|
|
return shared.MsgError(err)
|
|
}
|
|
return msgConversationPersisted{conversation.ID == 0, conversation, messages}
|
|
}
|
|
}
|
|
|
|
func (m *Model) executeToolCalls(toolCalls []api.ToolCall) tea.Cmd {
|
|
return func() tea.Msg {
|
|
results, err := m.App.ExecuteToolCalls(toolCalls)
|
|
if err != nil {
|
|
return shared.MsgError(err)
|
|
}
|
|
return msgToolResults(results)
|
|
}
|
|
}
|
|
|
|
func (m *Model) promptLLM() tea.Cmd {
|
|
m.state = pendingResponse
|
|
m.replyCursor.Blink = false
|
|
|
|
m.startTime = time.Now()
|
|
m.elapsed = 0
|
|
m.tokenCount = 0
|
|
|
|
return func() tea.Msg {
|
|
resp, err := m.App.PromptLLM(m.App.Messages, m.chatReplyChunks, m.stopSignal)
|
|
|
|
if err != nil {
|
|
return msgChatResponseError(err)
|
|
}
|
|
|
|
return msgChatResponse(resp)
|
|
}
|
|
}
|