diff --git a/pkg/tui/tui.go b/pkg/tui/tui.go
index de27cbe..c9df9c5 100644
--- a/pkg/tui/tui.go
+++ b/pkg/tui/tui.go
@@ -39,6 +39,7 @@ type model struct {
 	replyChunkChan  chan string
 	replyCancelFunc context.CancelFunc
 	err             error
+	persistence     bool // whether we will save new messages in the conversation
 
 	// ui state
 	focus focusState
@@ -81,7 +82,6 @@ var (
 	contentStyle = lipgloss.NewStyle().
 			Padding(1)
 	footerStyle = lipgloss.NewStyle().
-			Faint(true).
 			BorderTop(true).
 			BorderStyle(lipgloss.NormalBorder())
 )
@@ -95,8 +95,14 @@ func (m model) Init() tea.Cmd {
 	)
 }
 
+func wrapError(err error) tea.Cmd {
+	return func() tea.Msg {
+		return msgError(err)
+	}
+}
+
 func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
-	var cmd tea.Cmd
+	var cmds []tea.Cmd
 
 	switch msg := msg.(type) {
 	case tea.KeyMsg:
@@ -107,6 +113,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			} else {
 				return m, tea.Quit
 			}
+		case "ctrl+p":
+			m.persistence = !m.persistence
 		case "q":
 			if m.focus != focusInput {
 				return m, tea.Quit
@@ -130,7 +138,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.updateContent()
 	case msgConversationLoaded:
 		m.conversation = (*models.Conversation)(msg)
-		cmd = m.loadMessages(m.conversation)
+		cmds = append(cmds, m.loadMessages(m.conversation))
 	case msgMessagesLoaded:
 		m.setMessages(msg)
 		m.updateContent()
@@ -146,7 +154,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			})
 		}
 		m.updateContent()
-		cmd = m.waitForChunk() // wait for the next chunk
+		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
 	case msgReply:
 		// the last reply that was being worked on is finished
 		reply := models.Message(msg)
@@ -159,18 +167,24 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		} else if reply.Role != models.MessageRoleAssistant {
 			m.addMessage(reply)
 		}
+
+		if m.persistence && m.conversation != nil && m.conversation.ID > 0 {
+			cmds = append(cmds, m.persistRecentMessages())
+		}
+
 		m.updateContent()
-		cmd = m.waitForReply()
+		cmds = append(cmds, m.waitForReply())
 	case msgResponseEnd:
 		m.replyCancelFunc = nil
 		m.waitingForReply = false
 		m.status = "Press ctrl+s to send"
 	}
 
-	if cmd != nil {
-		return m, cmd
+	if len(cmds) > 0 {
+		return m, tea.Batch(cmds...)
 	}
 
+	var cmd tea.Cmd
 	m.input, cmd = m.input.Update(msg)
 	if cmd != nil {
 		return m, cmd
@@ -225,10 +239,18 @@ func (m *model) inputView() string {
 }
 
 func (m *model) footerView() string {
-	segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1)
+	segmentStyle := lipgloss.NewStyle().PaddingLeft(1).PaddingRight(1).Faint(true)
 	segmentSeparator := "|"
 
+	saving := ""
+	if m.persistence {
+		saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("2")).Render("✅💾")
+	} else {
+		saving = segmentStyle.Copy().Bold(true).Foreground(lipgloss.Color("1")).Render("❌💾")
+	}
+
 	leftSegments := []string{
+		saving,
 		segmentStyle.Render(m.status),
 	}
 	rightSegments := []string{
@@ -258,6 +280,7 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
 	m := model{
 		ctx:           ctx,
 		convShortname: convShortname,
+		persistence:   true,
 
 		replyChan:      make(chan models.Message),
 		replyChunkChan: make(chan string),
@@ -298,11 +321,29 @@ func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
 	if strings.TrimSpace(userInput) == "" {
 		return nil
 	}
-	m.input.SetValue("")
-	m.addMessage(models.Message{
+
+	reply := models.Message{
 		Role:    models.MessageRoleUser,
 		Content: userInput,
-	})
+	}
+
+	if m.persistence && m.conversation != nil && m.conversation.ID > 0 {
+		// ensure all messages up to the one we're about to add are
+		// persistent
+		cmd := m.persistRecentMessages()
+		if cmd != nil {
+			return cmd
+		}
+		// persist our new message, returning with any possible errors
+		savedReply, err := m.ctx.Store.AddReply(m.conversation, reply)
+		if err != nil {
+			return wrapError(err)
+		}
+		reply = *savedReply
+	}
+
+	m.input.SetValue("")
+	m.addMessage(reply)
 	m.updateContent()
 	m.content.GotoBottom()
 
@@ -382,6 +423,20 @@ func (m *model) promptLLM() tea.Cmd {
 	}
 }
 
+func (m *model) persistRecentMessages() tea.Cmd {
+	for i, msg := range m.messages {
+		if msg.ID > 0 {
+			continue
+		}
+		newMessage, err := m.ctx.Store.AddReply(m.conversation, msg)
+		if err != nil {
+			return wrapError(err)
+		}
+		m.setMessage(i, *newMessage)
+	}
+	return nil
+}
+
 func (m *model) setMessages(messages []models.Message) {
 	m.messages = messages
 	m.highlightCache = make([]string, len(messages))
@@ -436,11 +491,16 @@ func (m *model) updateContent() {
 			icon = "🔧"
 		}
 
+		var saved string
+		if message.ID == 0 {
+			saved = lipgloss.NewStyle().Faint(true).Render(" (not saved)")
+		}
+
 		// write message heading with space for content
-		header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly))
+		header := fmt.Sprintf("%s\n\n", style.Render(icon+friendly)+saved)
 		sb.WriteString(header)
 
-		// TODO: render something for tool calls/results?
+		// TODO: special rendering for tool calls/results?
 
 		// write message contents
 		var highlighted string
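
Note: below is a minimal, self-contained sketch of the two patterns this change introduces into Update: accumulating commands in a slice and returning them with tea.Batch, and wrapping an error in a tea.Cmd so it arrives as an ordinary message. It uses only the public Bubble Tea API (github.com/charmbracelet/bubbletea, assuming a release that provides Program.Run); the model, msgError, and key bindings here are illustrative stand-ins rather than lmcli's actual types.

package main

import (
	"errors"
	"fmt"

	tea "github.com/charmbracelet/bubbletea"
)

// msgError mirrors the error-as-message pattern: wrapError turns an error
// into a command so Update can handle failures like any other tea.Msg.
type msgError error

func wrapError(err error) tea.Cmd {
	return func() tea.Msg {
		return msgError(err)
	}
}

type model struct {
	status string
}

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	// Collect every command produced while handling this message, then hand
	// them all to the runtime at once.
	var cmds []tea.Cmd

	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c":
			return m, tea.Quit
		case "e":
			// Queue a command that fails, to exercise wrapError.
			cmds = append(cmds, wrapError(errors.New("example failure")))
		}
	case msgError:
		m.status = "error: " + msg.Error()
	}

	if len(cmds) > 0 {
		return m, tea.Batch(cmds...)
	}
	return m, nil
}

func (m model) View() string {
	return fmt.Sprintf("status: %s\n(press e to queue an error, q to quit)\n", m.status)
}

func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Println(err)
	}
}

Batching matters for the diff above because a single message (for example msgReply) can now produce more than one command, such as persistRecentMessages followed by waitForReply, and tea.Batch lets Update return them together instead of picking one.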