Compare commits
No commits in common. "42c3297e54e2e89381d8b0b8a9855824c10099a8" and "45df957a062d962e42a2ab7992e7c7ac62edc2b8" have entirely different histories.
42c3297e54...45df957a06

@@ -31,6 +31,7 @@ func ReplyCmd(ctx *lmcli.Context) *cobra.Command {
}

cmdutil.HandleConversationReply(ctx, conversation, true, model.Message{
ConversationID: conversation.ID,
Role: model.MessageRoleUser,
Content: reply,
})

@@ -8,7 +8,6 @@ import (
"strings"
"time"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/util"

@@ -18,7 +17,7 @@ import (
// Prompt prompts the configured the configured model and streams the response
// to stdout. Returns all model reply messages.
func Prompt(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
content := make(chan api.Chunk) // receives the reponse from LLM
content := make(chan string) // receives the reponse from LLM
defer close(content)

// render all content received over the channel

@@ -252,7 +251,7 @@ func ShowWaitAnimation(signal chan any) {
// chunked) content is received on the channel, the waiting animation is
// replaced by the content.
// Blocks until the channel is closed.
func ShowDelayedContent(content <-chan api.Chunk) {
func ShowDelayedContent(content <-chan string) {
waitSignal := make(chan any)
go ShowWaitAnimation(waitSignal)

@@ -265,7 +264,7 @@ func ShowDelayedContent(content <-chan api.Chunk) {
<-waitSignal
firstChunk = false
}
fmt.Print(chunk.Content)
fmt.Print(chunk)
}
}
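
The two ShowDelayedContent signatures above differ only in the element type streamed over the channel: one side of this comparison passes api.Chunk values, the other plain strings. A minimal, self-contained sketch of the consumer loop this hunk implies, assuming the Chunk shape defined later in this comparison (the package main wrapper and the collect* helper names are illustrative, not part of the repository):

package main

import (
    "fmt"
    "strings"
)

// Chunk mirrors the api.Chunk struct that exists on the 42c3297e54 side of
// this comparison; on the 45df957a06 side the channel carries plain strings.
type Chunk struct {
    Content string
}

// collectChunks drains a Chunk channel, printing and accumulating each piece
// of text as it arrives (the structured-chunk shape).
func collectChunks(content <-chan Chunk) string {
    var sb strings.Builder
    for chunk := range content {
        fmt.Print(chunk.Content)
        sb.WriteString(chunk.Content)
    }
    return sb.String()
}

// collectStrings does the same for a plain string channel.
func collectStrings(content <-chan string) string {
    var sb strings.Builder
    for chunk := range content {
        fmt.Print(chunk)
        sb.WriteString(chunk)
    }
    return sb.String()
}

func main() {
    ch := make(chan Chunk, 2)
    ch <- Chunk{Content: "Hello, "}
    ch <- Chunk{Content: "world\n"}
    close(ch)
    collectChunks(ch)
}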

@@ -6,12 +6,12 @@ import (
"path/filepath"
"strings"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/api/provider/anthropic"
"git.mlow.ca/mlow/lmcli/pkg/api/provider/google"
"git.mlow.ca/mlow/lmcli/pkg/api/provider/ollama"
"git.mlow.ca/mlow/lmcli/pkg/api/provider/openai"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider/anthropic"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider/google"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider/ollama"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider/openai"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
"git.mlow.ca/mlow/lmcli/pkg/util"
"git.mlow.ca/mlow/lmcli/pkg/util/tty"

@@ -79,7 +79,7 @@ func (c *Context) GetModels() (models []string) {
return
}

func (c *Context) GetModelProvider(model string) (string, api.ChatCompletionClient, error) {
func (c *Context) GetModelProvider(model string) (string, provider.ChatCompletionClient, error) {
parts := strings.Split(model, "/")

var provider string

@@ -17,8 +17,8 @@ const (

type Message struct {
ID uint `gorm:"primaryKey"`
ConversationID *uint `gorm:"index"`
Conversation *Conversation `gorm:"foreignKey:ConversationID"`
ConversationID uint `gorm:"index"`
Conversation Conversation `gorm:"foreignKey:ConversationID"`
Content string
Role MessageRole
CreatedAt time.Time
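
This hunk changes Message.ConversationID from *uint to uint and the Conversation association from a pointer to a value. In Go terms, the pointer field can be nil while the plain uint can only fall back to its zero value; with GORM this generally means the pointer variant can persist a NULL foreign key whereas the value variant always writes a number (a hedged reading of the tags above, not something this diff states). A small sketch of the difference, using hypothetical struct names:

package main

import "fmt"

// MessageNullableFK mirrors the *uint shape: nil marks "no conversation".
type MessageNullableFK struct {
    ID             uint
    ConversationID *uint
}

// MessageValueFK mirrors the uint shape: only the zero value can mark "unset".
type MessageValueFK struct {
    ID             uint
    ConversationID uint
}

func main() {
    a := MessageNullableFK{ID: 1}
    b := MessageValueFK{ID: 1}
    fmt.Println(a.ConversationID == nil) // true: detachment is representable
    fmt.Println(b.ConversationID == 0)   // true: indistinguishable from conversation ID 0
}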

@@ -10,8 +10,8 @@ import (
"net/http"
"strings"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
)

@@ -107,7 +107,7 @@ func (c *AnthropicClient) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
callback provider.ReplyCallback,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -160,8 +160,8 @@ func (c *AnthropicClient) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
output chan<- api.Chunk,
callback provider.ReplyCallback,
output chan<- string,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -242,9 +242,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
return "", fmt.Errorf("invalid text delta")
}
sb.WriteString(text)
output <- api.Chunk{
Content: text,
}
output <- text
case "content_block_stop":
// ignore?
case "message_delta":

@@ -264,9 +262,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
}

sb.WriteString(FUNCTION_STOP_SEQUENCE)
output <- api.Chunk{
Content: FUNCTION_STOP_SEQUENCE,
}
output <- FUNCTION_STOP_SEQUENCE

funcCallXml := content[start:] + FUNCTION_STOP_SEQUENCE

@@ -10,8 +10,8 @@ import (
"net/http"
"strings"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
)

@@ -187,7 +187,7 @@ func handleToolCalls(
params model.RequestParameters,
content string,
toolCalls []model.ToolCall,
callback api.ReplyCallback,
callback provider.ReplyCallback,
messages []model.Message,
) ([]model.Message, error) {
lastMessage := messages[len(messages)-1]

@@ -245,7 +245,7 @@ func (c *Client) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
callback provider.ReplyCallback,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -325,8 +325,8 @@ func (c *Client) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
output chan<- api.Chunk,
callback provider.ReplyCallback,
output chan<- string,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -393,9 +393,7 @@ func (c *Client) CreateChatCompletionStream(
if part.FunctionCall != nil {
toolCalls = append(toolCalls, *part.FunctionCall)
} else if part.Text != "" {
output <- api.Chunk {
Content: part.Text,
}
output <- part.Text
content.WriteString(part.Text)
}
}

@@ -10,8 +10,8 @@ import (
"net/http"
"strings"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider"
)

type OllamaClient struct {

@@ -85,7 +85,7 @@ func (c *OllamaClient) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
callback provider.ReplyCallback,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -117,6 +117,9 @@ func (c *OllamaClient) CreateChatCompletion(
}

content := completionResp.Message.Content

fmt.Println(content)

if callback != nil {
callback(model.Message{
Role: model.MessageRoleAssistant,

@@ -131,8 +134,8 @@ func (c *OllamaClient) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
output chan<- api.Chunk,
callback provider.ReplyCallback,
output chan<- string,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -181,9 +184,7 @@ func (c *OllamaClient) CreateChatCompletionStream(
}

if len(streamResp.Message.Content) > 0 {
output <- api.Chunk{
Content: streamResp.Message.Content,
}
output <- streamResp.Message.Content
content.WriteString(streamResp.Message.Content)
}
}

@@ -10,8 +10,8 @@ import (
"net/http"
"strings"

"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/provider"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
)

@@ -121,7 +121,7 @@ func handleToolCalls(
params model.RequestParameters,
content string,
toolCalls []ToolCall,
callback api.ReplyCallback,
callback provider.ReplyCallback,
messages []model.Message,
) ([]model.Message, error) {
lastMessage := messages[len(messages)-1]

@@ -180,7 +180,7 @@ func (c *OpenAIClient) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
callback provider.ReplyCallback,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -244,8 +244,8 @@ func (c *OpenAIClient) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters,
messages []model.Message,
callback api.ReplyCallback,
output chan<- api.Chunk,
callback provider.ReplyCallback,
output chan<- string,
) (string, error) {
if len(messages) == 0 {
return "", fmt.Errorf("Can't create completion from no messages")

@@ -319,9 +319,7 @@ func (c *OpenAIClient) CreateChatCompletionStream(
}
}
if len(delta.Content) > 0 {
output <- api.Chunk {
Content: delta.Content,
}
output <- delta.Content
content.WriteString(delta.Content)
}
}

@@ -1,4 +1,4 @@
package api
package provider

import (
"context"

@@ -8,10 +8,6 @@ import (

type ReplyCallback func(model.Message)

type Chunk struct {
Content string
}

type ChatCompletionClient interface {
// CreateChatCompletion requests a response to the provided messages.
// Replies are appended to the given replies struct, and the

@@ -30,6 +26,6 @@ type ChatCompletionClient interface {
params model.RequestParameters,
messages []model.Message,
callback ReplyCallback,
output chan<- Chunk,
output chan<- string,
) (string, error)
}
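
This interface hunk is where the two histories diverge most visibly: on the 42c3297e54 side providers stream structured Chunk values, on the 45df957a06 side they stream the text directly. A rough producer-side sketch of what a CreateChatCompletionStream implementation sends on its output channel under each shape, assuming only the Chunk struct shown above (the send* helper names are illustrative, not part of the repository); wrapping deltas in a struct is the more extensible choice, at the cost of a little ceremony at each send:

package main

import "fmt"

// Chunk mirrors the struct that the 45df957a06 side of this comparison removes.
type Chunk struct {
    Content string
}

// sendChunks wraps each text delta in a Chunk before sending it, which leaves
// room for additional per-chunk fields later (the 42c3297e54-side shape).
func sendChunks(output chan<- Chunk, deltas []string) {
    for _, d := range deltas {
        output <- Chunk{Content: d}
    }
}

// sendStrings sends the delta text directly (the 45df957a06-side shape).
func sendStrings(output chan<- string, deltas []string) {
    for _, d := range deltas {
        output <- d
    }
}

func main() {
    out := make(chan Chunk, 3)
    sendChunks(out, []string{"a", "b", "c"})
    close(out)
    for c := range out {
        fmt.Print(c.Content)
    }
    fmt.Println()
}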

@@ -58,28 +58,24 @@ func NewSQLStore(db *gorm.DB) (*SQLStore, error) {
return &SQLStore{db, _sqids}, nil
}

func (s *SQLStore) createConversation() (*model.Conversation, error) {
// Create the new conversation
c := &model.Conversation{}
err := s.db.Save(c).Error
func (s *SQLStore) saveNewConversation(c *model.Conversation) error {
// Save the new conversation
err := s.db.Save(&c).Error
if err != nil {
return nil, err
return err
}

// Generate and save its "short name"
shortName, _ := s.sqids.Encode([]uint64{uint64(c.ID)})
c.ShortName = sql.NullString{String: shortName, Valid: true}
err = s.db.Updates(c).Error
if err != nil {
return nil, err
}
return c, nil
return s.UpdateConversation(c)
}

func (s *SQLStore) UpdateConversation(c *model.Conversation) error {
if c == nil || c.ID == 0 {
return fmt.Errorf("Conversation is nil or invalid (missing ID)")
}
return s.db.Updates(c).Error
return s.db.Updates(&c).Error
}

func (s *SQLStore) DeleteConversation(c *model.Conversation) error {

@@ -88,7 +84,7 @@ func (s *SQLStore) DeleteConversation(c *model.Conversation) error {
if err != nil {
return err
}
return s.db.Delete(c).Error
return s.db.Delete(&c).Error
}

func (s *SQLStore) DeleteMessage(message *model.Message, prune bool) error {

@@ -100,7 +96,7 @@ func (s *SQLStore) UpdateMessage(m *model.Message) error {
if m == nil || m.ID == 0 {
return fmt.Errorf("Message is nil or invalid (missing ID)")
}
return s.db.Updates(m).Error
return s.db.Updates(&m).Error
}

func (s *SQLStore) ConversationShortNameCompletions(shortName string) []string {

@@ -153,13 +149,14 @@ func (s *SQLStore) StartConversation(messages ...model.Message) (*model.Conversa
}

// Create new conversation
conversation, err := s.createConversation()
conversation := &model.Conversation{}
err := s.saveNewConversation(conversation)
if err != nil {
return nil, nil, err
}

// Create first message
messages[0].Conversation = conversation
messages[0].ConversationID = conversation.ID
err = s.db.Create(&messages[0]).Error
if err != nil {
return nil, nil, err

@@ -190,18 +187,19 @@ func (s *SQLStore) CloneConversation(toClone model.Conversation) (*model.Convers
return nil, 0, err
}

clone, err := s.createConversation()
if err != nil {
clone := &model.Conversation{
Title: toClone.Title + " - Clone",
}
if err := s.saveNewConversation(clone); err != nil {
return nil, 0, fmt.Errorf("Could not create clone: %s", err)
}
clone.Title = toClone.Title + " - Clone"

var errors []error
var messageCnt uint = 0
for _, root := range rootMessages {
messageCnt++
newRoot := root
newRoot.ConversationID = &clone.ID
newRoot.ConversationID = clone.ID

cloned, count, err := s.CloneBranch(newRoot)
if err != nil {

@@ -232,10 +230,9 @@ func (s *SQLStore) Reply(to *model.Message, messages ...model.Message) ([]model.
err := s.db.Transaction(func(tx *gorm.DB) error {
currentParent := to
for i := range messages {
parent := currentParent
message := messages[i]
message.Parent = parent
message.Conversation = parent.Conversation
message.ConversationID = currentParent.ConversationID
message.ParentID = &currentParent.ID
message.ID = 0
message.CreatedAt = time.Time{}

@@ -244,9 +241,9 @@ func (s *SQLStore) Reply(to *model.Message, messages ...model.Message) ([]model.
}

// update parent selected reply
parent.Replies = append(parent.Replies, message)
parent.SelectedReply = &message
if err := tx.Model(parent).Update("selected_reply_id", message.ID).Error; err != nil {
currentParent.Replies = append(currentParent.Replies, message)
currentParent.SelectedReply = &message
if err := tx.Model(currentParent).Update("selected_reply_id", message.ID).Error; err != nil {
return err
}

@@ -9,7 +9,7 @@ type Values struct {
ConvShortname string
}

type Shared struct {
type State struct {
Ctx *lmcli.Context
Values *Values
Width int

@@ -18,7 +18,7 @@ import (

// Application model
type Model struct {
shared.Shared
shared.State

state shared.View
chat chat.Model

@@ -27,15 +27,15 @@ type Model struct {

func initialModel(ctx *lmcli.Context, values shared.Values) Model {
m := Model{
Shared: shared.Shared{
State: shared.State{
Ctx: ctx,
Values: &values,
},
}

m.state = shared.StateChat
m.chat = chat.Chat(m.Shared)
m.conversations = conversations.Conversations(m.Shared)
m.chat = chat.Chat(m.State)
m.conversations = conversations.Conversations(m.State)
return m
}

@@ -3,7 +3,6 @@ package chat
import (
"time"

"git.mlow.ca/mlow/lmcli/pkg/api"
models "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
"github.com/charmbracelet/bubbles/cursor"

@@ -14,10 +13,24 @@ import (
"github.com/charmbracelet/lipgloss"
)

type focusState int

const (
focusInput focusState = iota
focusMessages
)

type editorTarget int

const (
input editorTarget = iota
selectedMessage
)

// custom tea.Msg types
type (
// sent on each chunk received from LLM
msgResponseChunk api.Chunk
msgResponseChunk string
// sent when response is finished being received
msgResponseEnd string
// a special case of common.MsgError that stops the response waiting animation

@@ -35,7 +48,6 @@ type (
msgMessagesLoaded []models.Message
// sent when the conversation has been persisted, triggers a reload of contents
msgConversationPersisted struct {
isNew bool
conversation *models.Conversation
messages []models.Message
}

@@ -49,47 +61,26 @@ type (
msgMessageCloned *models.Message
)

type focusState int

const (
focusInput focusState = iota
focusMessages
)

type editorTarget int

const (
input editorTarget = iota
selectedMessage
)

type state int

const (
idle state = iota
loading
pendingResponse
)

type Model struct {
shared.Shared
shared.State
shared.Sections

// app state
state state // current overall status of the view
conversation *models.Conversation
rootMessages []models.Message
messages []models.Message
selectedMessage int
waitingForReply bool
editorTarget editorTarget
stopSignal chan struct{}
replyChan chan models.Message
replyChunkChan chan api.Chunk
replyChunkChan chan string
persistence bool // whether we will save new messages in the conversation

// ui state
focus focusState
wrap bool // whether message content is wrapped to viewport width
status string // a general status message
showToolResults bool // whether tool calls and results are shown
messageCache []string // cache of syntax highlighted and wrapped message content
messageOffsets []int

@@ -106,17 +97,16 @@ type Model struct {
elapsed time.Duration
}

func Chat(shared shared.Shared) Model {
func Chat(state shared.State) Model {
m := Model{
Shared: shared,
State: state,

state: idle,
conversation: &models.Conversation{},
persistence: true,

stopSignal: make(chan struct{}),
replyChan: make(chan models.Message),
replyChunkChan: make(chan api.Chunk),
replyChunkChan: make(chan string),

wrap: true,
selectedMessage: -1,

@@ -142,7 +132,7 @@ func Chat(shared shared.Shared) Model {
m.replyCursor.SetChar(" ")
m.replyCursor.Focus()

system := shared.Ctx.GetSystemPrompt()
system := state.Ctx.GetSystemPrompt()
if system != "" {
m.messages = []models.Message{{
Role: models.MessageRoleSystem,

@@ -160,6 +150,8 @@ func Chat(shared shared.Shared) Model {
m.input.FocusedStyle.Base = inputFocusedStyle
m.input.BlurredStyle.Base = inputBlurredStyle

m.waitingForReply = false
m.status = "Press ctrl+s to send"
return m
}

@@ -53,14 +53,14 @@ func (m *Model) loadConversation(shortname string) tea.Cmd {
if shortname == "" {
return nil
}
c, err := m.Shared.Ctx.Store.ConversationByShortName(shortname)
c, err := m.State.Ctx.Store.ConversationByShortName(shortname)
if err != nil {
return shared.MsgError(fmt.Errorf("Could not lookup conversation: %v", err))
}
if c.ID == 0 {
return shared.MsgError(fmt.Errorf("Conversation not found: %s", shortname))
}
rootMessages, err := m.Shared.Ctx.Store.RootMessages(c.ID)
rootMessages, err := m.State.Ctx.Store.RootMessages(c.ID)
if err != nil {
return shared.MsgError(fmt.Errorf("Could not load conversation root messages: %v\n", err))
}

@@ -70,7 +70,7 @@ func (m *Model) loadConversation(shortname string) tea.Cmd {

func (m *Model) loadConversationMessages() tea.Cmd {
return func() tea.Msg {
messages, err := m.Shared.Ctx.Store.PathToLeaf(m.conversation.SelectedRoot)
messages, err := m.State.Ctx.Store.PathToLeaf(m.conversation.SelectedRoot)
if err != nil {
return shared.MsgError(fmt.Errorf("Could not load conversation messages: %v\n", err))
}

@@ -80,7 +80,7 @@ func (m *Model) loadConversationMessages() tea.Cmd {

func (m *Model) generateConversationTitle() tea.Cmd {
return func() tea.Msg {
title, err := cmdutil.GenerateTitle(m.Shared.Ctx, m.messages)
title, err := cmdutil.GenerateTitle(m.State.Ctx, m.messages)
if err != nil {
return shared.MsgError(err)
}

@@ -90,7 +90,7 @@ func (m *Model) generateConversationTitle() tea.Cmd {

func (m *Model) updateConversationTitle(conversation *models.Conversation) tea.Cmd {
return func() tea.Msg {
err := m.Shared.Ctx.Store.UpdateConversation(conversation)
err := m.State.Ctx.Store.UpdateConversation(conversation)
if err != nil {
return shared.WrapError(err)
}

@@ -110,10 +110,10 @@ func (m *Model) cloneMessage(message models.Message, selected bool) tea.Cmd {
if selected {
if msg.Parent == nil {
msg.Conversation.SelectedRoot = msg
err = m.Shared.Ctx.Store.UpdateConversation(msg.Conversation)
err = m.State.Ctx.Store.UpdateConversation(&msg.Conversation)
} else {
msg.Parent.SelectedReply = msg
err = m.Shared.Ctx.Store.UpdateMessage(msg.Parent)
err = m.State.Ctx.Store.UpdateMessage(msg.Parent)
}
if err != nil {
return shared.WrapError(fmt.Errorf("Could not update selected message: %v", err))

@@ -125,7 +125,7 @@ func (m *Model) cloneMessage(message models.Message, selected bool) tea.Cmd {

func (m *Model) updateMessageContent(message *models.Message) tea.Cmd {
return func() tea.Msg {
err := m.Shared.Ctx.Store.UpdateMessage(message)
err := m.State.Ctx.Store.UpdateMessage(message)
if err != nil {
return shared.WrapError(fmt.Errorf("Could not update message: %v", err))
}

@@ -170,7 +170,7 @@ func (m *Model) cycleSelectedRoot(conv *models.Conversation, dir MessageCycleDir
}

conv.SelectedRoot = nextRoot
err = m.Shared.Ctx.Store.UpdateConversation(conv)
err = m.State.Ctx.Store.UpdateConversation(conv)
if err != nil {
return shared.WrapError(fmt.Errorf("Could not update conversation SelectedRoot: %v", err))
}

@@ -190,7 +190,7 @@ func (m *Model) cycleSelectedReply(message *models.Message, dir MessageCycleDire
}

message.SelectedReply = nextReply
err = m.Shared.Ctx.Store.UpdateMessage(message)
err = m.State.Ctx.Store.UpdateMessage(message)
if err != nil {
return shared.WrapError(fmt.Errorf("Could not update message SelectedReply: %v", err))
}

@@ -203,14 +203,14 @@ func (m *Model) persistConversation() tea.Cmd {
messages := m.messages

var err error
if conversation.ID == 0 {
if m.conversation.ID == 0 {
return func() tea.Msg {
// Start a new conversation with all messages so far
conversation, messages, err = m.Shared.Ctx.Store.StartConversation(messages...)
conversation, messages, err = m.State.Ctx.Store.StartConversation(messages...)
if err != nil {
return shared.MsgError(fmt.Errorf("Could not start new conversation: %v", err))
}
return msgConversationPersisted{true, conversation, messages}
return msgConversationPersisted{conversation, messages}
}
}

@@ -219,7 +219,7 @@ func (m *Model) persistConversation() tea.Cmd {
for i := range messages {
if messages[i].ID > 0 {
// message has an ID, update its contents
err := m.Shared.Ctx.Store.UpdateMessage(&messages[i])
err := m.State.Ctx.Store.UpdateMessage(&messages[i])
if err != nil {
return shared.MsgError(err)
}

@@ -228,7 +228,7 @@ func (m *Model) persistConversation() tea.Cmd {
continue
}
// messages is new, so add it as a reply to previous message
saved, err := m.Shared.Ctx.Store.Reply(&messages[i-1], messages[i])
saved, err := m.State.Ctx.Store.Reply(&messages[i-1], messages[i])
if err != nil {
return shared.MsgError(err)
}

@@ -239,29 +239,30 @@ func (m *Model) persistConversation() tea.Cmd {
return fmt.Errorf("Error: no messages to reply to")
}
}
return msgConversationPersisted{false, conversation, messages}
return msgConversationPersisted{conversation, messages}
}
}

func (m *Model) promptLLM() tea.Cmd {
m.state = pendingResponse
m.waitingForReply = true
m.replyCursor.Blink = false
m.status = "Press ctrl+c to cancel"

m.tokenCount = 0
m.startTime = time.Now()
m.elapsed = 0

return func() tea.Msg {
model, provider, err := m.Shared.Ctx.GetModelProvider(*m.Shared.Ctx.Config.Defaults.Model)
model, provider, err := m.State.Ctx.GetModelProvider(*m.State.Ctx.Config.Defaults.Model)
if err != nil {
return shared.MsgError(err)
}

requestParams := models.RequestParameters{
Model: model,
MaxTokens: *m.Shared.Ctx.Config.Defaults.MaxTokens,
Temperature: *m.Shared.Ctx.Config.Defaults.Temperature,
ToolBag: m.Shared.Ctx.EnabledTools,
MaxTokens: *m.State.Ctx.Config.Defaults.MaxTokens,
Temperature: *m.State.Ctx.Config.Defaults.Temperature,
ToolBag: m.State.Ctx.EnabledTools,
}

replyHandler := func(msg models.Message) {

@@ -33,7 +33,7 @@ func (m *Model) HandleInput(msg tea.KeyMsg) (bool, tea.Cmd) {

switch msg.String() {
case "esc":
if m.state == pendingResponse {
if m.waitingForReply {
m.stopSignal <- struct{}{}
return true, nil
}

@@ -41,7 +41,7 @@ func (m *Model) HandleInput(msg tea.KeyMsg) (bool, tea.Cmd) {
return shared.MsgViewChange(shared.StateConversations)
}
case "ctrl+c":
if m.state == pendingResponse {
if m.waitingForReply {
m.stopSignal <- struct{}{}
return true, nil
}

@@ -112,7 +112,9 @@ func (m *Model) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
return cmd != nil, cmd
case "ctrl+r":
// resubmit the conversation with all messages up until and including the selected message
if m.state == idle && m.selectedMessage < len(m.messages) {
if m.waitingForReply || len(m.messages) == 0 {
return true, nil
}
m.messages = m.messages[:m.selectedMessage+1]
m.messageCache = m.messageCache[:m.selectedMessage+1]
cmd := m.promptLLM()

@@ -120,7 +122,6 @@ func (m *Model) handleMessagesKey(msg tea.KeyMsg) (bool, tea.Cmd) {
m.content.GotoBottom()
return true, cmd
}
}
return false, nil
}

@@ -140,8 +141,8 @@ func (m *Model) handleInputKey(msg tea.KeyMsg) (bool, tea.Cmd) {
m.input.Blur()
return true, nil
case "ctrl+s":
// TODO: call a "handleSend" function which returns a tea.Cmd
if m.state != idle {
// TODO: call a "handleSend" function with returns a tea.Cmd
if m.waitingForReply {
return false, nil
}

@@ -42,11 +42,11 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
// wake up spinners and cursors
cmds = append(cmds, cursor.Blink, m.spinner.Tick)

if m.Shared.Values.ConvShortname != "" {
if m.State.Values.ConvShortname != "" {
// (re)load conversation contents
cmds = append(cmds, m.loadConversation(m.Shared.Values.ConvShortname))
cmds = append(cmds, m.loadConversation(m.State.Values.ConvShortname))

if m.conversation.ShortName.String != m.Shared.Values.ConvShortname {
if m.conversation.ShortName.String != m.State.Values.ConvShortname {
// clear existing messages if we're loading a new conversation
m.messages = []models.Message{}
m.selectedMessage = 0

@@ -90,19 +90,20 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
case msgResponseChunk:
cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk

if msg.Content == "" {
chunk := string(msg)
if chunk == "" {
break
}

last := len(m.messages) - 1
if last >= 0 && m.messages[last].Role.IsAssistant() {
// append chunk to existing message
m.setMessageContents(last, m.messages[last].Content+msg.Content)
m.setMessageContents(last, m.messages[last].Content+chunk)
} else {
// use chunk in new message
m.addMessage(models.Message{
Role: models.MessageRoleAssistant,
Content: msg.Content,
Content: chunk,
})
}
m.updateContent()

@@ -141,16 +142,18 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {

m.updateContent()
case msgResponseEnd:
m.state = idle
m.waitingForReply = false
last := len(m.messages) - 1
if last < 0 {
panic("Unexpected empty messages handling msgResponseEnd")
}
m.setMessageContents(last, strings.TrimSpace(m.messages[last].Content))
m.updateContent()
m.status = "Press ctrl+s to send"
case msgResponseError:
m.state = idle
m.Shared.Err = error(msg)
m.waitingForReply = false
m.status = "Press ctrl+s to send"
m.State.Err = error(msg)
m.updateContent()
case msgConversationTitleGenerated:
title := string(msg)

@@ -159,21 +162,18 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
cmds = append(cmds, m.updateConversationTitle(m.conversation))
}
case cursor.BlinkMsg:
if m.state == pendingResponse {
if m.waitingForReply {
// ensure we show the updated "wait for response" cursor blink state
m.updateContent()
}
case msgConversationPersisted:
m.conversation = msg.conversation
m.messages = msg.messages
if msg.isNew {
m.rootMessages = []models.Message{m.messages[0]}
}
m.rebuildMessageCache()
m.updateContent()
case msgMessageCloned:
if msg.Parent == nil {
m.conversation = msg.Conversation
m.conversation = &msg.Conversation
m.rootMessages = append(m.rootMessages, *msg)
}
cmds = append(cmds, m.loadConversationMessages())

@@ -131,7 +131,7 @@ func (m *Model) renderMessage(i int) string {
sb := &strings.Builder{}
sb.Grow(len(msg.Content) * 2)
if msg.Content != "" {
err := m.Shared.Ctx.Chroma.Highlight(sb, msg.Content)
err := m.State.Ctx.Chroma.Highlight(sb, msg.Content)
if err != nil {
sb.Reset()
sb.WriteString(msg.Content)

@@ -139,7 +139,7 @@ func (m *Model) renderMessage(i int) string {
}

// Show the assistant's cursor
if m.state == pendingResponse && i == len(m.messages)-1 && msg.Role == models.MessageRoleAssistant {
if m.waitingForReply && i == len(m.messages)-1 && msg.Role == models.MessageRoleAssistant {
sb.WriteString(m.replyCursor.View())
}

@@ -195,7 +195,7 @@ func (m *Model) renderMessage(i int) string {
if msg.Content != "" {
sb.WriteString("\n\n")
}
_ = m.Shared.Ctx.Chroma.HighlightLang(sb, toolString, "yaml")
_ = m.State.Ctx.Chroma.HighlightLang(sb, toolString, "yaml")
}

content := strings.TrimRight(sb.String(), "\n")

@@ -237,7 +237,7 @@ func (m *Model) conversationMessagesView() string {
lineCnt += lipgloss.Height(heading)

var rendered string
if m.state == pendingResponse && i == len(m.messages)-1 {
if m.waitingForReply && i == len(m.messages)-1 {
// do a direct render of final (assistant) message to handle the
// assistant cursor blink
rendered = m.renderMessage(i)

@@ -251,7 +251,7 @@ func (m *Model) conversationMessagesView() string {
}

// Render a placeholder for the incoming assistant reply
if m.state == pendingResponse && (len(m.messages) == 0 || m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant) {
if m.waitingForReply && (len(m.messages) == 0 || m.messages[len(m.messages)-1].Role != models.MessageRoleAssistant) {
heading := m.renderMessageHeading(-1, &models.Message{
Role: models.MessageRoleAssistant,
})

@@ -289,12 +289,9 @@ func (m *Model) footerView() string {
saving = savingStyle.Foreground(lipgloss.Color("1")).Render("❌💾")
}

var status string
switch m.state {
case pendingResponse:
status = "Press ctrl+c to cancel" + m.spinner.View()
default:
status = "Press ctrl+s to send"
status := m.status
if m.waitingForReply {
status += m.spinner.View()
}

leftSegments := []string{

@@ -308,7 +305,7 @@ func (m *Model) footerView() string {
rightSegments = append(rightSegments, segmentStyle.Render(throughput))
}

model := fmt.Sprintf("Model: %s", *m.Shared.Ctx.Config.Defaults.Model)
model := fmt.Sprintf("Model: %s", *m.State.Ctx.Config.Defaults.Model)
rightSegments = append(rightSegments, segmentStyle.Render(model))

left := strings.Join(leftSegments, segmentSeparator)

@@ -28,7 +28,7 @@ type (
)

type Model struct {
shared.Shared
shared.State
shared.Sections

conversations []loadedConversation

@@ -38,9 +38,9 @@ type Model struct {
content viewport.Model
}

func Conversations(shared shared.Shared) Model {
func Conversations(state shared.State) Model {
m := Model{
Shared: shared,
State: state,
content: viewport.New(0, 0),
}
return m

@@ -155,7 +155,7 @@ func (m *Model) loadConversations() tea.Cmd {
loaded := make([]loadedConversation, len(messages))
for i, m := range messages {
loaded[i].lastReply = m
loaded[i].conv = *m.Conversation
loaded[i].conv = m.Conversation
}

return msgConversationsLoaded(loaded)