Large refactor - it compiles!
This refactor splits out all conversation concerns into a new `conversation` package. There is now a split between `conversation`'s and `api`'s representation of `Message`, the latter storing the minimum information required for interaction with LLM providers. There is necessary conversion between the two when making LLM calls.
This commit is contained in:
@@ -9,7 +9,8 @@ import (
|
||||
"time"
|
||||
|
||||
"git.mlow.ca/mlow/lmcli/pkg/api"
|
||||
"git.mlow.ca/mlow/lmcli/pkg/api/provider"
|
||||
"git.mlow.ca/mlow/lmcli/pkg/provider"
|
||||
"git.mlow.ca/mlow/lmcli/pkg/conversation"
|
||||
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
|
||||
"git.mlow.ca/mlow/lmcli/pkg/util"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
|
||||
// Prompt prompts the configured the configured model and streams the response
|
||||
// to stdout. Returns all model reply messages.
|
||||
func Prompt(ctx *lmcli.Context, messages []api.Message, callback func(api.Message)) (*api.Message, error) {
|
||||
func Prompt(ctx *lmcli.Context, messages []conversation.Message, callback func(conversation.Message)) (*api.Message, error) {
|
||||
m, _, p, err := ctx.GetModelProvider(*ctx.Config.Defaults.Model, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -40,7 +41,7 @@ func Prompt(ctx *lmcli.Context, messages []api.Message, callback func(api.Messag
|
||||
}
|
||||
|
||||
if system != "" {
|
||||
messages = api.ApplySystemPrompt(messages, system, false)
|
||||
messages = conversation.ApplySystemPrompt(messages, system, false)
|
||||
}
|
||||
|
||||
content := make(chan provider.Chunk)
|
||||
@@ -50,7 +51,7 @@ func Prompt(ctx *lmcli.Context, messages []api.Message, callback func(api.Messag
|
||||
go ShowDelayedContent(content)
|
||||
|
||||
reply, err := p.CreateChatCompletionStream(
|
||||
context.Background(), params, messages, content,
|
||||
context.Background(), params, conversation.MessagesToAPI(messages), content,
|
||||
)
|
||||
|
||||
if reply.Content != "" {
|
||||
@@ -67,8 +68,8 @@ func Prompt(ctx *lmcli.Context, messages []api.Message, callback func(api.Messag
|
||||
|
||||
// lookupConversation either returns the conversation found by the
|
||||
// short name or exits the program
|
||||
func LookupConversation(ctx *lmcli.Context, shortName string) *api.Conversation {
|
||||
c, err := ctx.Store.ConversationByShortName(shortName)
|
||||
func LookupConversation(ctx *lmcli.Context, shortName string) *conversation.Conversation {
|
||||
c, err := ctx.Conversations.FindConversationByShortName(shortName)
|
||||
if err != nil {
|
||||
lmcli.Fatal("Could not lookup conversation: %v\n", err)
|
||||
}
|
||||
@@ -78,8 +79,8 @@ func LookupConversation(ctx *lmcli.Context, shortName string) *api.Conversation
|
||||
return c
|
||||
}
|
||||
|
||||
func LookupConversationE(ctx *lmcli.Context, shortName string) (*api.Conversation, error) {
|
||||
c, err := ctx.Store.ConversationByShortName(shortName)
|
||||
func LookupConversationE(ctx *lmcli.Context, shortName string) (*conversation.Conversation, error) {
|
||||
c, err := ctx.Conversations.FindConversationByShortName(shortName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not lookup conversation: %v", err)
|
||||
}
|
||||
@@ -89,8 +90,8 @@ func LookupConversationE(ctx *lmcli.Context, shortName string) (*api.Conversatio
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func HandleConversationReply(ctx *lmcli.Context, c *api.Conversation, persist bool, toSend ...api.Message) {
|
||||
messages, err := ctx.Store.PathToLeaf(c.SelectedRoot)
|
||||
func HandleConversationReply(ctx *lmcli.Context, c *conversation.Conversation, persist bool, toSend ...conversation.Message) {
|
||||
messages, err := ctx.Conversations.PathToLeaf(c.SelectedRoot)
|
||||
if err != nil {
|
||||
lmcli.Fatal("Could not load messages: %v\n", err)
|
||||
}
|
||||
@@ -99,40 +100,40 @@ func HandleConversationReply(ctx *lmcli.Context, c *api.Conversation, persist bo
|
||||
|
||||
// handleConversationReply handles sending messages to an existing
|
||||
// conversation, optionally persisting both the sent replies and responses.
|
||||
func HandleReply(ctx *lmcli.Context, to *api.Message, persist bool, messages ...api.Message) {
|
||||
func HandleReply(ctx *lmcli.Context, to *conversation.Message, persist bool, messages ...conversation.Message) {
|
||||
if to == nil {
|
||||
lmcli.Fatal("Can't prompt from an empty message.")
|
||||
}
|
||||
|
||||
existing, err := ctx.Store.PathToRoot(to)
|
||||
existing, err := ctx.Conversations.PathToRoot(to)
|
||||
if err != nil {
|
||||
lmcli.Fatal("Could not load messages: %v\n", err)
|
||||
}
|
||||
|
||||
RenderConversation(ctx, append(existing, messages...), true)
|
||||
|
||||
var savedReplies []api.Message
|
||||
var savedReplies []conversation.Message
|
||||
if persist && len(messages) > 0 {
|
||||
savedReplies, err = ctx.Store.Reply(to, messages...)
|
||||
savedReplies, err = ctx.Conversations.Reply(to, messages...)
|
||||
if err != nil {
|
||||
lmcli.Warn("Could not save messages: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// render a message header with no contents
|
||||
RenderMessage(ctx, (&api.Message{Role: api.MessageRoleAssistant}))
|
||||
RenderMessage(ctx, (&conversation.Message{Role: api.MessageRoleAssistant}))
|
||||
|
||||
var lastSavedMessage *api.Message
|
||||
var lastSavedMessage *conversation.Message
|
||||
lastSavedMessage = to
|
||||
if len(savedReplies) > 0 {
|
||||
lastSavedMessage = &savedReplies[len(savedReplies)-1]
|
||||
}
|
||||
|
||||
replyCallback := func(reply api.Message) {
|
||||
replyCallback := func(reply conversation.Message) {
|
||||
if !persist {
|
||||
return
|
||||
}
|
||||
savedReplies, err = ctx.Store.Reply(lastSavedMessage, reply)
|
||||
savedReplies, err = ctx.Conversations.Reply(lastSavedMessage, reply)
|
||||
if err != nil {
|
||||
lmcli.Warn("Could not save reply: %v\n", err)
|
||||
}
|
||||
@@ -145,7 +146,7 @@ func HandleReply(ctx *lmcli.Context, to *api.Message, persist bool, messages ...
|
||||
}
|
||||
}
|
||||
|
||||
func FormatForExternalPrompt(messages []api.Message, system bool) string {
|
||||
func FormatForExternalPrompt(messages []conversation.Message, system bool) string {
|
||||
sb := strings.Builder{}
|
||||
for _, message := range messages {
|
||||
if message.Content == "" {
|
||||
@@ -164,7 +165,7 @@ func FormatForExternalPrompt(messages []api.Message, system bool) string {
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func GenerateTitle(ctx *lmcli.Context, messages []api.Message) (string, error) {
|
||||
func GenerateTitle(ctx *lmcli.Context, messages []conversation.Message) (string, error) {
|
||||
const systemPrompt = `You will be shown a conversation between a user and an AI assistant. Your task is to generate a short title (8 words or less) for the provided conversation that reflects the conversation's topic. Your response is expected to be in JSON in the format shown below.
|
||||
|
||||
Example conversation:
|
||||
@@ -189,19 +190,19 @@ Example response:
|
||||
}
|
||||
|
||||
// Serialize the conversation to JSON
|
||||
conversation, err := json.Marshal(msgs)
|
||||
jsonBytes, err := json.Marshal(msgs)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
generateRequest := []api.Message{
|
||||
generateRequest := []conversation.Message{
|
||||
{
|
||||
Role: api.MessageRoleSystem,
|
||||
Content: systemPrompt,
|
||||
},
|
||||
{
|
||||
Role: api.MessageRoleUser,
|
||||
Content: string(conversation),
|
||||
Content: string(jsonBytes),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -218,7 +219,7 @@ Example response:
|
||||
}
|
||||
|
||||
response, err := p.CreateChatCompletion(
|
||||
context.Background(), requestParams, generateRequest,
|
||||
context.Background(), requestParams, conversation.MessagesToAPI(generateRequest),
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -293,7 +294,7 @@ func ShowDelayedContent(content <-chan provider.Chunk) {
|
||||
// RenderConversation renders the given messages to TTY, with optional space
|
||||
// for a subsequent message. spaceForResponse controls how many '\n' characters
|
||||
// are printed immediately after the final message (1 if false, 2 if true)
|
||||
func RenderConversation(ctx *lmcli.Context, messages []api.Message, spaceForResponse bool) {
|
||||
func RenderConversation(ctx *lmcli.Context, messages []conversation.Message, spaceForResponse bool) {
|
||||
l := len(messages)
|
||||
for i, message := range messages {
|
||||
RenderMessage(ctx, &message)
|
||||
@@ -304,7 +305,7 @@ func RenderConversation(ctx *lmcli.Context, messages []api.Message, spaceForResp
|
||||
}
|
||||
}
|
||||
|
||||
func RenderMessage(ctx *lmcli.Context, m *api.Message) {
|
||||
func RenderMessage(ctx *lmcli.Context, m *conversation.Message) {
|
||||
var messageAge string
|
||||
if m.CreatedAt.IsZero() {
|
||||
messageAge = "now"
|
||||
|
||||
Reference in New Issue
Block a user