lmcli/pkg/cmd/prompt.go
Matt Low 91d3c9c2e1 Update ChatCompletionClient
Instead of CreateChatCompletion* accepting a pointer to a slice of reply
messages, it accepts a callback which is called with each successive
reply in the conversation.

This gives the caller more flexibility in how it handles replies (e.g.
it can react to them immediately now, instead of waiting for the entire
call to finish)
2024-03-12 20:39:34 +00:00

43 lines
1002 B
Go

package cmd
import (
"fmt"
cmdutil "git.mlow.ca/mlow/lmcli/pkg/cmd/util"
"git.mlow.ca/mlow/lmcli/pkg/lmcli"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"github.com/spf13/cobra"
)
// PromptCmd returns the "prompt" cobra command, which sends a one-shot
// prompt (taken from CLI args, or an interactive editor when no args are
// given) to the configured LLM and displays its response.
func PromptCmd(ctx *lmcli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "prompt [message]",
		Short: "Do a one-shot prompt",
		Long:  `Prompt the Large Language Model and get a response.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Fall back to opening the user's editor with a comment header
			// when no message was passed on the command line.
			message := inputFromArgsOrEditor(args, "# What would you like to say?\n", "")
			if message == "" {
				// Error strings are lowercase, unpunctuated per Go convention.
				return fmt.Errorf("no message was provided")
			}

			// One-shot conversation: a system prompt followed by the
			// user's message.
			messages := []model.Message{
				{
					Role:    model.MessageRoleSystem,
					Content: getSystemPrompt(ctx),
				},
				{
					Role:    model.MessageRoleUser,
					Content: message,
				},
			}

			// FetchAndShowCompletion displays the reply itself; we only
			// care about the error here. Wrap with %w so callers can
			// inspect the chain with errors.Is/As.
			_, err := cmdutil.FetchAndShowCompletion(ctx, messages, nil)
			if err != nil {
				return fmt.Errorf("fetching LLM response: %w", err)
			}
			return nil
		},
	}
	return cmd
}