Update command flag handling

`lmcli chat` now supports the common prompt flags (model, length, system
prompt, etc.).
commit 2b38db7db7
parent 8e4ff90ab4
2024-05-07 07:11:04 +00:00

11 changed files with 74 additions and 71 deletions
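
The description implies the prompt flags are now defined once and attached to each prompting command. Below is a minimal sketch of that pattern, assuming a cobra-style CLI and hypothetical flag names (--model, --length, --system-prompt); lmcli's actual flag set and wiring may differ.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// promptFlags holds the values of flags shared by prompt-issuing commands.
// The flag names below are illustrative, not necessarily lmcli's.
type promptFlags struct {
	model        string
	length       int
	systemPrompt string
}

// register attaches the shared flags to any command that prompts a model.
func (f *promptFlags) register(cmd *cobra.Command) {
	cmd.Flags().StringVarP(&f.model, "model", "m", "", "model to prompt")
	cmd.Flags().IntVar(&f.length, "length", 0, "maximum response length")
	cmd.Flags().StringVar(&f.systemPrompt, "system-prompt", "", "system prompt to send")
}

func main() {
	flags := &promptFlags{}
	chatCmd := &cobra.Command{
		Use: "chat",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("chat: model=%q length=%d\n", flags.model, flags.length)
		},
	}
	flags.register(chatCmd) // the same call wires these flags into other commands

	root := &cobra.Command{Use: "lmcli"}
	root.AddCommand(chatCmd)
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}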


@@ -13,9 +13,9 @@ import (
 	"github.com/charmbracelet/lipgloss"
 )
-// fetchAndShowCompletion prompts the LLM with the given messages and streams
-// the response to stdout. Returns all model reply messages.
-func FetchAndShowCompletion(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
+// Prompt prompts the configured model and streams the response
+// to stdout. Returns all model reply messages.
+func Prompt(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
 	content := make(chan string) // receives the response from LLM
 	defer close(content)
@@ -109,7 +109,7 @@ func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist
 		}
 	}
-	_, err = FetchAndShowCompletion(ctx, allMessages, replyCallback)
+	_, err = Prompt(ctx, allMessages, replyCallback)
 	if err != nil {
 		lmcli.Fatal("Error fetching LLM response: %v\n", err)
 	}
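
For context, here is a self-contained sketch of the call shape after the rename. The Context and Message types and the Prompt body below are stand-ins for illustration; only the signature mirrors the diff.

package main

import "fmt"

type Context struct{} // stand-in for *lmcli.Context

type Message struct { // stand-in for model.Message
	Role, Content string
}

// Prompt mirrors the renamed function's signature: it would stream the
// model's response to stdout and invoke callback for each reply message.
func Prompt(ctx *Context, messages []Message, callback func(Message)) (string, error) {
	reply := Message{Role: "assistant", Content: "(streamed response)"}
	fmt.Println(reply.Content) // stands in for streaming to stdout
	callback(reply)
	return reply.Content, nil
}

func main() {
	msgs := []Message{{Role: "user", Content: "Hello!"}}
	_, err := Prompt(&Context{}, msgs, func(reply Message) {
		// e.g. persist the reply, as HandleConversationReply's callback does
		fmt.Printf("callback got %d chars\n", len(reply.Content))
	})
	if err != nil {
		fmt.Println("Error fetching LLM response:", err)
	}
}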