Update command flag handling
`lmcli chat` now supports common prompt flags (model, length, system prompt, etc.)
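For illustration, an invocation using the new flags might look like the following; the exact flag names are assumptions based on the description above, not confirmed against lmcli's actual help output:

# flag names assumed from the commit description; check `lmcli chat --help`
lmcli chat --model gpt-4 --length 250 --system-prompt "You are a concise assistant"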
@@ -13,9 +13,9 @@ import (
 	"github.com/charmbracelet/lipgloss"
 )
 
-// fetchAndShowCompletion prompts the LLM with the given messages and streams
-// the response to stdout. Returns all model reply messages.
-func FetchAndShowCompletion(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
+// Prompt prompts the configured model and streams the response
+// to stdout. Returns all model reply messages.
+func Prompt(ctx *lmcli.Context, messages []model.Message, callback func(model.Message)) (string, error) {
 	content := make(chan string) // receives the response from LLM
 	defer close(content)
 
||||
@@ -109,7 +109,7 @@ func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist
 		}
 	}
 
-	_, err = FetchAndShowCompletion(ctx, allMessages, replyCallback)
+	_, err = Prompt(ctx, allMessages, replyCallback)
 	if err != nil {
 		lmcli.Fatal("Error fetching LLM response: %v\n", err)
 	}
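The `content` channel visible in the first hunk hints at the streaming pattern Prompt uses: a producer sends response chunks on a channel while the caller prints them as they arrive. A minimal self-contained sketch of that pattern follows; the names and data here are illustrative stand-ins, not lmcli's actual internals.

package main

import "fmt"

// main demonstrates the producer/consumer channel pattern: a goroutine
// streams string chunks (standing in for LLM tokens) on a channel, and
// the caller prints each chunk to stdout as it arrives.
func main() {
	content := make(chan string)
	go func() {
		defer close(content) // signal the consumer that the stream is done
		for _, chunk := range []string{"Hello", ", ", "world", "\n"} {
			content <- chunk
		}
	}()
	for chunk := range content {
		fmt.Print(chunk)
	}
}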