Private
Public Access
1
0

Update command flag handling

`lmcli chat` now supports the common prompt flags (model, length, system
prompt, etc.).
This commit is contained in:
2024-05-07 07:11:04 +00:00
parent 8e4ff90ab4
commit 2b38db7db7
11 changed files with 74 additions and 71 deletions

View File

@@ -22,21 +22,23 @@ func PromptCmd(ctx *lmcli.Context) *cobra.Command {
messages := []model.Message{
{
Role: model.MessageRoleSystem,
Content: getSystemPrompt(ctx),
Role: model.MessageRoleSystem,
Content: ctx.GetSystemPrompt(),
},
{
Role: model.MessageRoleUser,
Role: model.MessageRoleUser,
Content: message,
},
}
_, err := cmdutil.FetchAndShowCompletion(ctx, messages, nil)
_, err := cmdutil.Prompt(ctx, messages, nil)
if err != nil {
return fmt.Errorf("Error fetching LLM response: %v", err)
}
return nil
},
}
applyPromptFlags(ctx, cmd)
return cmd
}