Private
Public Access
1
0

Properly support per-model maxTokens/temperature

This commit is contained in:
2025-06-25 07:49:29 +00:00
parent 259648f699
commit 3cd897d494
3 changed files with 76 additions and 32 deletions

View File

@@ -19,15 +19,16 @@ import (
// Prompt prompts the configured model and streams the response
// to stdout. Returns all model reply messages.
func Prompt(ctx *lmcli.Context, messages []conversation.Message, callback func(conversation.Message)) (*api.Message, error) {
m, _, p, err := ctx.GetModelProvider(*ctx.Config.Defaults.Model, "")
modelConfig, err := ctx.GetModelProvider(*ctx.Config.Defaults.Model, "")
if err != nil {
return nil, err
}
p := modelConfig.Client
params := provider.RequestParameters{
Model: m,
MaxTokens: *ctx.Config.Defaults.MaxTokens,
Temperature: *ctx.Config.Defaults.Temperature,
Model: modelConfig.Model,
MaxTokens: modelConfig.MaxTokens,
Temperature: modelConfig.Temperature,
}
system := ctx.DefaultSystemPrompt()
@@ -206,15 +207,16 @@ Example response:
},
}
m, _, p, err := ctx.GetModelProvider(
modelConfig, err := ctx.GetModelProvider(
*ctx.Config.Conversations.TitleGenerationModel, "",
)
if err != nil {
return "", err
}
p := modelConfig.Client
requestParams := provider.RequestParameters{
Model: m,
Model: modelConfig.Model,
MaxTokens: 25,
}