Add --model flag to control which language model to use
parent 8bc8312154
commit 965043c908
@@ -11,12 +11,14 @@ import (
 
 var (
 	maxTokens int
+	model     string
 )
 
 func init() {
 	inputCmds := []*cobra.Command{newCmd, promptCmd, replyCmd}
 	for _, cmd := range inputCmds {
 		cmd.Flags().IntVar(&maxTokens, "length", config.OpenAI.DefaultMaxLength, "Max response length in tokens")
+		cmd.Flags().StringVar(&model, "model", config.OpenAI.DefaultModel, "The language model to use")
 	}
 
 	rootCmd.AddCommand(
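For context, the hunk above registers one shared --model flag on every input command, backed by a single package-level variable. Below is a minimal standalone sketch of that cobra pattern; the binary name app, the ask command, and the default value are hypothetical stand-ins, not taken from this repo.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var model string // shared by every command the loop touches, as in the hunk

func main() {
	rootCmd := &cobra.Command{Use: "app"} // hypothetical binary name
	askCmd := &cobra.Command{
		Use: "ask",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("using model:", model)
		},
	}
	// Same pattern as the diff: loop over the input commands and
	// register --model on each, all writing to the one shared variable.
	for _, cmd := range []*cobra.Command{askCmd} {
		cmd.Flags().StringVar(&model, "model", "default-model", "The language model to use")
	}
	rootCmd.AddCommand(askCmd)
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}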
@@ -274,7 +276,7 @@ var replyCmd = &cobra.Command{
 			response <- HandleDelayedResponse(receiver)
 		}()
 
-		err = CreateChatCompletionStream(config.OpenAI.DefaultModel, messages, maxTokens, receiver)
+		err = CreateChatCompletionStream(model, messages, maxTokens, receiver)
 		if err != nil {
 			Fatal("%v\n", err)
 		}
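The line changed here sits inside a streaming pattern: a goroutine blocks on HandleDelayedResponse(receiver) while CreateChatCompletionStream feeds chunks into receiver. A minimal, self-contained sketch of that hand-off, where streamCompletion and collect are hypothetical stand-ins for the repo's functions:

package main

import "fmt"

// streamCompletion is a hypothetical stand-in for CreateChatCompletionStream:
// it pushes response chunks into receiver and closes it when done.
func streamCompletion(model string, receiver chan<- string) error {
	for _, chunk := range []string{"hel", "lo"} {
		receiver <- chunk
	}
	close(receiver)
	return nil
}

// collect is a hypothetical stand-in for HandleDelayedResponse:
// it drains receiver and returns the assembled text.
func collect(receiver <-chan string) string {
	var out string
	for chunk := range receiver {
		out += chunk
	}
	return out
}

func main() {
	receiver := make(chan string)
	response := make(chan string)
	go func() {
		// Mirrors the diff: response <- HandleDelayedResponse(receiver)
		response <- collect(receiver)
	}()
	if err := streamCompletion("some-model", receiver); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(<-response)
}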
@@ -354,7 +356,7 @@ var newCmd = &cobra.Command{
 			response <- HandleDelayedResponse(receiver)
 		}()
 
-		err = CreateChatCompletionStream(config.OpenAI.DefaultModel, messages, maxTokens, receiver)
+		err = CreateChatCompletionStream(model, messages, maxTokens, receiver)
 		if err != nil {
 			Fatal("%v\n", err)
 		}
@@ -403,7 +405,7 @@ var promptCmd = &cobra.Command{
 
 		receiver := make(chan string)
 		go HandleDelayedResponse(receiver)
-		err := CreateChatCompletionStream(config.OpenAI.DefaultModel, messages, maxTokens, receiver)
+		err := CreateChatCompletionStream(model, messages, maxTokens, receiver)
 		if err != nil {
 			Fatal("%v\n", err)
 		}
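With the flag in place, the model becomes a per-invocation choice instead of always falling back to config.OpenAI.DefaultModel. A hypothetical invocation (binary and model names assumed; the --model and --length flags are the ones this diff wires up):

app prompt --model gpt-4 --length 256 "Summarize the design"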