Parameterize the openai model used

Add `openai.defaultModel` to set the default; this will allow overriding
it with a CLI flag later.
Matt Low 2023-11-09 06:07:52 +00:00
parent 168e0cf5d3
commit ae424530f9
3 changed files with 9 additions and 8 deletions


@@ -123,7 +123,7 @@ var newCmd = &cobra.Command{
 			response <- HandleDelayedResponse(receiver)
 		}()
 
-		err = CreateChatCompletionStream(messages, MAX_TOKENS, receiver)
+		err = CreateChatCompletionStream(config.OpenAI.DefaultModel, messages, MAX_TOKENS, receiver)
 		if err != nil {
 			Fatal("%v\n", err)
 		}
@@ -163,7 +163,7 @@ var promptCmd = &cobra.Command{
 		receiver := make(chan string)
 		go HandleDelayedResponse(receiver)
 
-		err := CreateChatCompletionStream(messages, MAX_TOKENS, receiver)
+		err := CreateChatCompletionStream(config.OpenAI.DefaultModel, messages, MAX_TOKENS, receiver)
 		if err != nil {
 			Fatal("%v\n", err)
 		}
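
The CLI override mentioned in the commit message is not part of this change yet. A minimal sketch of how it could look with cobra; the `--model` flag name and `modelFlag` variable are hypothetical, not part of this commit:

```go
var modelFlag string

func init() {
	// Hypothetical flag; not part of this commit.
	promptCmd.Flags().StringVar(&modelFlag, "model", "", "override openai.defaultModel for this run")
}

// Inside the command's Run function, prefer the flag when set:
model := config.OpenAI.DefaultModel
if modelFlag != "" {
	model = modelFlag
}
err := CreateChatCompletionStream(model, messages, MAX_TOKENS, receiver)
```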


@@ -11,6 +11,7 @@ import (
 type Config struct {
 	OpenAI struct {
 		APIKey string `yaml:"apiKey"`
+		DefaultModel string `yaml:"defaultModel"`
 	} `yaml:"openai"`
 }
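
For reference, the YAML shape this struct now maps to. A minimal sketch, assuming `gopkg.in/yaml.v3` for decoding; the `gpt-3.5-turbo` value and the key values are illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Config mirrors the struct from the diff above.
type Config struct {
	OpenAI struct {
		APIKey       string `yaml:"apiKey"`
		DefaultModel string `yaml:"defaultModel"`
	} `yaml:"openai"`
}

func main() {
	// Example config file contents; values are placeholders.
	raw := []byte("openai:\n  apiKey: sk-example\n  defaultModel: gpt-3.5-turbo\n")

	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.OpenAI.DefaultModel) // gpt-3.5-turbo
}
```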


@@ -8,7 +8,7 @@ import (
 	openai "github.com/sashabaranov/go-openai"
 )
 
-func CreateChatCompletionRequest(messages []Message, maxTokens int) openai.ChatCompletionRequest {
+func CreateChatCompletionRequest(model string, messages []Message, maxTokens int) openai.ChatCompletionRequest {
 	chatCompletionMessages := []openai.ChatCompletionMessage{}
 	for _, m := range messages {
 		chatCompletionMessages = append(chatCompletionMessages, openai.ChatCompletionMessage{
@@ -18,7 +18,7 @@ func CreateChatCompletionRequest(messages []Message, maxTokens int) openai.ChatC
 	}
 
 	return openai.ChatCompletionRequest{
-		Model: openai.GPT3Dot5Turbo,
+		Model: model,
 		Messages: chatCompletionMessages,
 		MaxTokens: maxTokens,
 	}
@@ -26,9 +26,9 @@ func CreateChatCompletionRequest(messages []Message, maxTokens int) openai.ChatC
 
 // CreateChatCompletion submits a Chat Completion API request and returns the
 // response.
-func CreateChatCompletion(messages []Message, maxTokens int) (string, error) {
+func CreateChatCompletion(model string, messages []Message, maxTokens int) (string, error) {
 	client := openai.NewClient(config.OpenAI.APIKey)
-	req := CreateChatCompletionRequest(messages, maxTokens)
+	req := CreateChatCompletionRequest(model, messages, maxTokens)
 	resp, err := client.CreateChatCompletion(context.Background(), req)
 	if err != nil {
 		return "", err
@@ -39,9 +39,9 @@ func CreateChatCompletion(messages []Message, maxTokens int) (string, error) {
 
 // CreateChatCompletionStream submits a streaming Chat Completion API request
 // and streams the response to the provided output channel.
-func CreateChatCompletionStream(messages []Message, maxTokens int, output chan string) error {
+func CreateChatCompletionStream(model string, messages []Message, maxTokens int, output chan string) error {
 	client := openai.NewClient(config.OpenAI.APIKey)
-	req := CreateChatCompletionRequest(messages, maxTokens)
+	req := CreateChatCompletionRequest(model, messages, maxTokens)
 
 	defer close(output)
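
With the new signatures, callers thread the model through explicitly. A minimal usage sketch from inside the same package; the `Role`/`Content` fields on `Message` and the message contents are assumptions, not shown in this diff:

```go
// Assumes this code lives in the same package as CreateChatCompletion.
model := config.OpenAI.DefaultModel
messages := []Message{{Role: "user", Content: "Hello"}}

// Non-streaming variant: blocks until the full completion is returned.
resp, err := CreateChatCompletion(model, messages, MAX_TOKENS)
if err != nil {
	Fatal("%v\n", err)
}
fmt.Println(resp)
```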