Increase MaxTokens to 256 on OpenAI requests
Refactor: extract shared request construction into CreateChatCompletionRequest so the streaming and non-streaming paths build identical requests
This commit is contained in:
parent
68f986dc06
commit
7b9cd76555
44
openai.go
44
openai.go
@ -9,25 +9,30 @@ import (
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
// CreateChatCompletion accepts a slice of Message and returns the response
|
||||
// of the Large Language Model.
|
||||
func CreateChatCompletion(system string, messages []Message) (string, error) {
|
||||
client := openai.NewClient(os.Getenv("OPENAI_APIKEY"))
|
||||
|
||||
var openaiMessages []openai.ChatCompletionMessage
|
||||
func CreateChatCompletionRequest(messages []Message) (openai.ChatCompletionRequest) {
|
||||
var chatCompletionMessages []openai.ChatCompletionMessage
|
||||
for _, m := range(messages) {
|
||||
openaiMessages = append(openaiMessages, openai.ChatCompletionMessage{
|
||||
chatCompletionMessages = append(chatCompletionMessages, openai.ChatCompletionMessage{
|
||||
Role: m.Role,
|
||||
Content: m.OriginalContent,
|
||||
})
|
||||
}
|
||||
|
||||
return openai.ChatCompletionRequest{
|
||||
Model: openai.GPT4,
|
||||
MaxTokens: 256,
|
||||
Messages: chatCompletionMessages,
|
||||
Stream: true,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateChatCompletion accepts a slice of Message and returns the response
|
||||
// of the Large Language Model.
|
||||
func CreateChatCompletion(system string, messages []Message) (string, error) {
|
||||
client := openai.NewClient(os.Getenv("OPENAI_APIKEY"))
|
||||
resp, err := client.CreateChatCompletion(
|
||||
context.Background(),
|
||||
openai.ChatCompletionRequest{
|
||||
Model: openai.GPT4,
|
||||
Messages: openaiMessages,
|
||||
},
|
||||
CreateChatCompletionRequest(messages),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@ -41,21 +46,8 @@ func CreateChatCompletionStream(system string, messages []Message, output io.Wri
|
||||
client := openai.NewClient(os.Getenv("OPENAI_APIKEY"))
|
||||
ctx := context.Background()
|
||||
|
||||
|
||||
var chatCompletionMessages []openai.ChatCompletionMessage
|
||||
for _, m := range(messages) {
|
||||
chatCompletionMessages = append(chatCompletionMessages, openai.ChatCompletionMessage{
|
||||
Role: m.Role,
|
||||
Content: m.OriginalContent,
|
||||
})
|
||||
}
|
||||
|
||||
req := openai.ChatCompletionRequest{
|
||||
Model: openai.GPT3Dot5Turbo,
|
||||
MaxTokens: 20,
|
||||
Messages: chatCompletionMessages,
|
||||
Stream: true,
|
||||
}
|
||||
req := CreateChatCompletionRequest(messages)
|
||||
req.Stream = true
|
||||
|
||||
stream, err := client.CreateChatCompletionStream(ctx, req)
|
||||
if err != nil {
|
||||
|
Loading…
Reference in New Issue
Block a user