Use the streamed response API

This commit is contained in:
Matt Low 2023-10-30 21:45:21 +00:00
parent c35967f797
commit 68f986dc06
2 changed files with 47 additions and 3 deletions

View File

@ -107,13 +107,13 @@ var newCmd = &cobra.Command{
}, },
} }
response, err := CreateChatCompletion("You are a helpful assistant.", messages) err = CreateChatCompletionStream("You are a helpful assistant.", messages, os.Stdout)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error getting chat response: %v\n", err) fmt.Fprintf(os.Stderr, "An error occured: %v\n", err)
os.Exit(1) os.Exit(1)
} }
fmt.Println(response); fmt.Println()
}, },
} }

View File

@ -2,7 +2,9 @@ package main
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io"
"os" "os"
openai "github.com/sashabaranov/go-openai" openai "github.com/sashabaranov/go-openai"
) )
@ -34,3 +36,45 @@ func CreateChatCompletion(system string, messages []Message) (string, error) {
return resp.Choices[0].Message.Content, nil return resp.Choices[0].Message.Content, nil
} }
// CreateChatCompletionStream requests a streamed chat completion from the
// OpenAI API and writes each content delta to output as it arrives.
//
// system is the system prompt that seeds the conversation and messages are
// the prior conversation turns. The call blocks until the stream ends; a
// normal end of stream (io.EOF) returns nil, any other stream error is
// returned to the caller.
func CreateChatCompletionStream(system string, messages []Message, output io.Writer) error {
	client := openai.NewClient(os.Getenv("OPENAI_APIKEY"))
	ctx := context.Background()

	// Prepend the system prompt, then append the conversation history.
	// Fix: the original body never referenced the system parameter, so the
	// system prompt was silently dropped from the request (the non-streaming
	// CreateChatCompletion takes the same parameter and the caller passes a
	// real prompt).
	chatCompletionMessages := make([]openai.ChatCompletionMessage, 0, len(messages)+1)
	chatCompletionMessages = append(chatCompletionMessages, openai.ChatCompletionMessage{
		Role:    "system",
		Content: system,
	})
	for _, m := range messages {
		chatCompletionMessages = append(chatCompletionMessages, openai.ChatCompletionMessage{
			Role:    m.Role,
			Content: m.OriginalContent,
		})
	}

	req := openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		// NOTE(review): 20 tokens looks like a debugging leftover — confirm
		// the intended completion budget before relying on this.
		MaxTokens: 20,
		Messages:  chatCompletionMessages,
		Stream:    true,
	}

	stream, err := client.CreateChatCompletionStream(ctx, req)
	if err != nil {
		return err
	}
	defer stream.Close()

	// Drain the stream, forwarding each content delta to output as soon as
	// it arrives. io.EOF signals a normal end of stream.
	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Fprint(output, response.Choices[0].Delta.Content)
	}
}