Matt Low
91d3c9c2e1
Instead of CreateChatCompletion* accepting a pointer to a slice of reply messages, it accepts a callback which is called with each successive reply in the conversation. This gives the caller more flexibility in how it handles replies (e.g. it can react to them immediately now, instead of waiting for the entire call to finish)
32 lines
802 B
Go
32 lines
802 B
Go
package provider
|
|
|
|
import (
|
|
"context"
|
|
|
|
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
|
|
)
|
|
|
|
// ReplyCallback is invoked once for each reply message produced during a
// chat completion request, letting the caller react to replies as they
// arrive instead of waiting for the entire call to finish.
type ReplyCallback func(model.Message)
|
|
|
|
// ChatCompletionClient is implemented by language model providers capable
// of responding to a chat-formatted conversation.
type ChatCompletionClient interface {
	// CreateChatCompletion requests a response to the provided messages.
	// callback is invoked with each successive reply message as it is
	// produced, and the complete user-facing response is returned as a
	// string.
	CreateChatCompletion(
		ctx context.Context,
		params model.RequestParameters,
		messages []model.Message,
		callback ReplyCallback,
	) (string, error)

	// CreateChatCompletionStream is like CreateChatCompletion, except the
	// response is additionally streamed via the output channel as it's
	// received.
	CreateChatCompletionStream(
		ctx context.Context,
		params model.RequestParameters,
		messages []model.Message,
		callback ReplyCallback,
		output chan<- string,
	) (string, error)
}
|