Large refactor - it compiles!
This refactor splits out all conversation concerns into a new `conversation` package. There is now a split between `conversation` and `api`'s representation of `Message`, the latter storing the minimum information required for interaction with LLM providers. There is necessary conversion between the two when making LLM calls.
This commit is contained in:
41
pkg/provider/provider.go
Normal file
41
pkg/provider/provider.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.mlow.ca/mlow/lmcli/pkg/api"
|
||||
)
|
||||
|
||||
// Chunk is a single piece of streamed completion output delivered on the
// channel passed to CreateChatCompletionStream.
type Chunk struct {
	// Content is the text carried by this chunk.
	Content string
	// TokenCount is presumably the number of tokens represented by Content
	// (per-chunk rather than cumulative) — TODO confirm against provider
	// implementations.
	TokenCount uint
}
|
||||
|
||||
// RequestParameters bundles the model selection, sampling settings, and
// available tools for a single chat completion request.
type RequestParameters struct {
	// Model is the provider-specific model identifier.
	Model string

	// MaxTokens limits the length of the generated completion.
	MaxTokens   int
	// Temperature and TopP are sampling parameters; their exact semantics
	// and valid ranges are provider-defined.
	Temperature float32
	TopP        float32

	// Toolbox lists the tools the model may invoke during the completion.
	Toolbox []api.ToolSpec
}
|
||||
|
||||
// ChatCompletionProvider is implemented by LLM backends that can generate
// chat completions from a sequence of api.Messages.
type ChatCompletionProvider interface {
	// CreateChatCompletion generates a chat completion response to the
	// provided messages.
	CreateChatCompletion(
		ctx context.Context,
		params RequestParameters,
		messages []api.Message,
	) (*api.Message, error)

	// CreateChatCompletionStream is like CreateChatCompletion, except the
	// response is streamed via the chunks output channel. The completed
	// message is also returned once streaming finishes.
	CreateChatCompletionStream(
		ctx context.Context,
		params RequestParameters,
		messages []api.Message,
		chunks chan<- Chunk,
	) (*api.Message, error)
}
|
||||
Reference in New Issue
Block a user