Compare commits: 7bfddaabce ... 9d83013f52 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 9d83013f52 | |
| | e92a0ff50d | |

These two commits remove `context.Context` from the provider-facing completion API (each provider now supplies its own `context.Background()` internally) and rework the TUI's key handling, replacing the input/messages focus model with a confirm-before-send prompt and a centered footer.
package util:

```diff
@@ -1,7 +1,6 @@
 package util
 
 import (
-	"context"
 	"fmt"
 	"os"
 	"strings"
@@ -36,7 +35,7 @@ func FetchAndShowCompletion(ctx *lmcli.Context, messages []model.Message) ([]mod
 
 	var apiReplies []model.Message
 	response, err := completionProvider.CreateChatCompletionStream(
-		context.Background(), requestParams, messages, &apiReplies, content,
+		requestParams, messages, &apiReplies, content,
 	)
 	if response != "" {
 		// there was some content, so break to a new line after it
@@ -154,7 +153,7 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
 		MaxTokens: 25,
 	}
 
-	response, err := completionProvider.CreateChatCompletion(context.Background(), requestParams, generateRequest, nil)
+	response, err := completionProvider.CreateChatCompletion(requestParams, generateRequest, nil)
 	if err != nil {
 		return "", err
 	}
```
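Both call sites above passed `context.Background()` rather than a cancellable context, so dropping the parameter loses no actual behavior here. For contrast, a purely hypothetical caller that did exploit the old signature might have wired Ctrl+C to cancellation like this (a sketch only; the signal wiring, the function name, and the `client` parameter are not from the repo):

```go
// Hypothetical sketch: aborting an in-flight completion via the old,
// context-aware signature. No such wiring exists in the diff above.
func fetchWithCancel(
	client provider.ChatCompletionClient,
	requestParams model.RequestParameters,
	messages []model.Message,
	content chan string, // assumed to be drained by the caller
) (string, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt) // Ctrl+C
	go func() {
		<-sigs
		cancel() // would abort the provider's in-flight HTTP request
	}()

	var apiReplies []model.Message
	return client.CreateChatCompletionStream(ctx, requestParams, messages, &apiReplies, content)
}
```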
package anthropic (Anthropic provider):

```diff
@@ -3,15 +3,14 @@ package anthropic
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"net/http"
 	"strings"
 
-	"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 	"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
+	"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 )
 
 type AnthropicClient struct {
@@ -103,7 +102,7 @@ func buildRequest(params model.RequestParameters, messages []model.Message) Requ
 	return requestBody
 }
 
-func sendRequest(ctx context.Context, c *AnthropicClient, r Request) (*http.Response, error) {
+func sendRequest(c *AnthropicClient, r Request) (*http.Response, error) {
 	url := "https://api.anthropic.com/v1/messages"
 
 	jsonBody, err := json.Marshal(r)
@@ -111,7 +110,7 @@ func sendRequest(ctx context.Context, c *AnthropicClient, r Request) (*http.Resp
 		return nil, fmt.Errorf("failed to marshal request body: %v", err)
 	}
 
-	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonBody))
+	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
 	if err != nil {
 		return nil, fmt.Errorf("failed to create HTTP request: %v", err)
 	}
@@ -130,14 +129,13 @@ func sendRequest(ctx context.Context, c *AnthropicClient, r Request) (*http.Resp
 }
 
 func (c *AnthropicClient) CreateChatCompletion(
-	ctx context.Context,
 	params model.RequestParameters,
 	messages []model.Message,
 	replies *[]model.Message,
 ) (string, error) {
 	request := buildRequest(params, messages)
 
-	resp, err := sendRequest(ctx, c, request)
+	resp, err := sendRequest(c, request)
 	if err != nil {
 		return "", err
 	}
@@ -169,7 +167,6 @@ func (c *AnthropicClient) CreateChatCompletion(
 }
 
 func (c *AnthropicClient) CreateChatCompletionStream(
-	ctx context.Context,
 	params model.RequestParameters,
 	messages []model.Message,
 	replies *[]model.Message,
@@ -178,7 +175,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
 	request := buildRequest(params, messages)
 	request.Stream = true
 
-	resp, err := sendRequest(ctx, c, request)
+	resp, err := sendRequest(c, request)
 	if err != nil {
 		return "", err
 	}
@@ -298,7 +295,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
 				// Recurse into CreateChatCompletionStream with the tool call replies
 				// added to the original messages
 				messages = append(append(messages, toolCall), toolReply)
-				return c.CreateChatCompletionStream(ctx, params, messages, replies, output)
+				return c.CreateChatCompletionStream(params, messages, replies, output)
 			}
 		}
 	case "message_stop":
```
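The switch from `http.NewRequestWithContext` to `http.NewRequest` is behavior-neutral today but closes the door on cancellation: `http.NewRequest` is documented as `NewRequestWithContext` with `context.Background()`. A standalone demo (not repo code) of the capability this gives up:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// A request built with NewRequestWithContext dies when its context does.
// After this change, sendRequest can no longer be aborted by a caller.
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.anthropic.com/v1/messages", nil)
	if err != nil {
		panic(err)
	}
	if _, err := http.DefaultClient.Do(req); err != nil {
		fmt.Println(err) // context deadline exceeded, after ~50ms
	}
}
```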
OpenAI provider:

```diff
@@ -157,14 +157,13 @@ func handleToolCalls(
 }
 
 func (c *OpenAIClient) CreateChatCompletion(
-	ctx context.Context,
 	params model.RequestParameters,
 	messages []model.Message,
 	replies *[]model.Message,
 ) (string, error) {
 	client := openai.NewClient(c.APIKey)
 	req := createChatCompletionRequest(c, params, messages)
-	resp, err := client.CreateChatCompletion(ctx, req)
+	resp, err := client.CreateChatCompletion(context.Background(), req)
 	if err != nil {
 		return "", err
 	}
@@ -183,7 +182,7 @@ func (c *OpenAIClient) CreateChatCompletion(
 
 		// Recurse into CreateChatCompletion with the tool call replies
 		messages = append(messages, results...)
-		return c.CreateChatCompletion(ctx, params, messages, replies)
+		return c.CreateChatCompletion(params, messages, replies)
 	}
 
 	if replies != nil {
@@ -198,7 +197,6 @@ func (c *OpenAIClient) CreateChatCompletion(
 }
 
 func (c *OpenAIClient) CreateChatCompletionStream(
-	ctx context.Context,
 	params model.RequestParameters,
 	messages []model.Message,
 	replies *[]model.Message,
@@ -207,7 +205,7 @@ func (c *OpenAIClient) CreateChatCompletionStream(
 	client := openai.NewClient(c.APIKey)
 	req := createChatCompletionRequest(c, params, messages)
 
-	stream, err := client.CreateChatCompletionStream(ctx, req)
+	stream, err := client.CreateChatCompletionStream(context.Background(), req)
 	if err != nil {
 		return "", err
 	}
@@ -258,7 +256,7 @@ func (c *OpenAIClient) CreateChatCompletionStream(
 
 		// Recurse into CreateChatCompletionStream with the tool call replies
 		messages = append(messages, results...)
-		return c.CreateChatCompletionStream(ctx, params, messages, replies, output)
+		return c.CreateChatCompletionStream(params, messages, replies, output)
 	}
 
 	if replies != nil {
```
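Both providers end tool-call turns the same way: append the tool results to `messages` and recurse into the same completion method. A hypothetical equivalent written as a loop, with the provider-specific steps abstracted into function parameters (`complete` and `executeToolCalls` are stand-ins, not names from the repo):

```go
// Hypothetical sketch of the providers' tool-call recursion, iteratively.
func completeWithTools(
	complete func(messages []model.Message) (response string, toolCalls []model.Message, err error),
	executeToolCalls func(calls []model.Message) ([]model.Message, error),
	messages []model.Message,
) (string, error) {
	for {
		// Ask the model for a reply; it may answer with tool calls instead.
		response, toolCalls, err := complete(messages)
		if err != nil {
			return "", err
		}
		if len(toolCalls) == 0 {
			return response, nil // plain text answer; done
		}
		// Run the requested tools and feed the results back to the model,
		// mirroring the recursion in CreateChatCompletion(Stream).
		results, err := executeToolCalls(toolCalls)
		if err != nil {
			return "", err
		}
		messages = append(messages, results...)
	}
}
```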
package provider:

```diff
@@ -1,17 +1,12 @@
 package provider
 
-import (
-	"context"
-
-	"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
-)
+import "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 
 type ChatCompletionClient interface {
 	// CreateChatCompletion requests a response to the provided messages.
 	// Replies are appended to the given replies struct, and the
 	// complete user-facing response is returned as a string.
 	CreateChatCompletion(
-		ctx context.Context,
 		params model.RequestParameters,
 		messages []model.Message,
 		replies *[]model.Message,
@@ -20,7 +15,6 @@ type ChatCompletionClient interface {
 	// Like CreateChageCompletion, except the response is streamed via
 	// the output channel as it's received.
 	CreateChatCompletionStream(
-		ctx context.Context,
 		params model.RequestParameters,
 		messages []model.Message,
 		replies *[]model.Message,
```
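Pulling the two hunks together, the interface after this change presumably reads as below. The hunks cut off before the trailing parameter of `CreateChatCompletionStream`, so the output channel shown here is inferred from the doc comment and the four-argument call sites:

```go
package provider

import "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"

type ChatCompletionClient interface {
	// CreateChatCompletion requests a response to the provided messages.
	// Replies are appended to the given replies struct, and the
	// complete user-facing response is returned as a string.
	CreateChatCompletion(
		params model.RequestParameters,
		messages []model.Message,
		replies *[]model.Message,
	) (string, error)

	// Like CreateChageCompletion, except the response is streamed via
	// the output channel as it's received.
	CreateChatCompletionStream(
		params model.RequestParameters,
		messages []model.Message,
		replies *[]model.Message,
		output chan string, // inferred: the hunk ends before this line
	) (string, error)
}
```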
pkg/tui/tui.go:

```diff
@@ -2,6 +2,7 @@ package tui
 
 // The terminal UI for lmcli, launched from the `lmcli chat` command
 // TODO:
+// - mode/focus changing between input and message selection
 // - binding to open selected message/input in $EDITOR
 
 import (
@@ -17,13 +18,6 @@ import (
 	"github.com/charmbracelet/lipgloss"
 )
 
-type focusState int
-
-const (
-	focusInput focusState = iota
-	focusMessages
-)
-
 type model struct {
 	ctx           *lmcli.Context
 	convShortname string
@@ -35,13 +29,13 @@ type model struct {
 	err error
 
 	// ui state
-	focus     focusState
-	isWaiting bool
-	status    string // a general status message
+	isWaiting     bool
+	confirmPrompt bool
 
 	// ui elements
 	content viewport.Model
 	input   textarea.Model
+	footer  string
 }
 
 type message struct {
```
```diff
@@ -87,25 +81,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 
 	switch msg := msg.(type) {
 	case tea.KeyMsg:
-		switch msg.String() {
-		case "ctrl+c":
-			return m, tea.Quit
-		case "q":
-			if m.focus != focusInput {
-				return m, tea.Quit
-			}
-		default:
-			var inputHandled tea.Cmd
-			switch m.focus {
-			case focusInput:
-				inputHandled = m.handleInputKey(msg)
-			case focusMessages:
-				inputHandled = m.handleMessagesKey(msg)
-			}
-			if inputHandled != nil {
-				return m, inputHandled
-			}
-		}
+		cmd = m.handleKeyMsg(msg)
 	case tea.WindowSizeMsg:
 		m.content.Width = msg.Width
 		m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView())
@@ -126,7 +102,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			m.messages[i].Content += chunk
 		default:
 			m.messages = append(m.messages, models.Message{
 				Role:    models.MessageRoleAssistant,
 				Content: chunk,
 			})
 		}
@@ -135,7 +111,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		cmd = waitForChunk(m.replyChan) // wait for the next chunk
 	case msgResponseEnd:
 		m.isWaiting = false
-		m.status = "Press ctrl+s to send"
+		m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
 	}
 
 	if cmd != nil {
@@ -170,6 +146,9 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
 		convShortname: convShortname,
 
 		replyChan: make(chan string),
+
+		isWaiting:     false,
+		confirmPrompt: false,
 	}
 
 	m.content = viewport.New(0, 0)
```
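The streaming seen in `Update` follows the usual Bubble Tea pattern: a command blocks on `replyChan` for one chunk, delivers it as a message, and the chunk branch re-issues `waitForChunk` to fetch the next one. The command itself is not shown in these hunks, but it plausibly looks like this (the `msgResponseChunk` type is assumed; only its handling appears in the diff):

```go
// Sketch of the waitForChunk command referenced in Update. A tea.Cmd is
// just a func() tea.Msg; this one blocks until the next chunk arrives.
func waitForChunk(ch chan string) tea.Cmd {
	return func() tea.Msg {
		return msgResponseChunk(<-ch)
	}
}
```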
```diff
@@ -183,43 +162,50 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
 
 	m.updateContent()
 
-	m.isWaiting = false
-	m.status = "Press ctrl+s to send"
+	m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
 	return m
 }
 
-func (m *model) handleMessagesKey(msg tea.KeyMsg) tea.Cmd {
+func (m *model) handleKeyMsg(msg tea.KeyMsg) tea.Cmd {
 	switch msg.String() {
-	case "tab":
-		m.focus = focusInput
-		m.input.Focus()
-	}
-	return nil
-}
-
-func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
-	switch msg.String() {
-	case "esc":
-		m.focus = focusMessages
-		m.input.Blur()
+	case "ctrl+c", "q":
+		return tea.Quit
 	case "ctrl+s":
-		userInput := strings.TrimSpace(m.input.Value())
-		if strings.TrimSpace(userInput) == "" {
+		if !m.isWaiting && !m.confirmPrompt {
+			m.confirmPrompt = true
+			m.footer = "Press 'y' to confirm sending the message, 'n' to cancel"
+			return nil
+		}
+	case "y":
+		if m.confirmPrompt {
+			userInput := m.input.Value()
+			m.input.SetValue("")
+			m.messages = append(m.messages, models.Message{
+				Role:    models.MessageRoleUser,
+				Content: userInput,
+			})
+			m.updateContent()
+			m.content.GotoBottom()
+			m.isWaiting = true
+			m.confirmPrompt = false
+			m.footer = "Waiting for response... (Press 's' to stop)"
+			return m.promptLLM()
+		}
+	case "n":
+		if m.confirmPrompt {
+			m.confirmPrompt = false
+			m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
+			return nil
+		}
+	case "s":
+		if m.isWaiting {
+			m.isWaiting = false
+			m.footer = "Response generation stopped. Press Ctrl+S to send, Ctrl+C or 'q' to quit"
 			return nil
 		}
-		m.input.SetValue("")
-		m.messages = append(m.messages, models.Message{
-			Role:    models.MessageRoleUser,
-			Content: userInput,
-		})
-
-		m.updateContent()
-		m.content.GotoBottom()
-
-		m.isWaiting = true
-		m.status = "Waiting for response... (Press 's' to stop)"
-		return m.promptLLM()
 	}
 
 	return nil
 }
 
```
```diff
@@ -297,7 +283,7 @@ func (m *model) updateContent() {
 			style = assistantStyle
 		}
 
-		sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role.FriendlyRole()))))
+		sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role))))
 
 		highlighted, _ := m.ctx.Chroma.HighlightS(message.Content)
 		sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted))
@@ -319,20 +305,10 @@ func (m model) inputView() string {
 }
 
 func (m model) footerView() string {
-	left := m.status
-	right := fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)
-
-	totalWidth := lipgloss.Width(left + right)
-	var padding string
-	if m.content.Width-totalWidth > 0 {
-		padding = strings.Repeat(" ", m.content.Width-totalWidth)
-	} else {
-		padding = ""
-	}
-
-	footer := lipgloss.JoinHorizontal(lipgloss.Center, left, padding, right)
-
-	return footerStyle.Width(m.content.Width).Render(footer)
+	return footerStyle.
+		Width(m.content.Width).
+		Align(lipgloss.Center).
+		Render(m.footer)
 }
 
 func Launch(ctx *lmcli.Context, convShortname string) error {
```
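The new `footerView` replaces the manual padding arithmetic with lipgloss's own alignment: a fixed-width style with `Align(lipgloss.Center)` pads the rendered string to the given width. A standalone sketch of the technique (not repo code):

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/lipgloss"
)

// Width plus Align(lipgloss.Center) does the centering that the old
// footerView computed by hand with lipgloss.Width and strings.Repeat.
func main() {
	footerStyle := lipgloss.NewStyle().
		Width(40).
		Align(lipgloss.Center)
	fmt.Println(footerStyle.Render("Press Ctrl+S to send"))
}
```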