Compare commits

..

5 Commits

Author SHA1 Message Date
7bfddaabce tui: add focus switching between input/messages view 2024-03-12 18:24:46 +00:00
a5f39f7944 tui: removed confirm before send, dynamic footer
footer now rendered based on model data, instead of being set to a fixed
string
2024-03-12 18:24:46 +00:00
3b71f08d19 tui: use ctx chroma highlighter 2024-03-12 18:24:46 +00:00
b803ea7a52 Add initial TUI 2024-03-12 18:24:46 +00:00
8bdb155bf7 Update ChatCompletionClient to accept context.Context 2024-03-12 18:24:46 +00:00
5 changed files with 102 additions and 66 deletions

View File

@ -1,6 +1,7 @@
package util package util
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"strings" "strings"
@ -35,7 +36,7 @@ func FetchAndShowCompletion(ctx *lmcli.Context, messages []model.Message) ([]mod
var apiReplies []model.Message var apiReplies []model.Message
response, err := completionProvider.CreateChatCompletionStream( response, err := completionProvider.CreateChatCompletionStream(
requestParams, messages, &apiReplies, content, context.Background(), requestParams, messages, &apiReplies, content,
) )
if response != "" { if response != "" {
// there was some content, so break to a new line after it // there was some content, so break to a new line after it
@ -153,7 +154,7 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
MaxTokens: 25, MaxTokens: 25,
} }
response, err := completionProvider.CreateChatCompletion(requestParams, generateRequest, nil) response, err := completionProvider.CreateChatCompletion(context.Background(), requestParams, generateRequest, nil)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -3,14 +3,15 @@ package anthropic
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"net/http" "net/http"
"strings" "strings"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model" "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/tools"
) )
type AnthropicClient struct { type AnthropicClient struct {
@ -102,7 +103,7 @@ func buildRequest(params model.RequestParameters, messages []model.Message) Requ
return requestBody return requestBody
} }
func sendRequest(c *AnthropicClient, r Request) (*http.Response, error) { func sendRequest(ctx context.Context, c *AnthropicClient, r Request) (*http.Response, error) {
url := "https://api.anthropic.com/v1/messages" url := "https://api.anthropic.com/v1/messages"
jsonBody, err := json.Marshal(r) jsonBody, err := json.Marshal(r)
@ -110,7 +111,7 @@ func sendRequest(c *AnthropicClient, r Request) (*http.Response, error) {
return nil, fmt.Errorf("failed to marshal request body: %v", err) return nil, fmt.Errorf("failed to marshal request body: %v", err)
} }
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonBody))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %v", err) return nil, fmt.Errorf("failed to create HTTP request: %v", err)
} }
@ -129,13 +130,14 @@ func sendRequest(c *AnthropicClient, r Request) (*http.Response, error) {
} }
func (c *AnthropicClient) CreateChatCompletion( func (c *AnthropicClient) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,
) (string, error) { ) (string, error) {
request := buildRequest(params, messages) request := buildRequest(params, messages)
resp, err := sendRequest(c, request) resp, err := sendRequest(ctx, c, request)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -167,6 +169,7 @@ func (c *AnthropicClient) CreateChatCompletion(
} }
func (c *AnthropicClient) CreateChatCompletionStream( func (c *AnthropicClient) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,
@ -175,7 +178,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
request := buildRequest(params, messages) request := buildRequest(params, messages)
request.Stream = true request.Stream = true
resp, err := sendRequest(c, request) resp, err := sendRequest(ctx, c, request)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -295,7 +298,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
// Recurse into CreateChatCompletionStream with the tool call replies // Recurse into CreateChatCompletionStream with the tool call replies
// added to the original messages // added to the original messages
messages = append(append(messages, toolCall), toolReply) messages = append(append(messages, toolCall), toolReply)
return c.CreateChatCompletionStream(params, messages, replies, output) return c.CreateChatCompletionStream(ctx, params, messages, replies, output)
} }
} }
case "message_stop": case "message_stop":

View File

@ -157,13 +157,14 @@ func handleToolCalls(
} }
func (c *OpenAIClient) CreateChatCompletion( func (c *OpenAIClient) CreateChatCompletion(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,
) (string, error) { ) (string, error) {
client := openai.NewClient(c.APIKey) client := openai.NewClient(c.APIKey)
req := createChatCompletionRequest(c, params, messages) req := createChatCompletionRequest(c, params, messages)
resp, err := client.CreateChatCompletion(context.Background(), req) resp, err := client.CreateChatCompletion(ctx, req)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -182,7 +183,7 @@ func (c *OpenAIClient) CreateChatCompletion(
// Recurse into CreateChatCompletion with the tool call replies // Recurse into CreateChatCompletion with the tool call replies
messages = append(messages, results...) messages = append(messages, results...)
return c.CreateChatCompletion(params, messages, replies) return c.CreateChatCompletion(ctx, params, messages, replies)
} }
if replies != nil { if replies != nil {
@ -197,6 +198,7 @@ func (c *OpenAIClient) CreateChatCompletion(
} }
func (c *OpenAIClient) CreateChatCompletionStream( func (c *OpenAIClient) CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,
@ -205,7 +207,7 @@ func (c *OpenAIClient) CreateChatCompletionStream(
client := openai.NewClient(c.APIKey) client := openai.NewClient(c.APIKey)
req := createChatCompletionRequest(c, params, messages) req := createChatCompletionRequest(c, params, messages)
stream, err := client.CreateChatCompletionStream(context.Background(), req) stream, err := client.CreateChatCompletionStream(ctx, req)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -256,7 +258,7 @@ func (c *OpenAIClient) CreateChatCompletionStream(
// Recurse into CreateChatCompletionStream with the tool call replies // Recurse into CreateChatCompletionStream with the tool call replies
messages = append(messages, results...) messages = append(messages, results...)
return c.CreateChatCompletionStream(params, messages, replies, output) return c.CreateChatCompletionStream(ctx, params, messages, replies, output)
} }
if replies != nil { if replies != nil {

View File

@ -1,12 +1,17 @@
package provider package provider
import "git.mlow.ca/mlow/lmcli/pkg/lmcli/model" import (
"context"
"git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
)
type ChatCompletionClient interface { type ChatCompletionClient interface {
// CreateChatCompletion requests a response to the provided messages. // CreateChatCompletion requests a response to the provided messages.
// Replies are appended to the given replies struct, and the // Replies are appended to the given replies struct, and the
// complete user-facing response is returned as a string. // complete user-facing response is returned as a string.
CreateChatCompletion( CreateChatCompletion(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,
@ -15,6 +20,7 @@ type ChatCompletionClient interface {
// Like CreateChatCompletion, except the response is streamed via // Like CreateChatCompletion, except the response is streamed via
// the output channel as it's received. // the output channel as it's received.
CreateChatCompletionStream( CreateChatCompletionStream(
ctx context.Context,
params model.RequestParameters, params model.RequestParameters,
messages []model.Message, messages []model.Message,
replies *[]model.Message, replies *[]model.Message,

View File

@ -2,7 +2,6 @@ package tui
// The terminal UI for lmcli, launched from the `lmcli chat` command // The terminal UI for lmcli, launched from the `lmcli chat` command
// TODO: // TODO:
// - mode/focus changing between input and message selection
// - binding to open selected message/input in $EDITOR // - binding to open selected message/input in $EDITOR
import ( import (
@ -18,6 +17,13 @@ import (
"github.com/charmbracelet/lipgloss" "github.com/charmbracelet/lipgloss"
) )
type focusState int
const (
focusInput focusState = iota
focusMessages
)
type model struct { type model struct {
ctx *lmcli.Context ctx *lmcli.Context
convShortname string convShortname string
@ -29,13 +35,13 @@ type model struct {
err error err error
// ui state // ui state
focus focusState
isWaiting bool isWaiting bool
confirmPrompt bool status string // a general status message
// ui elements // ui elements
content viewport.Model content viewport.Model
input textarea.Model input textarea.Model
footer string
} }
type message struct { type message struct {
@ -81,7 +87,25 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) { switch msg := msg.(type) {
case tea.KeyMsg: case tea.KeyMsg:
cmd = m.handleKeyMsg(msg) switch msg.String() {
case "ctrl+c":
return m, tea.Quit
case "q":
if m.focus != focusInput {
return m, tea.Quit
}
default:
var inputHandled tea.Cmd
switch m.focus {
case focusInput:
inputHandled = m.handleInputKey(msg)
case focusMessages:
inputHandled = m.handleMessagesKey(msg)
}
if inputHandled != nil {
return m, inputHandled
}
}
case tea.WindowSizeMsg: case tea.WindowSizeMsg:
m.content.Width = msg.Width m.content.Width = msg.Width
m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView()) m.content.Height = msg.Height - m.input.Height() - lipgloss.Height(m.footerView())
@ -111,7 +135,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
cmd = waitForChunk(m.replyChan) // wait for the next chunk cmd = waitForChunk(m.replyChan) // wait for the next chunk
case msgResponseEnd: case msgResponseEnd:
m.isWaiting = false m.isWaiting = false
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit" m.status = "Press ctrl+s to send"
} }
if cmd != nil { if cmd != nil {
@ -146,9 +170,6 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
convShortname: convShortname, convShortname: convShortname,
replyChan: make(chan string), replyChan: make(chan string),
isWaiting: false,
confirmPrompt: false,
} }
m.content = viewport.New(0, 0) m.content = viewport.New(0, 0)
@ -162,50 +183,43 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
m.updateContent() m.updateContent()
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit" m.isWaiting = false
m.status = "Press ctrl+s to send"
return m return m
} }
func (m *model) handleKeyMsg(msg tea.KeyMsg) tea.Cmd { func (m *model) handleMessagesKey(msg tea.KeyMsg) tea.Cmd {
switch msg.String() { switch msg.String() {
case "ctrl+c", "q": case "tab":
return tea.Quit m.focus = focusInput
m.input.Focus()
}
return nil
}
func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
switch msg.String() {
case "esc":
m.focus = focusMessages
m.input.Blur()
case "ctrl+s": case "ctrl+s":
if !m.isWaiting && !m.confirmPrompt { userInput := strings.TrimSpace(m.input.Value())
m.confirmPrompt = true if strings.TrimSpace(userInput) == "" {
m.footer = "Press 'y' to confirm sending the message, 'n' to cancel"
return nil return nil
} }
case "y":
if m.confirmPrompt {
userInput := m.input.Value()
m.input.SetValue("") m.input.SetValue("")
m.messages = append(m.messages, models.Message{ m.messages = append(m.messages, models.Message{
Role: models.MessageRoleUser, Role: models.MessageRoleUser,
Content: userInput, Content: userInput,
}) })
m.updateContent() m.updateContent()
m.content.GotoBottom() m.content.GotoBottom()
m.isWaiting = true m.isWaiting = true
m.confirmPrompt = false m.status = "Waiting for response... (Press 's' to stop)"
m.footer = "Waiting for response... (Press 's' to stop)"
return m.promptLLM() return m.promptLLM()
} }
case "n":
if m.confirmPrompt {
m.confirmPrompt = false
m.footer = "Press Ctrl+S to send, Ctrl+C or 'q' to quit"
return nil
}
case "s":
if m.isWaiting {
m.isWaiting = false
m.footer = "Response generation stopped. Press Ctrl+S to send, Ctrl+C or 'q' to quit"
return nil
}
}
return nil return nil
} }
@ -283,7 +297,7 @@ func (m *model) updateContent() {
style = assistantStyle style = assistantStyle
} }
sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role)))) sb.WriteString(fmt.Sprintf("%s:\n\n", style.Render(string(message.Role.FriendlyRole()))))
highlighted, _ := m.ctx.Chroma.HighlightS(message.Content) highlighted, _ := m.ctx.Chroma.HighlightS(message.Content)
sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted)) sb.WriteString(contentStyle.Width(m.content.Width - 5).Render(highlighted))
@ -305,10 +319,20 @@ func (m model) inputView() string {
} }
func (m model) footerView() string { func (m model) footerView() string {
return footerStyle. left := m.status
Width(m.content.Width). right := fmt.Sprintf("Model: %s", *m.ctx.Config.Defaults.Model)
Align(lipgloss.Center).
Render(m.footer) totalWidth := lipgloss.Width(left + right)
var padding string
if m.content.Width-totalWidth > 0 {
padding = strings.Repeat(" ", m.content.Width-totalWidth)
} else {
padding = ""
}
footer := lipgloss.JoinHorizontal(lipgloss.Center, left, padding, right)
return footerStyle.Width(m.content.Width).Render(footer)
} }
func Launch(ctx *lmcli.Context, convShortname string) error { func Launch(ctx *lmcli.Context, convShortname string) error {