Compare commits

41 Commits

Author SHA1 Message Date
1456442d4e tui: Fix response handling 2024-03-17 22:55:02 +00:00
3185b2d7d6 tui: show the message position when focused 2024-03-17 22:55:02 +00:00
6c64f21d9a tui: support for message retry/continue
Better handling of persistence; we now ensure the response we persist is
trimmed of whitespace, which is particularly important when a response
is cancelled mid-stream
2024-03-17 22:55:02 +00:00
6f737ad19c tui: handle text wrapping ourselves, add ctrl+w wrap toggle
Gets rid of those pesky trailing characters
2024-03-17 22:55:02 +00:00
a8ffdc156a tui: open input/messages for editing in $EDITOR 2024-03-17 22:55:02 +00:00
7a974d9764 tui: add ability to select a message 2024-03-17 22:55:02 +00:00
adb61ffa59 tui: conversation rendering tweaks, remove input character limit 2024-03-17 22:55:02 +00:00
1c7ad75fd5 tui: fixed response cancelling 2024-03-17 22:55:02 +00:00
613aa1a552 tui: ctrl+r to retry previous message 2024-03-17 22:55:02 +00:00
71833b89cd tui: fixed footer styling 2024-03-17 22:55:02 +00:00
2ad93394b1 tui: removed scrollbar 2024-03-17 22:55:02 +00:00
f49b772960 tui: minor fixes and cleanup 2024-03-17 22:55:02 +00:00
29d8138dc0 tui: update todos 2024-03-17 22:55:02 +00:00
3756f6d9e4 tui: add response waiting spinner 2024-03-17 22:55:02 +00:00
41916eb7b3 tui: add LLM response error handling
+ various other small tweaks
2024-03-17 22:55:02 +00:00
3892e68251 tui: add a "scroll bar" and error view 2024-03-17 22:55:02 +00:00
8697284064 tui: generate titles for conversations 2024-03-17 22:55:02 +00:00
383d34f311 tui: persist new conversations as well 2024-03-17 22:55:02 +00:00
ac0e380244 tui: add reply persistence 2024-03-17 22:55:02 +00:00
c3a3cb0181 tui: improve footer rendering
Made it easier to add segments later, better handling of padding
2024-03-17 22:55:02 +00:00
612ea90417 tui: slight function order change 2024-03-17 22:55:02 +00:00
94508b1dbf tui: cache highlighted messages
Syntax highlighting is fairly expensive, and this means we no longer
need to do syntax highlighting on the entire conversation each time a new
message chunk is received
2024-03-17 22:55:02 +00:00
7e002e5214 tui: adjust message header styling 2024-03-17 22:55:02 +00:00
48e4dea3cf tui: style tweaks 2024-03-17 22:55:02 +00:00
0ab552303d tui: add contentStyle, applied to overall viewport content 2024-03-17 22:55:02 +00:00
6ce42a77f9 tui: update TODO 2024-03-17 22:55:02 +00:00
2cb1a0005d tui: fix conversation loading 2024-03-17 22:55:02 +00:00
ea78edf039 tui: use EnabledTools from lmcli.Context 2024-03-17 22:55:02 +00:00
793aaab50e tui: styling tweak 2024-03-17 22:55:02 +00:00
5afc9667c7 tui: add header with title 2024-03-17 22:55:02 +00:00
dfafc573e5 tui: handle multi part responses 2024-03-17 22:55:02 +00:00
97f81a0cbb tui: scroll content view with output
clean up msgResponseChunk handling
2024-03-17 22:55:02 +00:00
eca120cde6 tui: ability to cancel request in flight 2024-03-17 22:55:02 +00:00
12d4e495d4 tui: add focus switching between input/messages view 2024-03-17 22:55:02 +00:00
d8c8262890 tui: removed confirm before send, dynamic footer
The footer is now rendered based on model data, instead of being set to a
fixed string
2024-03-17 22:55:02 +00:00
758f74aba5 tui: use ctx chroma highlighter 2024-03-17 22:55:02 +00:00
1570c23d63 Add initial TUI 2024-03-17 22:55:02 +00:00
46149e0b67 Attempt to fix anthropic tool calling
Models have been way too eager to use tools when the task does not
require it (for example, reading the filesystem in order to show a
code example)
2024-03-17 22:55:02 +00:00
c2c61e2aaa Improve title generation prompt performance
The previous prompt was utterly broken with Anthropic models; they would
just try to continue the conversation
2024-03-17 22:55:02 +00:00
5e880d3b31 Lead anthropic function call XML with newline 2024-03-17 22:55:02 +00:00
62f07dd240 Fix double reply callback on tool calls 2024-03-17 22:55:02 +00:00
4 changed files with 62 additions and 19 deletions

View File

@@ -118,11 +118,18 @@ func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist
 func FormatForExternalPrompt(messages []model.Message, system bool) string {
     sb := strings.Builder{}
     for _, message := range messages {
-        if message.Role != model.MessageRoleUser && (message.Role != model.MessageRoleSystem || !system) {
+        if message.Content == "" {
             continue
         }
-        sb.WriteString(fmt.Sprintf("<%s>\n", message.Role.FriendlyRole()))
-        sb.WriteString(fmt.Sprintf("\"\"\"\n%s\n\"\"\"\n\n", message.Content))
+        switch message.Role {
+        case model.MessageRoleAssistant, model.MessageRoleToolCall:
+            sb.WriteString("Assistant:\n\n")
+        case model.MessageRoleUser:
+            sb.WriteString("User:\n\n")
+        default:
+            continue
+        }
+        sb.WriteString(fmt.Sprintf("%s", lipgloss.NewStyle().PaddingLeft(1).Render(message.Content)))
     }
     return sb.String()
 }
@@ -133,13 +140,32 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
         return "", err
     }
 
-    const header = "Generate a concise 4-5 word title for the conversation below."
-    prompt := fmt.Sprintf("%s\n\n---\n\n%s", header, FormatForExternalPrompt(messages, false))
+    const prompt = `Above is an excerpt from a conversation between a user and AI assistant. Please reply with a short title (no more than 8 words) that reflects the topic of the conversation, read from the user's perspective.
+
+Example conversation:
+"""
+User:
+Hello!
+
+Assistant:
+Hello! How may I assist you?
+"""
+
+Example response:
+"""
+Title: A brief introduction
+"""
+`
+
+    conversation := FormatForExternalPrompt(messages, false)
 
     generateRequest := []model.Message{
         {
             Role:    model.MessageRoleUser,
-            Content: prompt,
+            Content: fmt.Sprintf("\"\"\"\n%s\n\"\"\"\n\n%s", conversation, prompt),
         },
     }
@@ -158,12 +184,15 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
         return "", err
     }
 
+    response = strings.TrimPrefix(response, "Title: ")
+    response = strings.Trim(response, "\"")
+
     return response, nil
 }
 
 // ShowWaitAnimation prints an animated ellipses to stdout until something is
 // received on the signal channel. An empty string sent to the channel to
-// noftify the caller that the animation has completed (carriage returned).
+// notify the caller that the animation has completed (carriage returned).
 func ShowWaitAnimation(signal chan any) {
     // Save the current cursor position
     fmt.Print("\033[s")

View File

@@ -68,7 +68,7 @@ func buildRequest(params model.RequestParameters, messages []model.Message) Requ
     startIdx := 0
     if len(messages) > 0 && messages[0].Role == model.MessageRoleSystem {
         requestBody.System = messages[0].Content
-        requestBody.Messages = requestBody.Messages[:len(messages)-1]
+        requestBody.Messages = requestBody.Messages[1:]
         startIdx = 1
     }
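The change above fixes a slicing bug: the old expression kept the leading system message in the request and dropped the final message instead, while the fix removes only the first element. A small illustrative sketch with stand-in string values shows the difference:

package main

import "fmt"

func main() {
    // Stand-in for requestBody.Messages after the leading system message has
    // been copied into requestBody.System (illustrative values only).
    messages := []string{"system", "user", "assistant", "user"}

    // Old expression: keeps the system message and drops the final message.
    old := messages[:len(messages)-1]

    // Fixed expression: removes only the first (system) element.
    fixed := messages[1:]

    fmt.Println(old)   // [system user assistant]
    fmt.Println(fixed) // [user assistant user]
}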

View File

@@ -9,9 +9,10 @@ import (
     "git.mlow.ca/mlow/lmcli/pkg/lmcli/model"
 )
 
-const TOOL_PREAMBLE = `In this environment you have access to a set of tools which may assist you in fulfilling user requests.
+const TOOL_PREAMBLE = `You have access to the following tools when replying.
+
 You may call them like this:
 <function_calls>
 <invoke>
 <tool_name>$TOOL_NAME</tool_name>
@@ -24,6 +25,14 @@ You may call them like this:
 
 Here are the tools available:`
 
+const TOOL_PREAMBLE_FOOTER = `Recognize the utility of these tools in a broad range of different applications, and the power they give you to solve a wide range of different problems. However, ensure that the tools are used judiciously and only when clearly relevant to the user's request. Specifically:
+
+1. Only use a tool if the user has explicitly requested or provided information that warrants its use. Do not make assumptions about files or data existing without the user mentioning them.
+
+2. If there is ambiguity about whether using a tool is appropriate, ask a clarifying question to the user before proceeding. Confirm your understanding of their request and intent.
+
+3. Prioritize providing direct responses and explanations based on your own knowledge and understanding. Use tools to supplement and enhance your responses when clearly applicable, but not as a default action.`
+
 type XMLTools struct {
     XMLName          struct{}             `xml:"tools"`
     ToolDescriptions []XMLToolDescription `xml:"tool_description"`
@@ -151,7 +160,7 @@ func buildToolsSystemPrompt(tools []model.Tool) string {
     if err != nil {
         panic("Could not serialize []model.Tool to XMLTools")
     }
-    return TOOL_PREAMBLE + "\n" + xmlToolsString + "\n"
+    return TOOL_PREAMBLE + "\n\n" + xmlToolsString + "\n\n" + TOOL_PREAMBLE_FOOTER
 }
 
 func (x XMLTools) XMLString() (string, error) {
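With this change the tools system prompt is assembled as preamble, serialized XML tool descriptions, then the new footer, separated by blank lines. A rough sketch of that shape, using placeholder strings rather than the real TOOL_PREAMBLE, XMLTools output, or TOOL_PREAMBLE_FOOTER values:

package main

import "fmt"

func main() {
    // Placeholder strings standing in for TOOL_PREAMBLE, the serialized
    // XMLTools, and TOOL_PREAMBLE_FOOTER.
    preamble := "You have access to the following tools when replying. ..."
    xmlTools := "<tools>\n<tool_description>...</tool_description>\n</tools>"
    footer := "Recognize the utility of these tools ... not as a default action."

    // Same shape as the new return in buildToolsSystemPrompt: the footer now
    // follows the tool descriptions, with blank lines separating the parts.
    systemPrompt := preamble + "\n\n" + xmlTools + "\n\n" + footer
    fmt.Println(systemPrompt)
}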

View File

@@ -244,7 +244,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
         if last < 0 {
             panic("Unexpected empty messages handling msgResponseEnd")
         }
-        m.setMessageContents(last, strings.TrimSpace(m.messages[last].Content))
+        m.setMessageContents(last, strings.TrimSpace(string(msg)))
         m.updateContent()
         m.status = "Press ctrl+s to send"
     case msgResponseError:
@@ -786,17 +786,22 @@ func (m *model) conversationView() string {
         // write message heading with space for content
         user := style.Render(icon + friendly)
 
-        var saved string
+        var prefix string
+        var suffix string
+
+        faint := lipgloss.NewStyle().Faint(true)
+        if m.focus == focusMessages {
+            if i == m.selectedMessage {
+                prefix = "> "
+            }
+            suffix += faint.Render(fmt.Sprintf(" (%d/%d)", i+1, msgCnt))
+        }
+
         if message.ID == 0 {
-            saved = lipgloss.NewStyle().Faint(true).Render(" (not saved)")
+            suffix += faint.Render(" (not saved)")
         }
 
-        var selectedPrefix string
-        if m.focus == focusMessages && i == m.selectedMessage {
-            selectedPrefix = "> "
-        }
-
-        header := lipgloss.NewStyle().PaddingLeft(1).Render(selectedPrefix + user + saved)
+        header := lipgloss.NewStyle().PaddingLeft(1).Render(prefix + user + suffix)
         sb.WriteString(header)
         lineCnt += lipgloss.Height(header)
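The header change above composes a selection prefix, the role heading, and a faint suffix carrying the message position and save state. A standalone sketch of the rendered result, using illustrative values in place of the model fields (the lipgloss package is the one already used in the diff):

package main

import (
    "fmt"

    "github.com/charmbracelet/lipgloss"
)

func main() {
    // Illustrative values standing in for the fields used in conversationView.
    user := "Assistant"
    i, msgCnt := 1, 5      // zero-based message index and total message count
    focusMessages := true  // messages pane is focused
    isSelected := true     // this message is the selected one
    saved := false         // message.ID == 0, i.e. not yet persisted

    faint := lipgloss.NewStyle().Faint(true)

    var prefix, suffix string
    if focusMessages {
        if isSelected {
            prefix = "> "
        }
        suffix += faint.Render(fmt.Sprintf(" (%d/%d)", i+1, msgCnt))
    }
    if !saved {
        suffix += faint.Render(" (not saved)")
    }

    // Renders roughly: "> Assistant (2/5) (not saved)"
    header := lipgloss.NewStyle().PaddingLeft(1).Render(prefix + user + suffix)
    fmt.Println(header)
}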