Improve title generation prompt performance
The previous prompt was completely broken with Anthropic models: instead of generating a title, they would simply try to continue the conversation.
This commit is contained in:
parent
5e880d3b31
commit
c2c61e2aaa
@ -118,11 +118,18 @@ func HandleConversationReply(ctx *lmcli.Context, c *model.Conversation, persist
|
||||
func FormatForExternalPrompt(messages []model.Message, system bool) string {
|
||||
sb := strings.Builder{}
|
||||
for _, message := range messages {
|
||||
if message.Role != model.MessageRoleUser && (message.Role != model.MessageRoleSystem || !system) {
|
||||
if message.Content == "" {
|
||||
continue
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("<%s>\n", message.Role.FriendlyRole()))
|
||||
sb.WriteString(fmt.Sprintf("\"\"\"\n%s\n\"\"\"\n\n", message.Content))
|
||||
switch message.Role {
|
||||
case model.MessageRoleAssistant, model.MessageRoleToolCall:
|
||||
sb.WriteString("Assistant:\n\n")
|
||||
case model.MessageRoleUser:
|
||||
sb.WriteString("User:\n\n")
|
||||
default:
|
||||
continue
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("%s", lipgloss.NewStyle().PaddingLeft(1).Render(message.Content)))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
@ -133,13 +140,32 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
const header = "Generate a concise 4-5 word title for the conversation below."
|
||||
prompt := fmt.Sprintf("%s\n\n---\n\n%s", header, FormatForExternalPrompt(messages, false))
|
||||
const prompt = `Above is an excerpt from a conversation between a user and AI assistant. Please reply with a short title (no more than 8 words) that reflects the topic of the conversation, read from the user's perspective.
|
||||
|
||||
Example conversation:
|
||||
|
||||
"""
|
||||
User:
|
||||
|
||||
Hello!
|
||||
|
||||
Assistant:
|
||||
|
||||
Hello! How may I assist you?
|
||||
"""
|
||||
|
||||
Example response:
|
||||
|
||||
"""
|
||||
Title: A brief introduction
|
||||
"""
|
||||
`
|
||||
conversation := FormatForExternalPrompt(messages, false)
|
||||
|
||||
generateRequest := []model.Message{
|
||||
{
|
||||
Role: model.MessageRoleUser,
|
||||
Content: prompt,
|
||||
Content: fmt.Sprintf("\"\"\"\n%s\n\"\"\"\n\n%s", conversation, prompt),
|
||||
},
|
||||
}
|
||||
|
||||
@ -158,12 +184,15 @@ func GenerateTitle(ctx *lmcli.Context, c *model.Conversation) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
response = strings.TrimPrefix(response, "Title: ")
|
||||
response = strings.Trim(response, "\"")
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// ShowWaitAnimation prints an animated ellipses to stdout until something is
|
||||
// received on the signal channel. An empty string sent to the channel to
|
||||
// noftify the caller that the animation has completed (carriage returned).
|
||||
// notify the caller that the animation has completed (carriage returned).
|
||||
func ShowWaitAnimation(signal chan any) {
|
||||
// Save the current cursor position
|
||||
fmt.Print("\033[s")
|
||||
|
Loading…
Reference in New Issue
Block a user