tui: add LLM response error handling

+ various other small tweaks
Matt Low 2024-03-14 03:07:41 +00:00
parent 3892e68251
commit 41916eb7b3


@@ -66,6 +66,8 @@ type (
 	msgResponseChunk string
 	// sent when response is finished being received
 	msgResponseEnd string
+	// a special case of msgError that stops the response waiting animation
+	msgResponseError error
 	// sent on each completed reply
 	msgAssistantReply models.Message
 	// sent when a conversation is (re)loaded
@@ -198,6 +200,11 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.replyCancelFunc = nil
 		m.waitingForReply = false
 		m.status = "Press ctrl+s to send"
+	case msgResponseError:
+		m.replyCancelFunc = nil
+		m.waitingForReply = false
+		m.status = "Press ctrl+s to send"
+		m.err = error(msg)
 	case msgConversationTitleChanged:
 		title := string(msg)
 		m.conversation.Title = title
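For readers less familiar with Bubble Tea's architecture, here is a minimal, self-contained sketch of the round trip these two hunks set up. `callLLM` and the message wiring are hypothetical stand-ins, not this project's actual API; only the `tea.Cmd`/`tea.Msg` mechanics are the real library contract:

```go
package main

import (
	"errors"
	"fmt"

	tea "github.com/charmbracelet/bubbletea"
)

// Message types mirroring the ones added in this commit.
type (
	msgResponseEnd   string
	msgResponseError error
)

// callLLM is a hypothetical stand-in for the real completion request.
func callLLM() (string, error) {
	return "", errors.New("rate limited")
}

// promptLLM returns a tea.Cmd: a func the Bubble Tea runtime executes off
// the UI goroutine, feeding whatever tea.Msg it returns back into Update.
func promptLLM() tea.Cmd {
	return func() tea.Msg {
		resp, err := callLLM()
		if err != nil {
			return msgResponseError(err) // lands in Update's msgResponseError case
		}
		return msgResponseEnd(resp)
	}
}

func main() {
	// Simulate one dispatch cycle: run the command, then switch on the
	// resulting message, just as Update's type switch does above.
	switch msg := promptLLM()().(type) {
	case msgResponseError:
		fmt.Println("error:", error(msg))
	case msgResponseEnd:
		fmt.Println("done:", string(msg))
	}
}
```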
@@ -288,7 +295,7 @@ func (m *model) headerView() string {
 	}
 	part := titleStyle.Render(title)
-	return headerStyle.Width(m.content.Width).Render(part)
+	return headerStyle.Width(m.width).Render(part)
 }
 
 func (m *model) contentView() string {
@@ -300,10 +307,10 @@ func (m *model) errorView() string {
 		return ""
 	}
 	return lipgloss.NewStyle().
+		Width(m.width).
+		AlignHorizontal(lipgloss.Center).
 		Bold(true).
 		Foreground(lipgloss.Color("1")).
-		Width(m.content.Width).
-		AlignHorizontal(lipgloss.Center).
 		Render(fmt.Sprintf("%s", m.err))
 }
@@ -312,12 +319,12 @@ func (m *model) scrollbarView() string {
 		return ""
 	}
-	count := int(m.content.ScrollPercent() * float64(m.content.Width-2))
+	count := int(m.content.ScrollPercent() * float64(m.width-2))
 	fill := strings.Repeat("-", count)
 	return lipgloss.NewStyle().
+		Width(m.width).
 		PaddingLeft(1).
 		PaddingRight(1).
-		Width(m.content.Width).
 		Render(fill)
 }
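A pattern worth noting across the three view hunks: full-width chrome (header, error banner, scrollbar) is now sized with m.width, the terminal width tracked by the model, rather than m.content.Width, the inner viewport's width. A standalone sketch of the lipgloss calls involved, with a hard-coded width standing in for m.width:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/charmbracelet/lipgloss"
)

func main() {
	width := 40 // stand-in for m.width, the full terminal width

	// Centered, bold red error banner, as in the updated errorView.
	banner := lipgloss.NewStyle().
		Width(width).
		AlignHorizontal(lipgloss.Center).
		Bold(true).
		Foreground(lipgloss.Color("1")). // ANSI red
		Render("API request failed")
	fmt.Println(banner)

	// Proportional scrollbar fill, as in the updated scrollbarView: the
	// scroll percentage (hard-coded here) maps to a dash count, with -2
	// compensating for one cell of padding on each side.
	scrollPercent := 0.5
	count := int(scrollPercent * float64(width-2))
	fill := strings.Repeat("-", count)
	fmt.Println(lipgloss.NewStyle().
		Width(width).
		PaddingLeft(1).
		PaddingRight(1).
		Render(fill))
}
```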
@@ -381,6 +388,7 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
 	m.input.FocusedStyle.CursorLine = lipgloss.NewStyle()
 	m.input.ShowLineNumbers = false
 	m.input.SetHeight(4)
+	m.input.Focus()
 
 	m.updateContent()
@@ -458,10 +466,10 @@ func (m *model) loadConversation(shortname string) tea.Cmd {
 		}
 		c, err := m.ctx.Store.ConversationByShortName(shortname)
 		if err != nil {
-			return msgError(fmt.Errorf("Could not lookup conversation: %v\n", err))
+			return msgError(fmt.Errorf("Could not lookup conversation: %v", err))
 		}
 		if c.ID == 0 {
-			return msgError(fmt.Errorf("Conversation not found with short name: %s\n", shortname))
+			return msgError(fmt.Errorf("Conversation not found: %s", shortname))
 		}
 		return msgConversationLoaded(c)
 	}
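Dropping the trailing \n from these messages is the right call: error strings get composed into other output and shouldn't end in a newline. A further refinement, hypothetical and not part of this commit, would be wrapping with %w so callers can inspect the cause:

```go
// Hypothetical variant: %w instead of %v preserves the underlying error
// for errors.Is / errors.As; this commit keeps %v.
if err != nil {
	return msgError(fmt.Errorf("could not lookup conversation: %w", err))
}
```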
@@ -520,11 +528,13 @@ func (m *model) promptLLM() tea.Cmd {
 		ctx, replyCancelFunc := context.WithCancel(context.Background())
 		m.replyCancelFunc = replyCancelFunc
 
-		// TODO: handle error
-		resp, _ := completionProvider.CreateChatCompletionStream(
+		resp, err := completionProvider.CreateChatCompletionStream(
 			ctx, requestParams, m.messages, replyHandler, m.replyChunkChan,
 		)
+		if err != nil {
+			return msgResponseError(err)
+		}
 		return msgResponseEnd(resp)
 	}
 }
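One last note on the ctx threaded through here: the stored replyCancelFunc is presumably invoked when the user aborts a reply, and a cancelled context surfaces as an error from the streaming call, which this hunk now routes to msgResponseError instead of silently discarding. A tiny standalone illustration of that standard context behavior (names simplified):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // e.g. the user aborted the in-flight reply

	// Any client honoring ctx returns an error wrapping context.Canceled,
	// which the TUI would now surface via msgResponseError.
	if err := ctx.Err(); errors.Is(err, context.Canceled) {
		fmt.Println("stream aborted:", err)
	}
}
```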