tui: ability to cancel request in flight

Matt Low 2024-03-12 18:33:57 +00:00
parent 12d4e495d4
commit eca120cde6
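
Replaces the isWaiting flag with waitingForReply and stores a context.CancelFunc on the model: while a reply is streaming, ctrl+c now cancels the in-flight completion request instead of quitting the TUI; with no request active, ctrl+c quits as before.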


@@ -32,12 +32,13 @@ type model struct {
 	// application state
 	conversation *models.Conversation
 	messages     []models.Message
+	waitingForReply bool
 	replyChan    chan string
+	replyCancelFunc context.CancelFunc
 	err          error
 
 	// ui state
 	focus  focusState
-	isWaiting bool
 	status string // a general status message
 
 	// ui elements
@@ -90,7 +91,11 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	case tea.KeyMsg:
 		switch msg.String() {
 		case "ctrl+c":
+			if m.waitingForReply {
+				m.replyCancelFunc()
+			} else {
 				return m, tea.Quit
+			}
 		case "q":
 			if m.focus != focusInput {
 				return m, tea.Quit
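
The branch above carries the UX change: ctrl+c is overloaded to mean cancel while a reply is streaming, and quit otherwise. A minimal, self-contained sketch of that branching, with illustrative names rather than lmcli's (the nil guard is a hypothetical hardening; the commit itself relies on the two fields being set together):

package main

import (
	"context"
	"fmt"
)

// Illustrative model mirroring the two fields added in this commit.
type model struct {
	waitingForReply bool
	replyCancelFunc context.CancelFunc
}

// handleCtrlC returns true when the program should quit. While a reply is
// in flight it cancels the request instead of quitting.
func (m *model) handleCtrlC() (quit bool) {
	if m.waitingForReply && m.replyCancelFunc != nil {
		m.replyCancelFunc()
		return false
	}
	return true
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	m := &model{waitingForReply: true, replyCancelFunc: cancel}

	fmt.Println("quit?", m.handleCtrlC()) // false: cancels the request
	fmt.Println("ctx err:", ctx.Err())    // context.Canceled

	m.waitingForReply, m.replyCancelFunc = false, nil
	fmt.Println("quit?", m.handleCtrlC()) // true: nothing in flight
}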
@@ -135,7 +140,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		}
 		cmd = waitForChunk(m.replyChan) // wait for the next chunk
 	case msgResponseEnd:
-		m.isWaiting = false
+		m.replyCancelFunc = nil
+		m.waitingForReply = false
 		m.status = "Press ctrl+s to send"
 	}
@@ -184,7 +190,7 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
 	m.updateContent()
 
-	m.isWaiting = false
+	m.waitingForReply = false
 	m.status = "Press ctrl+s to send"
 
 	return m
 }
@@ -217,8 +223,8 @@ func (m *model) handleInputKey(msg tea.KeyMsg) tea.Cmd {
 		m.updateContent()
 		m.content.GotoBottom()
 
-		m.isWaiting = true
-		m.status = "Waiting for response... (Press 's' to stop)"
+		m.waitingForReply = true
+		m.status = "Waiting for response, press ctrl+c to cancel..."
 		return m.promptLLM()
 	}
 	return nil
@@ -278,9 +284,12 @@ func (m *model) promptLLM() tea.Cmd {
 			ToolBag: toolBag,
 		}
 
+		ctx, replyCancelFunc := context.WithCancel(context.Background())
+		m.replyCancelFunc = replyCancelFunc
+
 		// TODO: supply a reply callback and handle error
 		resp, _ := completionProvider.CreateChatCompletionStream(
-			context.Background(), requestParams, m.messages, nil, m.replyChan,
+			ctx, requestParams, m.messages, nil, m.replyChan,
 		)
 
 		return msgResponseEnd(resp)
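
Passing ctx into CreateChatCompletionStream is what makes the cancel take effect: the provider has to watch ctx.Done() while it streams. A sketch of what that could look like on the provider side, using a hypothetical helper rather than lmcli's actual implementation:

package provider

import "context"

// streamChunks forwards chunks to replyChan until the source stream is
// exhausted or the caller's context is cancelled. Once replyCancelFunc()
// fires, ctx.Err() returns context.Canceled and the loop exits promptly.
func streamChunks(ctx context.Context, stream <-chan string, replyChan chan<- string) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case chunk, ok := <-stream:
			if !ok {
				return nil // stream finished normally
			}
			select {
			case replyChan <- chunk:
			case <-ctx.Done():
				return ctx.Err() // don't block on a reader that went away
			}
		}
	}
}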
@@ -311,7 +320,7 @@ func (m *model) updateContent() {
 func (m model) inputView() string {
 	var inputView string
-	if m.isWaiting {
+	if m.waitingForReply {
 		inputView = inputStyle.Faint(true).Render(m.input.View())
 	} else {
 		inputView = inputStyle.Render(m.input.View())
 	}
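
Taken together, the lifecycle is: create a cancellable context when the request starts, keep the CancelFunc on the model, call it on ctrl+c, and clear it when msgResponseEnd arrives. An end-to-end sketch of that lifecycle outside Bubble Tea, all names illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

// session holds the CancelFunc for the one in-flight request, just as the
// TUI model does above.
type session struct {
	cancel context.CancelFunc
}

// start launches a fake streaming reply that stops once the context is
// cancelled.
func (s *session) start() <-chan string {
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel

	out := make(chan string)
	go func() {
		defer close(out)
		for i := 0; ctx.Err() == nil; i++ {
			select {
			case <-ctx.Done(): // the user pressed ctrl+c
				return
			case out <- fmt.Sprintf("chunk %d", i):
				time.Sleep(50 * time.Millisecond)
			}
		}
	}()
	return out
}

func main() {
	s := &session{}
	chunks := s.start()

	// Simulate ctrl+c partway through the reply.
	go func() {
		time.Sleep(175 * time.Millisecond)
		s.cancel()
	}()

	for c := range chunks {
		fmt.Println(c)
	}
	fmt.Println("request cancelled, back to the prompt")
}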