Hopeful fix to race condition in tui's streamed response handling
parent f05e2e30f7
commit 1996300c40
@@ -81,7 +81,6 @@ type Model struct {
 	selectedMessage int
 	editorTarget    editorTarget
 	stopSignal      chan struct{}
-	replyChan       chan conversation.Message
 	chatReplyChunks chan provider.Chunk
 	persistence     bool // whether we will save new messages in the conversation
 
@@ -134,8 +133,7 @@ func Chat(app *model.AppModel) *Model {
 		persistence: true,
 
 		stopSignal:      make(chan struct{}),
-		replyChan:       make(chan conversation.Message),
-		chatReplyChunks: make(chan provider.Chunk),
+		chatReplyChunks: make(chan provider.Chunk, 1),
 
 		wrap:            true,
 		selectedMessage: -1,
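The constructor change above also switches chatReplyChunks from an unbuffered channel to one with a one-element buffer. As a general point of Go channel semantics (an illustration, not lmcli code): a send on an unbuffered channel only completes once a receiver is ready, while a channel made with capacity 1 accepts a single value even when nothing is receiving yet.

package main

import "fmt"

func main() {
	unbuffered := make(chan int)  // like the old make(chan provider.Chunk)
	buffered := make(chan int, 1) // like the new make(chan provider.Chunk, 1)

	// A send into the buffered channel completes immediately; the value
	// waits in the buffer until someone receives it.
	buffered <- 1
	fmt.Println(<-buffered) // 1

	// A send into the unbuffered channel would block here, because no
	// goroutine is currently receiving from it.
	select {
	case unbuffered <- 1:
		fmt.Println("sent") // not reached in this example
	default:
		fmt.Println("send would block") // printed
	}
}

Presumably the one-element buffer lets the streaming goroutine queue a chunk and move on instead of blocking on every send while the TUI is busy handling a message.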
@@ -5,6 +5,7 @@ import (
 
 	"git.mlow.ca/mlow/lmcli/pkg/api"
 	"git.mlow.ca/mlow/lmcli/pkg/conversation"
+	"git.mlow.ca/mlow/lmcli/pkg/provider"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/model"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
 	tea "github.com/charmbracelet/bubbletea"
@@ -127,6 +128,15 @@ func (m *Model) promptLLM() tea.Cmd {
 		m.spinner.Tick,
 		func() tea.Msg {
 			resp, err := m.App.Prompt(m.App.Messages, m.chatReplyChunks, m.stopSignal)
+			// These empty chunk sends prevent a race condition where a final
+			// chunk may be received on m.chatReplyChunks after the
+			// msgChatResponse message is handled, resulting in that chunk
+			// appearing twice at the end of the final output
+			// One send reduces the frequency of the race, two seems to
+			// eliminate it
+			m.chatReplyChunks <- provider.Chunk{}
+			m.chatReplyChunks <- provider.Chunk{}
+
 			if err != nil {
 				return msgChatResponseError{Err: err}
 			}
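The comments in the hunk above describe the mitigation: after streaming finishes, the goroutine pushes two zero-value chunks through the same channel before the final response message is produced, so the consumer has necessarily drained any real chunk still in flight. Below is a minimal, self-contained sketch of that sentinel-send idea using plain goroutines and hypothetical names (Chunk, done) in place of lmcli's bubbletea plumbing; it illustrates the pattern, not the project's actual code.

package main

import (
	"fmt"
	"strings"
)

// Chunk stands in for provider.Chunk; Content carries a piece of streamed text.
type Chunk struct{ Content string }

func main() {
	chunks := make(chan Chunk, 1) // buffered, mirroring the constructor change
	done := make(chan string)

	// Producer: stream some chunks, then send two empty sentinel chunks
	// before announcing the final response, mirroring promptLLM above.
	go func() {
		for _, piece := range []string{"Hello", ", ", "world"} {
			chunks <- Chunk{Content: piece}
		}
		chunks <- Chunk{} // sentinel: forces the consumer to catch up
		chunks <- Chunk{} // second sentinel, as in the commit
		done <- "Hello, world"
	}()

	var streamed strings.Builder
	for {
		select {
		case c := <-chunks:
			if c.Content == "" {
				continue // skip empty sentinel chunks, as in the Update hunk below
			}
			streamed.WriteString(c.Content)
		case final := <-done:
			// Every real chunk has already been received by this point; at
			// most an empty sentinel may still be sitting in the buffer.
			fmt.Println("streamed:", streamed.String())
			fmt.Println("final:   ", final)
			return
		}
	}
}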
@@ -108,6 +108,7 @@ func (m *Model) Update(msg tea.Msg) (shared.ViewModel, tea.Cmd) {
 			cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
 
 			if msg.Content == "" {
+				// skip empty chunks
 				break
 			}
 