Hopeful fix to race condition in tui's streamed response handling

Matt Low 2024-12-11 07:17:53 +00:00
parent f05e2e30f7
commit 1996300c40
3 changed files with 12 additions and 3 deletions

@@ -81,7 +81,6 @@ type Model struct {
selectedMessage int
editorTarget editorTarget
stopSignal chan struct{}
replyChan chan conversation.Message
chatReplyChunks chan provider.Chunk
persistence bool // whether we will save new messages in the conversation
@@ -134,8 +133,7 @@ func Chat(app *model.AppModel) *Model {
persistence: true,
stopSignal: make(chan struct{}),
replyChan: make(chan conversation.Message),
chatReplyChunks: make(chan provider.Chunk),
chatReplyChunks: make(chan provider.Chunk, 1),
wrap: true,
selectedMessage: -1,
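
The first change above removes the replyChan field and gives chatReplyChunks a one-slot buffer. The practical difference, in plain Go terms: a send on an unbuffered channel blocks until a receiver is ready, while a one-slot buffer lets a single pending chunk sit in the channel. A minimal standalone sketch of that behavior, using an illustrative Chunk type rather than lmcli's provider.Chunk:

package main

import "fmt"

type Chunk struct{ Content string }

func main() {
    buffered := make(chan Chunk, 1)

    // Returns immediately: the value parks in the buffer even though
    // nothing is receiving yet.
    buffered <- Chunk{Content: "hello"}
    fmt.Println("buffered send completed with no receiver waiting")
    fmt.Println("received later:", (<-buffered).Content)

    unbuffered := make(chan Chunk)
    // A bare `unbuffered <- Chunk{...}` here would deadlock: the send
    // cannot complete until another goroutine receives the value.
    go func() { unbuffered <- Chunk{Content: "world"} }()
    fmt.Println("received:", (<-unbuffered).Content)
}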

@@ -5,6 +5,7 @@ import (
"git.mlow.ca/mlow/lmcli/pkg/api"
"git.mlow.ca/mlow/lmcli/pkg/conversation"
"git.mlow.ca/mlow/lmcli/pkg/provider"
"git.mlow.ca/mlow/lmcli/pkg/tui/model"
"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
tea "github.com/charmbracelet/bubbletea"
@@ -127,6 +128,15 @@ func (m *Model) promptLLM() tea.Cmd {
m.spinner.Tick,
func() tea.Msg {
resp, err := m.App.Prompt(m.App.Messages, m.chatReplyChunks, m.stopSignal)
// These empty chunk sends prevent a race condition where a final
// chunk may be received on m.chatReplyChunks after the
// msgChatResponse message is handled, resulting in that chunk
// appearing twice at the end of the final output.
// One send reduces the frequency of the race; two seem to
// eliminate it.
m.chatReplyChunks <- provider.Chunk{}
m.chatReplyChunks <- provider.Chunk{}
if err != nil {
return msgChatResponseError{Err: err}
}
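
One way to read why two sends help, given the one-slot buffer introduced above: the first empty send cannot complete until the last real chunk has been taken off chatReplyChunks, and the second cannot complete until that first sentinel has been taken off too, so by the time this command returns its result the receiving side has already pulled the final real chunk. Whether the corresponding message is also handled first is up to the runtime's ordering, hence the "hopeful" in the commit title. The sketch below reproduces the ordering guarantee with plain goroutines and channels; the names and the done channel are illustrative, not the lmcli or Bubble Tea API:

package main

import "fmt"

type Chunk struct{ Content string }

func main() {
    chunks := make(chan Chunk, 1)
    done := make(chan string)

    // Producer: stands in for App.Prompt streaming a reply.
    go func() {
        for _, word := range []string{"streamed ", "response ", "text"} {
            chunks <- Chunk{Content: word}
        }
        // Flush: once these sends return, every real chunk above has
        // already been received by the consumer below.
        chunks <- Chunk{}
        chunks <- Chunk{}
        done <- "full response"
    }()

    // Consumer: stands in for the Update loop handling chunk messages.
    var output string
    for {
        select {
        case c := <-chunks:
            if c.Content == "" {
                continue // drop the empty sentinel chunks
            }
            output += c.Content
        case final := <-done:
            fmt.Printf("assembled %q; completion message %q\n", output, final)
            return
        }
    }
}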

@@ -108,6 +108,7 @@ func (m *Model) Update(msg tea.Msg) (shared.ViewModel, tea.Cmd) {
cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
if msg.Content == "" {
// skip empty chunks
break
}
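
Finally, the receiving side has to drop those sentinels so they never reach the rendered reply: the handler re-arms the wait for the next chunk unconditionally, then bails out of the case when the content is empty. A hedged sketch of that shape, with hypothetical names (chunkMsg, waitForChunk) standing in for the real message types and the tea.Cmd plumbing:

package main

import "fmt"

type chunkMsg struct{ Content string }

type model struct {
    reply  string
    chunks chan chunkMsg
}

// waitForChunk plays the role of a Bubble Tea command here: a function
// that blocks on the next chunk and returns it as a message.
func (m *model) waitForChunk() func() chunkMsg {
    return func() chunkMsg { return <-m.chunks }
}

// update loosely mirrors the shape of the switch in the real Update.
func (m *model) update(msg any) []func() chunkMsg {
    var cmds []func() chunkMsg
    switch msg := msg.(type) {
    case chunkMsg:
        cmds = append(cmds, m.waitForChunk()) // always wait for the next chunk
        if msg.Content == "" {
            break // skip empty (sentinel) chunks
        }
        m.reply += msg.Content
    }
    return cmds
}

func main() {
    m := &model{chunks: make(chan chunkMsg, 1)}
    go func() {
        for _, c := range []string{"strea", "med", "", ""} {
            m.chunks <- chunkMsg{Content: c}
        }
    }()

    cmd := m.waitForChunk()
    for i := 0; i < 4; i++ {
        cmds := m.update(cmd())
        cmd = cmds[0]
    }
    fmt.Println("reply:", m.reply) // prints "reply: streamed"; both sentinels were dropped
}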