From 1996300c40af422a0a8901c1e589c3d877be5e13 Mon Sep 17 00:00:00 2001
From: Matt Low
Date: Wed, 11 Dec 2024 07:17:53 +0000
Subject: [PATCH] Tentative fix for race condition in TUI's streamed response
 handling

---
 pkg/tui/views/chat/chat.go   |  4 +---
 pkg/tui/views/chat/cmds.go   | 10 ++++++++++
 pkg/tui/views/chat/update.go |  1 +
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/pkg/tui/views/chat/chat.go b/pkg/tui/views/chat/chat.go
index 1755418..5231e04 100644
--- a/pkg/tui/views/chat/chat.go
+++ b/pkg/tui/views/chat/chat.go
@@ -81,7 +81,6 @@ type Model struct {
 	selectedMessage int
 	editorTarget    editorTarget
 	stopSignal      chan struct{}
-	replyChan       chan conversation.Message
 	chatReplyChunks chan provider.Chunk
 	persistence     bool // whether we will save new messages in the conversation
 
@@ -134,8 +133,7 @@ func Chat(app *model.AppModel) *Model {
 		persistence: true,
 
 		stopSignal:      make(chan struct{}),
-		replyChan:       make(chan conversation.Message),
-		chatReplyChunks: make(chan provider.Chunk),
+		chatReplyChunks: make(chan provider.Chunk, 1),
 
 		wrap:            true,
 		selectedMessage: -1,
diff --git a/pkg/tui/views/chat/cmds.go b/pkg/tui/views/chat/cmds.go
index 3a20acb..0679817 100644
--- a/pkg/tui/views/chat/cmds.go
+++ b/pkg/tui/views/chat/cmds.go
@@ -5,6 +5,7 @@ import (
 
 	"git.mlow.ca/mlow/lmcli/pkg/api"
 	"git.mlow.ca/mlow/lmcli/pkg/conversation"
+	"git.mlow.ca/mlow/lmcli/pkg/provider"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/model"
 	"git.mlow.ca/mlow/lmcli/pkg/tui/shared"
 	tea "github.com/charmbracelet/bubbletea"
@@ -127,6 +128,15 @@ func (m *Model) promptLLM() tea.Cmd {
 		m.spinner.Tick,
 		func() tea.Msg {
 			resp, err := m.App.Prompt(m.App.Messages, m.chatReplyChunks, m.stopSignal)
+			// These empty chunk sends prevent a race condition where a
+			// final chunk may be received on m.chatReplyChunks after the
+			// msgChatResponse message has been handled, causing that
+			// chunk to appear twice at the end of the final output.
+			// One send reduces the frequency of the race; two seem to
+			// eliminate it.
+			m.chatReplyChunks <- provider.Chunk{}
+			m.chatReplyChunks <- provider.Chunk{}
+
 			if err != nil {
 				return msgChatResponseError{Err: err}
 			}
diff --git a/pkg/tui/views/chat/update.go b/pkg/tui/views/chat/update.go
index e9b4b44..61d15d5 100644
--- a/pkg/tui/views/chat/update.go
+++ b/pkg/tui/views/chat/update.go
@@ -108,6 +108,7 @@ func (m *Model) Update(msg tea.Msg) (shared.ViewModel, tea.Cmd) {
 		cmds = append(cmds, m.waitForResponseChunk()) // wait for the next chunk
 
 		if msg.Content == "" {
+			// skip empty chunks
 			break
 		}
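
Note on the surrounding pattern: the chunk consumer follows Bubble Tea's usual
re-arming read, where a command blocks on the channel, delivers one chunk as a
message, and Update queues the command again (visible above as the call to
m.waitForResponseChunk()). A minimal sketch of what that command plausibly
looks like; msgChatResponseChunk is a hypothetical message name, and the real
type in update.go may differ:

	func (m *Model) waitForResponseChunk() tea.Cmd {
		return func() tea.Msg {
			// Blocks until promptLLM's streaming goroutine sends the
			// next chunk on the shared channel.
			return msgChatResponseChunk(<-m.chatReplyChunks)
		}
	}

Because this read runs concurrently with the promptLLM command that returns
the final msgChatResponse, a chunk still sitting in the channel can be
delivered after the full response has already been handled, which is the
duplication this patch works around.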
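
The shape of the fix can be reproduced outside Bubble Tea. The sketch below is
an analogue, not the project's code: a producer streams chunks on a
capacity-1 channel and then signals completion, while a consumer re-arms reads
in a loop. Without the sentinel sends, the completion signal can overtake the
last buffered chunk (dropped here; in the TUI the symptom was the chunk being
rendered twice). With them, the producer cannot signal completion until every
real chunk has been consumed.

	package main

	import "fmt"

	// Chunk stands in for provider.Chunk.
	type Chunk struct{ Content string }

	func main() {
		chunks := make(chan Chunk, 1) // capacity 1, as in the patch
		done := make(chan struct{})

		// Producer: plays the role of the func() tea.Msg body in promptLLM.
		go func() {
			for _, s := range []string{"Hello", ", ", "world"} {
				chunks <- Chunk{Content: s}
			}
			// Sentinel flushes. The first send blocks until any real
			// chunk still buffered has been consumed; the second blocks
			// until the first sentinel has been consumed. After both
			// return, no real chunk can arrive after the done signal.
			chunks <- Chunk{}
			chunks <- Chunk{}
			close(done) // plays the role of returning msgChatResponse
		}()

		// Consumer: plays the role of the Update loop re-arming reads.
		for {
			select {
			case c := <-chunks:
				if c.Content == "" {
					continue // skip empty chunks, as update.go now does
				}
				fmt.Print(c.Content)
			case <-done:
				fmt.Println()
				return
			}
		}
	}

A more direct alternative, if the message flow allowed it, would be to close
the chunk channel when streaming ends and treat channel closure as the
completion signal, removing the race rather than narrowing it; the sentinel
approach has the advantage of leaving the existing message types untouched.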