diff --git a/pkg/lmcli/provider/anthropic/anthropic.go b/pkg/lmcli/provider/anthropic/anthropic.go
index 706f449..51586c4 100644
--- a/pkg/lmcli/provider/anthropic/anthropic.go
+++ b/pkg/lmcli/provider/anthropic/anthropic.go
@@ -229,8 +229,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
 		case "message_start":
 			// noop
 		case "ping":
-			// write an empty string to signal start of text
-			output <- ""
+			// signals start of text - currently ignoring
 		case "content_block_start":
 			// ignore?
 		case "content_block_delta":
diff --git a/pkg/tui/views/chat/chat.go b/pkg/tui/views/chat/chat.go
index 4295476..cd69232 100644
--- a/pkg/tui/views/chat/chat.go
+++ b/pkg/tui/views/chat/chat.go
@@ -278,6 +278,8 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.updateContent()
 		m.content.GotoBottom()
 	case msgResponseChunk:
+		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
+
 		chunk := string(msg)
 		if chunk == "" {
 			break
@@ -295,7 +297,6 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 			})
 		}
 		m.updateContent()
-		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
 
 		// show cursor and reset blink interval (simulate typing)
 		m.replyCursor.Blink = false
@@ -304,7 +305,8 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.tokenCount++
 		m.elapsed = time.Now().Sub(m.startTime)
 	case msgAssistantReply:
-		// the last reply that was being worked on is finished
+		cmds = append(cmds, m.waitForReply()) // wait for the next reply
+
 		reply := models.Message(msg)
 		reply.Content = strings.TrimSpace(reply.Content)
 
@@ -332,7 +334,6 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 
 		}
 		m.updateContent()
-		cmds = append(cmds, m.waitForReply())
	case msgResponseEnd:
 		m.waitingForReply = false
 		last := len(m.messages) - 1
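
The chat.go hunks hinge on Bubble Tea's channel-subscription convention: a `tea.Cmd` that reads from a channel delivers exactly one message, so `Update` must return a fresh command to keep receiving. With the re-queue previously placed at the bottom of the `msgResponseChunk` case, the early `break` on an empty chunk appears to skip it and stall the stream; moving `waitForChunk()`/`waitForReply()` to the top of each case closes that gap, which in turn is what seems to let the Anthropic provider stop emitting the empty placeholder string on `"ping"`. The patch does not show `waitForChunk` itself; below is a minimal sketch of the pattern it presumably follows, where the `replyChunkChan` field name is an assumption, not the real `Model` definition:

```go
// Hypothetical sketch of the channel-backed command pattern behind
// waitForChunk. The replyChunkChan field is an assumed name; the
// actual Model definition is not part of this diff.
package chat

import tea "github.com/charmbracelet/bubbletea"

type msgResponseChunk string

type Model struct {
	replyChunkChan chan string // fed by the provider's streaming goroutine
}

// waitForChunk returns a tea.Cmd that blocks until the next chunk
// arrives, then delivers it to Update as a msgResponseChunk. Each
// command yields exactly one message, so Update must queue another
// waitForChunk to keep the stream flowing - hence the patch moving
// the append to the top of the case, before any early break.
func (m Model) waitForChunk() tea.Cmd {
	return func() tea.Msg {
		return msgResponseChunk(<-m.replyChunkChan)
	}
}
```

Under that reading, re-subscribing unconditionally at the top of the case means an empty or otherwise short-circuited chunk can no longer drop the subscription, so the two sides of the patch are consistent with each other.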