Fixed regression from 3536438d
We were sending an empty string to the output channel whenever a `ping` message was received from Anthropic's API. This broke the TUI once we added an empty-chunk check (which mistakenly stopped waiting for future chunks after an empty one arrived). This commit stops sending an empty string on Anthropic's `ping` message, and it updates the handling of `msgResponseChunk` and `msgAssistantReply` so it is harder to forget to wait for the next chunk/reply.
This commit is contained in:
parent 58e1b84fea
commit b29a4c8b84
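For context on the TUI side of the fix: streamed text reaches the Bubble Tea program through commands. A command that reads from a channel delivers exactly one message, so Update has to queue it again after every message, otherwise the program silently stops listening — which is the failure described above. A minimal sketch of that pattern, assuming a channel-backed Model (waitForChunk and Model are names from the diff below; the field and message types here are illustrative):

package tui

import tea "github.com/charmbracelet/bubbletea"

// msgResponseChunk and the chunks field are assumed shapes for this
// sketch; the repo's real definitions may differ.
type msgResponseChunk string

type Model struct {
	chunks chan string // text deltas coming from the API client
}

// waitForChunk returns a command that blocks until one chunk arrives.
// Bubble Tea runs a command exactly once, so Update must queue
// waitForChunk again after each msgResponseChunk to keep reading.
func (m Model) waitForChunk() tea.Cmd {
	return func() tea.Msg {
		return msgResponseChunk(<-m.chunks)
	}
}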
@@ -229,8 +229,7 @@ func (c *AnthropicClient) CreateChatCompletionStream(
 		case "message_start":
 			// noop
 		case "ping":
-			// write an empty string to signal start of text
-			output <- ""
+			// signals start of text - currently ignoring
 		case "content_block_start":
 			// ignore?
 		case "content_block_delta":
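Anthropic's streaming API interleaves bookkeeping events (message_start, ping, content_block_start) with the content_block_delta events that carry actual text. A minimal sketch of the behavior this hunk moves to, using an assumed event struct rather than the client's real SSE decoding:

package tui

// streamEvent is an assumed stand-in for the client's decoded event.
type streamEvent struct {
	Type string
	Text string // delta text; only set on content_block_delta
}

// forwardDeltas sends only real text to output. Ping and the other
// bookkeeping events are dropped instead of being forwarded as "",
// which is what tripped the TUI's empty-chunk check before this fix.
func forwardDeltas(events <-chan streamEvent, output chan<- string) {
	for ev := range events {
		switch ev.Type {
		case "message_start", "ping", "content_block_start":
			// no text payload - nothing to forward
		case "content_block_delta":
			output <- ev.Text
		}
	}
	close(output)
}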
@@ -278,6 +278,8 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.updateContent()
 		m.content.GotoBottom()
 	case msgResponseChunk:
+		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk
+
 		chunk := string(msg)
 		if chunk == "" {
 			break
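The reason the append moves to the top of the case: break leaves the switch immediately, so the old trailing waitForChunk call (deleted in the next hunk) never ran when an empty chunk slipped through, and the TUI stopped listening for the rest of the stream. Queuing the next wait before the empty-chunk check makes the early exit harmless. A condensed sketch, reusing the assumed types from the sketch above:

package tui

import tea "github.com/charmbracelet/bubbletea"

// handleChunk condenses the fixed control flow: re-arm the wait
// unconditionally, then bail out on empty chunks without losing the
// subscription to the stream.
func (m Model) handleChunk(msg msgResponseChunk, cmds []tea.Cmd) []tea.Cmd {
	cmds = append(cmds, m.waitForChunk()) // queued even if we return early
	if string(msg) == "" {
		return cmds // nothing to render, but the next wait is already armed
	}
	// ... append the chunk to the in-progress reply and refresh the view ...
	return cmds
}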
@@ -295,7 +297,6 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 			})
 		}
 		m.updateContent()
-		cmds = append(cmds, m.waitForChunk()) // wait for the next chunk

 		// show cursor and reset blink interval (simulate typing)
 		m.replyCursor.Blink = false
@@ -304,7 +305,8 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		m.tokenCount++
 		m.elapsed = time.Now().Sub(m.startTime)
 	case msgAssistantReply:
-		// the last reply that was being worked on is finished
+		cmds = append(cmds, m.waitForReply()) // wait for the next reply
+
 		reply := models.Message(msg)
 		reply.Content = strings.TrimSpace(reply.Content)

@@ -332,7 +334,6 @@ func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
 		}

 		m.updateContent()
-		cmds = append(cmds, m.waitForReply())
 	case msgResponseEnd:
 		m.waitingForReply = false
 		last := len(m.messages) - 1