tui: handle text wrapping ourselves, add ctrl+w wrap toggle

Gets rid of those pesky trailing characters left over from styled wrapping.
This commit is contained in:
Matt Low 2024-03-17 00:25:42 +00:00
parent a8ffdc156a
commit 6f737ad19c
1 changed file with 17 additions and 4 deletions

View File

@ -23,6 +23,7 @@ import (
"github.com/charmbracelet/bubbles/viewport" "github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea" tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss" "github.com/charmbracelet/lipgloss"
"github.com/muesli/reflow/wordwrap"
) )
type focusState int type focusState int
@ -59,6 +60,7 @@ type model struct {
// ui state // ui state
focus focusState focus focusState
wrap bool // whether message content is wrapped to viewport width
status string // a general status message status string // a general status message
highlightCache []string // a cache of syntax highlighted message content highlightCache []string // a cache of syntax highlighted message content
messageOffsets []int messageOffsets []int
@ -156,6 +158,9 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
} }
case "ctrl+p": case "ctrl+p":
m.persistence = !m.persistence m.persistence = !m.persistence
case "ctrl+w":
m.wrap = !m.wrap
m.updateContent()
case "q": case "q":
if m.focus != focusInput { if m.focus != focusInput {
return m, tea.Quit return m, tea.Quit
@ -403,6 +408,7 @@ func initialModel(ctx *lmcli.Context, convShortname string) model {
replyChan: make(chan models.Message), replyChan: make(chan models.Message),
replyChunkChan: make(chan string), replyChunkChan: make(chan string),
wrap: true,
selectedMessage: -1, selectedMessage: -1,
} }
@ -696,7 +702,7 @@ func (m *model) setMessageContents(i int, content string) {
func (m *model) updateContent() { func (m *model) updateContent() {
atBottom := m.content.AtBottom() atBottom := m.content.AtBottom()
m.content.SetContent(conversationStyle.Render(m.conversationView())) m.content.SetContent(m.conversationView())
if atBottom { if atBottom {
// if we were at bottom before the update, scroll with the output // if we were at bottom before the update, scroll with the output
m.content.GotoBottom() m.content.GotoBottom()
@ -757,8 +763,15 @@ func (m *model) conversationView() string {
} else { } else {
highlighted = m.highlightCache[i] highlighted = m.highlightCache[i]
} }
contents := messageStyle.Width(m.content.Width).Render(highlighted) var contents string
sb.WriteString(contents) if m.wrap {
wrapWidth := m.content.Width - messageStyle.GetHorizontalPadding() - 2
wrapped := wordwrap.String(highlighted, wrapWidth)
contents = wrapped
} else {
contents = highlighted
}
sb.WriteString(messageStyle.Width(0).Render(contents))
lineCnt += lipgloss.Height(contents) lineCnt += lipgloss.Height(contents)
} }
@ -767,7 +780,7 @@ func (m *model) conversationView() string {
lineCnt += 1 lineCnt += 1
} }
} }
return sb.String() return conversationStyle.Render(sb.String())
} }
func Launch(ctx *lmcli.Context, convShortname string) error { func Launch(ctx *lmcli.Context, convShortname string) error {