refactor tui to re-render history for streaming

This commit is contained in:
aibrahim-oai
2025-07-31 21:28:48 -07:00
parent ad0295b893
commit 16bfbc883e
2 changed files with 119 additions and 56 deletions

View File

@@ -17,8 +17,7 @@ use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::terminal::supports_keyboard_enhancement;
use ratatui::layout::Offset;
use ratatui::prelude::Backend;
use ratatui::text::Line;
use ratatui::layout::Rect;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
@@ -57,8 +56,6 @@ pub(crate) struct App<'a> {
/// True when a redraw has been scheduled but not yet executed.
pending_redraw: Arc<AtomicBool>,
pending_history_lines: Vec<Line<'static>>,
/// Stored parameters needed to instantiate the ChatWidget later, e.g.,
/// after dismissing the Git-repo warning.
chat_args: Option<ChatWidgetArgs>,
@@ -163,7 +160,6 @@ impl App<'_> {
let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
Self {
app_event_tx,
pending_history_lines: Vec::new(),
app_event_rx,
app_state,
config,
@@ -210,8 +206,9 @@ impl App<'_> {
while let Ok(event) = self.app_event_rx.recv() {
match event {
AppEvent::InsertHistory(lines) => {
self.pending_history_lines.extend(lines);
self.app_event_tx.send(AppEvent::RequestRedraw);
if let AppState::Chat { widget } = &mut self.app_state {
widget.add_history_lines(lines);
}
}
AppEvent::RequestRedraw => {
self.schedule_redraw();
@@ -406,30 +403,15 @@ impl App<'_> {
}
let size = terminal.size()?;
let desired_height = match &self.app_state {
AppState::Chat { widget } => widget.desired_height(size.width),
AppState::GitWarning { .. } => 10,
let area = Rect {
x: 0,
y: 0,
width: size.width,
height: size.height,
};
let mut area = terminal.viewport_area;
area.height = desired_height.min(size.height);
area.width = size.width;
if area.bottom() > size.height {
terminal
.backend_mut()
.scroll_region_up(0..area.top(), area.bottom() - size.height)?;
area.y = size.height - area.height;
}
if area != terminal.viewport_area {
terminal.clear()?;
terminal.set_viewport_area(area);
}
if !self.pending_history_lines.is_empty() {
crate::insert_history::insert_history_lines(
terminal,
self.pending_history_lines.clone(),
);
self.pending_history_lines.clear();
terminal.clear()?;
}
match &mut self.app_state {
AppState::Chat { widget } => {

View File

@@ -28,8 +28,12 @@ use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Widget;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::mpsc::unbounded_channel;
@@ -43,6 +47,7 @@ use crate::exec_command::strip_bash_lc_and_escape;
use crate::history_cell::CommandOutput;
use crate::history_cell::HistoryCell;
use crate::history_cell::PatchEventType;
use crate::markdown::append_markdown;
use crate::user_approval_widget::ApprovalRequest;
use codex_file_search::FileMatch;
@@ -60,10 +65,14 @@ pub(crate) struct ChatWidget<'a> {
initial_user_message: Option<UserMessage>,
token_usage: TokenUsage,
reasoning_buffer: String,
// Buffer for streaming assistant answer text; we do not surface partial
// We wait for the final AgentMessage event and then emit the full text
// at once into scrollback so the history contains a single message.
/// Buffer for streaming assistant answer text.
answer_buffer: String,
/// Full history rendered by the widget.
history: Vec<Line<'static>>,
/// Index where the current streaming agent message begins in `history`.
current_answer_start: Option<usize>,
/// Index where the current streaming reasoning message begins in `history`.
current_reasoning_start: Option<usize>,
running_commands: HashMap<String, RunningCommand>,
}
@@ -151,6 +160,9 @@ impl ChatWidget<'_> {
token_usage: TokenUsage::default(),
reasoning_buffer: String::new(),
answer_buffer: String::new(),
history: Vec::new(),
current_answer_start: None,
current_reasoning_start: None,
running_commands: HashMap::new(),
}
}
@@ -177,8 +189,12 @@ impl ChatWidget<'_> {
}
fn add_to_history(&mut self, cell: HistoryCell) {
self.app_event_tx
.send(AppEvent::InsertHistory(cell.plain_lines()));
self.add_history_lines(cell.plain_lines());
}
/// Append pre-rendered lines to the widget-owned history and schedule a
/// repaint so they become visible on the next draw.
pub(crate) fn add_history_lines(&mut self, mut lines: Vec<Line<'static>>) {
    // `append` moves every element out of `lines`, same as `extend` would.
    self.history.append(&mut lines);
    self.request_redraw();
}
fn submit_user_message(&mut self, user_message: UserMessage) {
@@ -236,10 +252,6 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
// Final assistant answer. Prefer the fully provided message
// from the event; if it is empty fall back to any accumulated
// delta buffer (some providers may only stream deltas and send
// an empty final message).
let full = if message.is_empty() {
std::mem::take(&mut self.answer_buffer)
} else {
@@ -247,26 +259,48 @@ impl ChatWidget<'_> {
message
};
if !full.is_empty() {
self.add_to_history(HistoryCell::new_agent_message(&self.config, full));
let lines = build_agent_message_lines(&self.config, &full, true);
if let Some(start) = self.current_answer_start.take() {
self.history.truncate(start);
self.history.extend(lines);
} else {
self.history.extend(lines);
}
self.request_redraw();
}
}
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
self.answer_buffer.push_str(&delta);
let lines = build_agent_message_lines(&self.config, &self.answer_buffer, false);
match self.current_answer_start {
Some(start) => {
self.history.truncate(start);
self.history.extend(lines);
}
None => {
self.current_answer_start = Some(self.history.len());
self.history.extend(lines);
}
}
self.request_redraw();
}
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
// Buffer only do not emit partial lines. This avoids cases
// where long responses appear truncated if the terminal
// wrapped early. The full message is emitted on
// AgentMessage.
self.answer_buffer.push_str(&delta);
}
EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
// Buffer only disable incremental reasoning streaming so we
// avoid truncated intermediate lines. Full text emitted on
// AgentReasoning.
self.reasoning_buffer.push_str(&delta);
let lines =
build_agent_reasoning_lines(&self.config, &self.reasoning_buffer, false);
match self.current_reasoning_start {
Some(start) => {
self.history.truncate(start);
self.history.extend(lines);
}
None => {
self.current_reasoning_start = Some(self.history.len());
self.history.extend(lines);
}
}
self.request_redraw();
}
EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
// Emit full reasoning text once. Some providers might send
// final event with empty text if only deltas were used.
let full = if text.is_empty() {
std::mem::take(&mut self.reasoning_buffer)
} else {
@@ -274,9 +308,15 @@ impl ChatWidget<'_> {
text
};
if !full.is_empty() {
self.add_to_history(HistoryCell::new_agent_reasoning(&self.config, full));
let lines = build_agent_reasoning_lines(&self.config, &full, true);
if let Some(start) = self.current_reasoning_start.take() {
self.history.truncate(start);
self.history.extend(lines);
} else {
self.history.extend(lines);
}
self.request_redraw();
}
self.request_redraw();
}
EventMsg::TaskStarted => {
self.bottom_pane.clear_ctrl_c_quit_hint();
@@ -506,10 +546,31 @@ impl ChatWidget<'_> {
impl WidgetRef for &ChatWidget<'_> {
    /// Render the chat: scrollback history on top, interactive bottom pane
    /// anchored beneath it, pinned to the bottom of the available area.
    fn render_ref(&self, area: Rect, buf: &mut Buffer) {
        // Clamp the bottom pane to the available height; without this,
        // `bottom_area` below could extend past `area`/`buf` and panic
        // inside ratatui's buffer indexing on very small terminals.
        let bottom_height = self.bottom_pane.desired_height(area.width).min(area.height);
        let history_height = area.height.saturating_sub(bottom_height);
        if history_height > 0 {
            let history_area = Rect {
                x: area.x,
                y: area.y,
                width: area.width,
                height: history_height,
            };
            // Saturate rather than `as u16`: a raw cast silently truncates
            // for histories longer than u16::MAX and would scroll to a
            // bogus offset.
            let total_lines = u16::try_from(self.history.len()).unwrap_or(u16::MAX);
            // Scroll so the most recent lines are visible (bottom-pinned).
            // NOTE(review): this counts logical lines, not wrapped rows, so
            // the offset is approximate when long lines wrap — confirm
            // whether that matters for the intended UX.
            let scroll = total_lines.saturating_sub(history_height);
            // NOTE(review): cloning the full history each frame is O(n);
            // fine for modest histories, worth revisiting if they grow.
            Paragraph::new(self.history.clone())
                .wrap(Wrap { trim: false })
                .scroll((scroll, 0))
                .render(history_area, buf);
        }
        let bottom_area = Rect {
            x: area.x,
            y: area.y + history_height,
            width: area.width,
            height: bottom_height,
        };
        (&self.bottom_pane).render(bottom_area, buf);
    }
}
@@ -540,3 +601,23 @@ fn add_token_usage(current_usage: &TokenUsage, new_usage: &TokenUsage) -> TokenU
total_tokens: current_usage.total_tokens + new_usage.total_tokens,
}
}
/// Build the history lines for an assistant answer: a "codex" header line
/// followed by the markdown-rendered `message`. When `finalize` is true a
/// trailing blank line is appended to separate it from subsequent output.
fn build_agent_message_lines(config: &Config, message: &str, finalize: bool) -> Vec<Line<'static>> {
    let mut lines = vec![Line::from("codex".magenta().bold())];
    append_markdown(message, &mut lines, config);
    // `bool::then` yields Some(blank line) only when finalizing; extending
    // with None is a no-op, so this matches the original `if` exactly.
    lines.extend(finalize.then(|| Line::from("")));
    lines
}
/// Build the history lines for agent reasoning: a "thinking" header line
/// followed by the markdown-rendered `text`. When `finalize` is true a
/// trailing blank line is appended to separate it from subsequent output.
fn build_agent_reasoning_lines(config: &Config, text: &str, finalize: bool) -> Vec<Line<'static>> {
    let mut lines = vec![Line::from("thinking".magenta().italic())];
    append_markdown(text, &mut lines, config);
    // Appends one blank line iff `finalize`; identical to the original `if`.
    lines.extend(finalize.then(|| Line::from("")));
    lines
}