Files
codex/codex-rs/tui/src/app.rs
Devon Rifkin fe03320791 ollama: default to Responses API for built-ins (#8798)
This is an alternate PR to solving the same problem as
<https://github.com/openai/codex/pull/8227>.

In this PR, when Ollama is used via `--oss` (or via `model_provider =
"ollama"`), we default it to use the Responses format. At runtime, we do
an Ollama version check, and if the version is older than when Responses
support was added to Ollama, we print out a warning.

Because there's no way of configuring the wire api for a built-in
provider, we temporarily add a new `oss_provider`/`model_provider`
called `"ollama-chat"` that will force the chat format.

Once the `"chat"` format is fully removed (see
<https://github.com/openai/codex/discussions/7782>), `ollama-chat` can
be removed as well.

---------

Co-authored-by: Eric Traut <etraut@openai.com>
Co-authored-by: Michael Bolin <mbolin@openai.com>
2026-01-13 09:51:41 -08:00

1944 lines
79 KiB
Rust

use crate::app_backtrack::BacktrackState;
use crate::app_event::AppEvent;
#[cfg(target_os = "windows")]
use crate::app_event::WindowsSandboxEnableMode;
#[cfg(target_os = "windows")]
use crate::app_event::WindowsSandboxFallbackReason;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::ApprovalRequest;
use crate::chatwidget::ChatWidget;
use crate::chatwidget::ExternalEditorState;
use crate::diff_render::DiffSummary;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::external_editor;
use crate::file_search::FileSearchManager;
use crate::history_cell;
use crate::history_cell::HistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
use crate::resume_picker::SessionSelection;
use crate::tui;
use crate::tui::TuiEvent;
use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_core::AuthManager;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::DeprecationNoticeEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
use codex_core::protocol::Op;
use codex_core::protocol::SessionSource;
use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_protocol::ThreadId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use tokio::select;
use tokio::sync::mpsc::unbounded_channel;
#[cfg(not(debug_assertions))]
use crate::history_cell::UpdateAvailableHistoryCell;
/// User-facing hint displayed while control is handed to an external editor.
const EXTERNAL_EDITOR_HINT: &str = "Save and close external editor to continue.";
/// Information handed back to the caller when the TUI application exits.
#[derive(Debug, Clone)]
pub struct AppExitInfo {
    /// Total token usage accumulated over the session.
    pub token_usage: TokenUsage,
    /// Thread the session was attached to, if one was started.
    pub thread_id: Option<ThreadId>,
    /// Update the user confirmed during the session; applied after exit.
    pub update_action: Option<UpdateAction>,
}
/// Builds a short end-of-session summary: a token-usage line plus, when a
/// thread id is known, the `codex resume` command for continuing the session.
/// Returns `None` when no tokens were consumed (nothing worth summarizing).
fn session_summary(token_usage: TokenUsage, thread_id: Option<ThreadId>) -> Option<SessionSummary> {
    if token_usage.is_zero() {
        return None;
    }
    let resume_command = thread_id.map(|id| format!("codex resume {id}"));
    Some(SessionSummary {
        usage_line: FinalOutput::from(token_usage).to_string(),
        resume_command,
    })
}
/// Returns the skill-loading errors recorded for `cwd`, or an empty vector
/// when the response has no entry for that directory. Only the first entry
/// matching `cwd` is consulted.
fn errors_for_cwd(cwd: &Path, response: &ListSkillsResponseEvent) -> Vec<SkillErrorInfo> {
    for entry in &response.skills {
        if entry.cwd.as_path() == cwd {
            return entry.errors.clone();
        }
    }
    Vec::new()
}
/// Posts warning history cells for skills that failed to load: one summary
/// line with the error count, then one line per failing SKILL.md file.
/// No-op when `errors` is empty.
fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorInfo]) {
    if errors.is_empty() {
        return;
    }
    let summary = format!(
        "Skipped loading {} skill(s) due to invalid SKILL.md files.",
        errors.len()
    );
    app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
        crate::history_cell::new_warning_event(summary),
    )));
    for error in errors {
        let detail = format!("{}: {}", error.path.display(), error.message.as_str());
        app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
            crate::history_cell::new_warning_event(detail),
        )));
    }
}
/// Inserts a deprecation-notice history cell when a notice is present;
/// a `None` notice is a no-op.
fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option<DeprecationNoticeEvent>) {
    if let Some(DeprecationNoticeEvent { summary, details }) = notice {
        app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
            crate::history_cell::new_deprecation_notice(summary, details),
        )));
    }
}
/// End-of-session summary rendered into history when a session is replaced
/// or the app exits.
#[derive(Debug, Clone, PartialEq, Eq)]
struct SessionSummary {
    /// Human-readable token-usage line.
    usage_line: String,
    /// `codex resume <thread-id>` command, when a thread id is known.
    resume_command: Option<String>,
}
fn should_show_model_migration_prompt(
current_model: &str,
target_model: &str,
seen_migrations: &BTreeMap<String, String>,
available_models: &[ModelPreset],
) -> bool {
if target_model == current_model {
return false;
}
if let Some(seen_target) = seen_migrations.get(current_model)
&& seen_target == target_model
{
return false;
}
if available_models
.iter()
.any(|preset| preset.model == current_model && preset.upgrade.is_some())
{
return true;
}
if available_models
.iter()
.any(|preset| preset.upgrade.as_ref().map(|u| u.id.as_str()) == Some(target_model))
{
return true;
}
false
}
fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool {
match migration_config_key {
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => config
.notices
.hide_gpt_5_1_codex_max_migration_prompt
.unwrap_or(false),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => {
config.notices.hide_gpt5_1_migration_prompt.unwrap_or(false)
}
_ => false,
}
}
/// Looks up the preset whose model slug equals `target_model`, if any.
fn target_preset_for_upgrade<'a>(
    available_models: &'a [ModelPreset],
    target_model: &str,
) -> Option<&'a ModelPreset> {
    for preset in available_models {
        if preset.model == target_model {
            return Some(preset);
        }
    }
    None
}
/// Checks whether the active `model` declares an upgrade and, when the user
/// has not yet acknowledged (or hidden) the corresponding migration prompt,
/// runs it.
///
/// * `Accepted` — switches `config` to the target model, remaps the
///   reasoning effort through the upgrade's mapping when one is provided,
///   and emits events to update and persist the new selection.
/// * `Rejected` — persists only the acknowledgement so the prompt is not
///   shown again for this model pair.
/// * `Exit` — returns `Some(AppExitInfo)` so the caller can terminate.
///
/// Returns `None` whenever no prompt was needed, or after Accept/Reject.
async fn handle_model_migration_prompt_if_needed(
    tui: &mut tui::Tui,
    config: &mut Config,
    model: &str,
    app_event_tx: &AppEventSender,
    models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
    let available_models = models_manager.list_models(config).await;
    // Upgrade metadata attached to the currently selected model, if any.
    let upgrade = available_models
        .iter()
        .find(|preset| preset.model == model)
        .and_then(|preset| preset.upgrade.as_ref());
    if let Some(ModelUpgrade {
        id: target_model,
        reasoning_effort_mapping,
        migration_config_key,
        model_link,
        upgrade_copy,
        migration_markdown,
    }) = upgrade
    {
        // Respect the per-migration "do not show again" config flag.
        if migration_prompt_hidden(config, migration_config_key.as_str()) {
            return None;
        }
        let target_model = target_model.to_string();
        if !should_show_model_migration_prompt(
            model,
            &target_model,
            &config.notices.model_migrations,
            &available_models,
        ) {
            return None;
        }
        let current_preset = available_models.iter().find(|preset| preset.model == model);
        let target_preset = target_preset_for_upgrade(&available_models, &target_model);
        // Without a preset describing the target there is nothing to offer.
        let target_preset = target_preset?;
        let target_display_name = target_preset.display_name.clone();
        // Use the raw target slug when the display name would read the same
        // as the current model name.
        let heading_label = if target_display_name == model {
            target_model.clone()
        } else {
            target_display_name.clone()
        };
        let target_description =
            (!target_preset.description.is_empty()).then(|| target_preset.description.clone());
        // Opting out only makes sense when the current model is itself a
        // known preset the user can stay on.
        let can_opt_out = current_preset.is_some();
        let prompt_copy = migration_copy_for_models(
            model,
            &target_model,
            model_link.clone(),
            upgrade_copy.clone(),
            migration_markdown.clone(),
            heading_label,
            target_description,
            can_opt_out,
        );
        match run_model_migration_prompt(tui, prompt_copy).await {
            ModelMigrationOutcome::Accepted => {
                // Persist the acknowledgement first so the prompt is not
                // repeated on future runs.
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
                config.model = Some(target_model.clone());
                // Translate the current reasoning effort through the
                // upgrade's mapping; fall back to the existing effort when
                // no mapping entry applies.
                let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
                    && let Some(reasoning_effort) = config.model_reasoning_effort
                {
                    reasoning_effort_mapping
                        .get(&reasoning_effort)
                        .cloned()
                        .or(config.model_reasoning_effort)
                } else {
                    config.model_reasoning_effort
                };
                config.model_reasoning_effort = mapped_effort;
                app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
                app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
                app_event_tx.send(AppEvent::PersistModelSelection {
                    model: target_model.clone(),
                    effort: mapped_effort,
                });
            }
            ModelMigrationOutcome::Rejected => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
            }
            ModelMigrationOutcome::Exit => {
                // The user chose to quit from the prompt; report a
                // zero-usage exit so the caller terminates immediately.
                return Some(AppExitInfo {
                    token_usage: TokenUsage::default(),
                    thread_id: None,
                    update_action: None,
                });
            }
        }
    }
    None
}
/// Top-level TUI application state: owns the chat widget, configuration,
/// transcript cells, and the flags that drive the main event loop.
pub(crate) struct App {
    /// Thread manager shared with the core backend.
    pub(crate) server: Arc<ThreadManager>,
    /// Sender for app-level events consumed by the main event loop.
    pub(crate) app_event_tx: AppEventSender,
    /// The active chat view; replaced when a session is restarted, resumed,
    /// or forked.
    pub(crate) chat_widget: ChatWidget,
    pub(crate) auth_manager: Arc<AuthManager>,
    /// Config is stored here so we can recreate ChatWidgets as needed.
    pub(crate) config: Config,
    /// Slug of the model currently in use.
    pub(crate) current_model: String,
    /// Name of the active config profile, when one is selected.
    pub(crate) active_profile: Option<String>,
    pub(crate) file_search: FileSearchManager,
    /// Every history cell inserted so far; also feeds the transcript overlay.
    pub(crate) transcript_cells: Vec<Arc<dyn HistoryCell>>,
    // Pager overlay state (Transcript or Static like Diff)
    pub(crate) overlay: Option<Overlay>,
    /// History lines held back while an overlay is open.
    pub(crate) deferred_history_lines: Vec<Line<'static>>,
    // Whether any history lines have been emitted yet; controls the blank
    // separator line inserted before subsequent cells.
    has_emitted_history_lines: bool,
    pub(crate) enhanced_keys_supported: bool,
    /// Controls the animation thread that sends CommitTick events.
    pub(crate) commit_anim_running: Arc<AtomicBool>,
    // Esc-backtracking state grouped
    pub(crate) backtrack: crate::app_backtrack::BacktrackState,
    pub(crate) feedback: codex_feedback::CodexFeedback,
    /// Set when the user confirms an update; propagated on exit.
    pub(crate) pending_update_action: Option<UpdateAction>,
    /// Ignore the next ShutdownComplete event when we're intentionally
    /// stopping a thread (e.g., before starting a new one).
    suppress_shutdown_complete: bool,
    // One-shot suppression of the next world-writable scan after user confirmation.
    skip_world_writable_scan_once: bool,
}
impl App {
/// Shuts down the active thread, if any: arms suppression of the resulting
/// `ShutdownComplete` event, submits `Op::Shutdown`, and removes the thread
/// from the manager. No-op when no thread is active.
async fn shutdown_current_thread(&mut self) {
    let Some(thread_id) = self.chat_widget.thread_id() else {
        return;
    };
    self.suppress_shutdown_complete = true;
    self.chat_widget.submit_op(Op::Shutdown);
    self.server.remove_thread(&thread_id).await;
}
/// Runs the TUI application to completion.
///
/// Sets up the app event channel and thread manager, shows the model
/// migration prompt when needed (which may rewrite `config.model` or exit
/// early), builds the initial `ChatWidget` according to `session_selection`
/// (fresh, resumed, or forked), then drives the main event loop until a
/// handler returns `false`. Returns the session's exit info.
#[allow(clippy::too_many_arguments)]
pub async fn run(
    tui: &mut tui::Tui,
    auth_manager: Arc<AuthManager>,
    mut config: Config,
    active_profile: Option<String>,
    initial_prompt: Option<String>,
    initial_images: Vec<PathBuf>,
    session_selection: SessionSelection,
    feedback: codex_feedback::CodexFeedback,
    is_first_run: bool,
    ollama_chat_support_notice: Option<DeprecationNoticeEvent>,
) -> Result<AppExitInfo> {
    use tokio_stream::StreamExt;
    let (app_event_tx, mut app_event_rx) = unbounded_channel();
    let app_event_tx = AppEventSender::new(app_event_tx);
    // Surface the Ollama chat-format deprecation notice (if any) before
    // anything else is queued.
    emit_deprecation_notice(&app_event_tx, ollama_chat_support_notice);
    let thread_manager = Arc::new(ThreadManager::new(
        config.codex_home.clone(),
        auth_manager.clone(),
        SessionSource::Cli,
    ));
    let mut model = thread_manager
        .get_models_manager()
        .get_model(&config.model, &config)
        .await;
    // A `Some` return means the user chose to exit from the migration prompt.
    let exit_info = handle_model_migration_prompt_if_needed(
        tui,
        &mut config,
        model.as_str(),
        &app_event_tx,
        thread_manager.get_models_manager(),
    )
    .await;
    if let Some(exit_info) = exit_info {
        return Ok(exit_info);
    }
    // The migration prompt may have rewritten `config.model`; pick it up.
    if let Some(updated_model) = config.model.clone() {
        model = updated_model;
    }
    let enhanced_keys_supported = tui.enhanced_keys_supported();
    // Construct the chat widget according to how the session was selected.
    let mut chat_widget = match session_selection {
        SessionSelection::StartFresh | SessionSelection::Exit => {
            let init = crate::chatwidget::ChatWidgetInit {
                config: config.clone(),
                frame_requester: tui.frame_requester(),
                app_event_tx: app_event_tx.clone(),
                initial_prompt: initial_prompt.clone(),
                initial_images: initial_images.clone(),
                enhanced_keys_supported,
                auth_manager: auth_manager.clone(),
                models_manager: thread_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model: model.clone(),
            };
            ChatWidget::new(init, thread_manager.clone())
        }
        SessionSelection::Resume(path) => {
            let resumed = thread_manager
                .resume_thread_from_rollout(config.clone(), path.clone(), auth_manager.clone())
                .await
                .wrap_err_with(|| {
                    let path_display = path.display();
                    format!("Failed to resume session from {path_display}")
                })?;
            let init = crate::chatwidget::ChatWidgetInit {
                config: config.clone(),
                frame_requester: tui.frame_requester(),
                app_event_tx: app_event_tx.clone(),
                initial_prompt: initial_prompt.clone(),
                initial_images: initial_images.clone(),
                enhanced_keys_supported,
                auth_manager: auth_manager.clone(),
                models_manager: thread_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model: model.clone(),
            };
            ChatWidget::new_from_existing(init, resumed.thread, resumed.session_configured)
        }
        SessionSelection::Fork(path) => {
            let forked = thread_manager
                .fork_thread(usize::MAX, config.clone(), path.clone())
                .await
                .wrap_err_with(|| {
                    let path_display = path.display();
                    format!("Failed to fork session from {path_display}")
                })?;
            let init = crate::chatwidget::ChatWidgetInit {
                config: config.clone(),
                frame_requester: tui.frame_requester(),
                app_event_tx: app_event_tx.clone(),
                initial_prompt: initial_prompt.clone(),
                initial_images: initial_images.clone(),
                enhanced_keys_supported,
                auth_manager: auth_manager.clone(),
                models_manager: thread_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model: model.clone(),
            };
            ChatWidget::new_from_existing(init, forked.thread, forked.session_configured)
        }
    };
    chat_widget.maybe_prompt_windows_sandbox_enable();
    let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
    // Update check only runs in release builds.
    #[cfg(not(debug_assertions))]
    let upgrade_version = crate::updates::get_upgrade_version(&config);
    let mut app = Self {
        server: thread_manager.clone(),
        app_event_tx,
        chat_widget,
        auth_manager: auth_manager.clone(),
        config,
        current_model: model.clone(),
        active_profile,
        file_search,
        enhanced_keys_supported,
        transcript_cells: Vec::new(),
        overlay: None,
        deferred_history_lines: Vec::new(),
        has_emitted_history_lines: false,
        commit_anim_running: Arc::new(AtomicBool::new(false)),
        backtrack: BacktrackState::default(),
        feedback: feedback.clone(),
        pending_update_action: None,
        suppress_shutdown_complete: false,
        skip_world_writable_scan_once: false,
    };
    // On startup, if Agent mode (workspace-write) or ReadOnly is active, warn about world-writable dirs on Windows.
    #[cfg(target_os = "windows")]
    {
        let should_check = codex_core::get_platform_sandbox().is_some()
            && matches!(
                app.config.sandbox_policy.get(),
                codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. }
                    | codex_core::protocol::SandboxPolicy::ReadOnly
            )
            && !app
                .config
                .notices
                .hide_world_writable_warning
                .unwrap_or(false);
        if should_check {
            let cwd = app.config.cwd.clone();
            let env_map: std::collections::HashMap<String, String> = std::env::vars().collect();
            let tx = app.app_event_tx.clone();
            let logs_base_dir = app.config.codex_home.clone();
            let sandbox_policy = app.config.sandbox_policy.get().clone();
            Self::spawn_world_writable_scan(cwd, env_map, logs_base_dir, sandbox_policy, tx);
        }
    }
    // Announce an available update as a history cell (release builds only).
    #[cfg(not(debug_assertions))]
    if let Some(latest_version) = upgrade_version {
        app.handle_event(
            tui,
            AppEvent::InsertHistoryCell(Box::new(UpdateAvailableHistoryCell::new(
                latest_version,
                crate::update_action::get_update_action(),
            ))),
        )
        .await?;
    }
    let tui_events = tui.event_stream();
    tokio::pin!(tui_events);
    tui.frame_requester().schedule_frame();
    // Main loop: interleave app events and terminal events; a handler
    // returning `false` ends the loop.
    while select! {
        Some(event) = app_event_rx.recv() => {
            app.handle_event(tui, event).await?
        }
        Some(event) = tui_events.next() => {
            app.handle_tui_event(tui, event).await?
        }
    } {}
    tui.terminal.clear()?;
    Ok(AppExitInfo {
        token_usage: app.token_usage(),
        thread_id: app.chat_widget.thread_id(),
        update_action: app.pending_update_action,
    })
}
/// Handles one terminal event (key press, paste, or draw request).
///
/// While an overlay is open, the event is routed to the overlay handler
/// instead. Always returns `Ok(true)`: terminal events never end the main
/// loop directly.
pub(crate) async fn handle_tui_event(
    &mut self,
    tui: &mut tui::Tui,
    event: TuiEvent,
) -> Result<bool> {
    if self.overlay.is_some() {
        let _ = self.handle_backtrack_overlay_event(tui, event).await?;
    } else {
        match event {
            TuiEvent::Key(key_event) => {
                self.handle_key_event(tui, key_event).await;
            }
            TuiEvent::Paste(pasted) => {
                // Many terminals convert newlines to \r when pasting (e.g., iTerm2),
                // but tui-textarea expects \n. Normalize CR to LF.
                // [tui-textarea]: https://github.com/rhysd/tui-textarea/blob/4d18622eeac13b309e0ff6a55a46ac6706da68cf/src/textarea.rs#L782-L783
                // [iTerm2]: https://github.com/gnachman/iTerm2/blob/5d0c0d9f68523cbd0494dad5422998964a2ecd8d/sources/iTermPasteHelper.m#L206-L216
                let pasted = pasted.replace("\r", "\n");
                self.chat_widget.handle_paste_event(pasted);
            }
            TuiEvent::Draw => {
                self.chat_widget.maybe_post_pending_notification(tui);
                // Let the widget consume a paste-burst tick; when it does,
                // skip drawing this frame.
                if self
                    .chat_widget
                    .handle_paste_burst_tick(tui.frame_requester())
                {
                    return Ok(true);
                }
                tui.draw(
                    self.chat_widget.desired_height(tui.terminal.size()?.width),
                    |frame| {
                        self.chat_widget.render(frame.area(), frame.buffer);
                        if let Some((x, y)) = self.chat_widget.cursor_pos(frame.area()) {
                            frame.set_cursor_position((x, y));
                        }
                    },
                )?;
                // After drawing, promote a requested external editor to
                // active and ask the app loop to launch it.
                if self.chat_widget.external_editor_state() == ExternalEditorState::Requested {
                    self.chat_widget
                        .set_external_editor_state(ExternalEditorState::Active);
                    self.app_event_tx.send(AppEvent::LaunchExternalEditor);
                }
            }
        }
    }
    Ok(true)
}
async fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
let model_info = self
.server
.get_models_manager()
.construct_model_info(self.current_model.as_str(), &self.config)
.await;
match event {
AppEvent::NewSession => {
let summary =
session_summary(self.chat_widget.token_usage(), self.chat_widget.thread_id());
self.shutdown_current_thread().await;
let init = crate::chatwidget::ChatWidgetInit {
config: self.config.clone(),
frame_requester: tui.frame_requester(),
app_event_tx: self.app_event_tx.clone(),
initial_prompt: None,
initial_images: Vec::new(),
enhanced_keys_supported: self.enhanced_keys_supported,
auth_manager: self.auth_manager.clone(),
models_manager: self.server.get_models_manager(),
feedback: self.feedback.clone(),
is_first_run: false,
model: self.current_model.clone(),
};
self.chat_widget = ChatWidget::new(init, self.server.clone());
self.current_model = model_info.slug.clone();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> = vec![summary.usage_line.clone().into()];
if let Some(command) = summary.resume_command {
let spans = vec!["To continue this session, run ".into(), command.cyan()];
lines.push(spans.into());
}
self.chat_widget.add_plain_history_lines(lines);
}
tui.frame_requester().schedule_frame();
}
AppEvent::OpenResumePicker => {
match crate::resume_picker::run_resume_picker(
tui,
&self.config.codex_home,
&self.config.model_provider_id,
false,
)
.await?
{
SessionSelection::Resume(path) => {
let summary = session_summary(
self.chat_widget.token_usage(),
self.chat_widget.thread_id(),
);
match self
.server
.resume_thread_from_rollout(
self.config.clone(),
path.clone(),
self.auth_manager.clone(),
)
.await
{
Ok(resumed) => {
self.shutdown_current_thread().await;
let init = crate::chatwidget::ChatWidgetInit {
config: self.config.clone(),
frame_requester: tui.frame_requester(),
app_event_tx: self.app_event_tx.clone(),
initial_prompt: None,
initial_images: Vec::new(),
enhanced_keys_supported: self.enhanced_keys_supported,
auth_manager: self.auth_manager.clone(),
models_manager: self.server.get_models_manager(),
feedback: self.feedback.clone(),
is_first_run: false,
model: self.current_model.clone(),
};
self.chat_widget = ChatWidget::new_from_existing(
init,
resumed.thread,
resumed.session_configured,
);
self.current_model = model_info.slug.clone();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> =
vec![summary.usage_line.clone().into()];
if let Some(command) = summary.resume_command {
let spans = vec![
"To continue this session, run ".into(),
command.cyan(),
];
lines.push(spans.into());
}
self.chat_widget.add_plain_history_lines(lines);
}
}
Err(err) => {
let path_display = path.display();
self.chat_widget.add_error_message(format!(
"Failed to resume session from {path_display}: {err}"
));
}
}
}
SessionSelection::Exit
| SessionSelection::StartFresh
| SessionSelection::Fork(_) => {}
}
// Leaving alt-screen may blank the inline viewport; force a redraw either way.
tui.frame_requester().schedule_frame();
}
AppEvent::OpenForkPicker => {
match crate::resume_picker::run_fork_picker(
tui,
&self.config.codex_home,
&self.config.model_provider_id,
false,
)
.await?
{
SessionSelection::Fork(path) => {
let summary = session_summary(
self.chat_widget.token_usage(),
self.chat_widget.thread_id(),
);
match self
.server
.fork_thread(usize::MAX, self.config.clone(), path.clone())
.await
{
Ok(forked) => {
self.shutdown_current_thread().await;
let init = crate::chatwidget::ChatWidgetInit {
config: self.config.clone(),
frame_requester: tui.frame_requester(),
app_event_tx: self.app_event_tx.clone(),
initial_prompt: None,
initial_images: Vec::new(),
enhanced_keys_supported: self.enhanced_keys_supported,
auth_manager: self.auth_manager.clone(),
models_manager: self.server.get_models_manager(),
feedback: self.feedback.clone(),
is_first_run: false,
model: self.current_model.clone(),
};
self.chat_widget = ChatWidget::new_from_existing(
init,
forked.thread,
forked.session_configured,
);
self.current_model = model_info.slug.clone();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> =
vec![summary.usage_line.clone().into()];
if let Some(command) = summary.resume_command {
let spans = vec![
"To continue this session, run ".into(),
command.cyan(),
];
lines.push(spans.into());
}
self.chat_widget.add_plain_history_lines(lines);
}
}
Err(err) => {
let path_display = path.display();
self.chat_widget.add_error_message(format!(
"Failed to fork session from {path_display}: {err}"
));
}
}
}
SessionSelection::Exit
| SessionSelection::StartFresh
| SessionSelection::Resume(_) => {}
}
// Leaving alt-screen may blank the inline viewport; force a redraw either way.
tui.frame_requester().schedule_frame();
}
AppEvent::InsertHistoryCell(cell) => {
let cell: Arc<dyn HistoryCell> = cell.into();
if let Some(Overlay::Transcript(t)) = &mut self.overlay {
t.insert_cell(cell.clone());
tui.frame_requester().schedule_frame();
}
self.transcript_cells.push(cell.clone());
let mut display = cell.display_lines(tui.terminal.last_known_screen_size.width);
if !display.is_empty() {
// Only insert a separating blank line for new cells that are not
// part of an ongoing stream. Streaming continuations should not
// accrue extra blank lines between chunks.
if !cell.is_stream_continuation() {
if self.has_emitted_history_lines {
display.insert(0, Line::from(""));
} else {
self.has_emitted_history_lines = true;
}
}
if self.overlay.is_some() {
self.deferred_history_lines.extend(display);
} else {
tui.insert_history_lines(display);
}
}
}
AppEvent::StartCommitAnimation => {
if self
.commit_anim_running
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
let tx = self.app_event_tx.clone();
let running = self.commit_anim_running.clone();
thread::spawn(move || {
while running.load(Ordering::Relaxed) {
thread::sleep(Duration::from_millis(50));
tx.send(AppEvent::CommitTick);
}
});
}
}
AppEvent::StopCommitAnimation => {
self.commit_anim_running.store(false, Ordering::Release);
}
AppEvent::CommitTick => {
self.chat_widget.on_commit_tick();
}
AppEvent::CodexEvent(event) => {
if self.suppress_shutdown_complete
&& matches!(event.msg, EventMsg::ShutdownComplete)
{
self.suppress_shutdown_complete = false;
return Ok(true);
}
if let EventMsg::ListSkillsResponse(response) = &event.msg {
let cwd = self.chat_widget.config_ref().cwd.clone();
let errors = errors_for_cwd(&cwd, response);
emit_skill_load_warnings(&self.app_event_tx, &errors);
}
self.chat_widget.handle_codex_event(event);
}
AppEvent::ExitRequest => {
return Ok(false);
}
AppEvent::CodexOp(op) => self.chat_widget.submit_op(op),
AppEvent::DiffResult(text) => {
// Clear the in-progress state in the bottom pane
self.chat_widget.on_diff_complete();
// Enter alternate screen using TUI helper and build pager lines
let _ = tui.enter_alt_screen();
let pager_lines: Vec<ratatui::text::Line<'static>> = if text.trim().is_empty() {
vec!["No changes detected.".italic().into()]
} else {
text.lines().map(ansi_escape_line).collect()
};
self.overlay = Some(Overlay::new_static_with_lines(
pager_lines,
"D I F F".to_string(),
));
tui.frame_requester().schedule_frame();
}
AppEvent::StartFileSearch(query) => {
if !query.is_empty() {
self.file_search.on_user_query(query);
}
}
AppEvent::FileSearchResult { query, matches } => {
self.chat_widget.apply_file_search_result(query, matches);
}
AppEvent::RateLimitSnapshotFetched(snapshot) => {
self.chat_widget.on_rate_limit_snapshot(Some(snapshot));
}
AppEvent::UpdateReasoningEffort(effort) => {
self.on_update_reasoning_effort(effort);
}
AppEvent::UpdateModel(model) => {
self.chat_widget.set_model(&model);
self.current_model = model;
}
AppEvent::OpenReasoningPopup { model } => {
self.chat_widget.open_reasoning_popup(model);
}
AppEvent::OpenAllModelsPopup { models } => {
self.chat_widget.open_all_models_popup(models);
}
AppEvent::OpenFullAccessConfirmation { preset } => {
self.chat_widget.open_full_access_confirmation(preset);
}
AppEvent::OpenWorldWritableWarningConfirmation {
preset,
sample_paths,
extra_count,
failed_scan,
} => {
self.chat_widget.open_world_writable_warning_confirmation(
preset,
sample_paths,
extra_count,
failed_scan,
);
}
AppEvent::OpenFeedbackNote {
category,
include_logs,
} => {
self.chat_widget.open_feedback_note(category, include_logs);
}
AppEvent::OpenFeedbackConsent { category } => {
self.chat_widget.open_feedback_consent(category);
}
AppEvent::LaunchExternalEditor => {
if self.chat_widget.external_editor_state() == ExternalEditorState::Active {
self.launch_external_editor(tui).await;
}
}
AppEvent::OpenWindowsSandboxEnablePrompt { preset } => {
self.chat_widget.open_windows_sandbox_enable_prompt(preset);
}
AppEvent::OpenWindowsSandboxFallbackPrompt { preset, reason } => {
self.chat_widget.clear_windows_sandbox_setup_status();
self.chat_widget
.open_windows_sandbox_fallback_prompt(preset, reason);
}
AppEvent::BeginWindowsSandboxElevatedSetup { preset } => {
#[cfg(target_os = "windows")]
{
let policy = preset.sandbox.clone();
let policy_cwd = self.config.cwd.clone();
let command_cwd = policy_cwd.clone();
let env_map: std::collections::HashMap<String, String> =
std::env::vars().collect();
let codex_home = self.config.codex_home.clone();
let tx = self.app_event_tx.clone();
// If the elevated setup already ran on this machine, don't prompt for
// elevation again - just flip the config to use the elevated path.
if codex_core::windows_sandbox::sandbox_setup_is_complete(codex_home.as_path())
{
tx.send(AppEvent::EnableWindowsSandboxForAgentMode {
preset,
mode: WindowsSandboxEnableMode::Elevated,
});
return Ok(true);
}
self.chat_widget.show_windows_sandbox_setup_status();
tokio::task::spawn_blocking(move || {
let result = codex_core::windows_sandbox::run_elevated_setup(
&policy,
policy_cwd.as_path(),
command_cwd.as_path(),
&env_map,
codex_home.as_path(),
);
let event = match result {
Ok(()) => AppEvent::EnableWindowsSandboxForAgentMode {
preset: preset.clone(),
mode: WindowsSandboxEnableMode::Elevated,
},
Err(err) => {
tracing::error!(
error = %err,
"failed to run elevated Windows sandbox setup"
);
AppEvent::OpenWindowsSandboxFallbackPrompt {
preset,
reason: WindowsSandboxFallbackReason::ElevationFailed,
}
}
};
tx.send(event);
});
}
#[cfg(not(target_os = "windows"))]
{
let _ = preset;
}
}
AppEvent::EnableWindowsSandboxForAgentMode { preset, mode } => {
#[cfg(target_os = "windows")]
{
self.chat_widget.clear_windows_sandbox_setup_status();
let profile = self.active_profile.as_deref();
let feature_key = Feature::WindowsSandbox.key();
let elevated_key = Feature::WindowsSandboxElevated.key();
let elevated_enabled = matches!(mode, WindowsSandboxEnableMode::Elevated);
match ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(profile)
.set_feature_enabled(feature_key, true)
.set_feature_enabled(elevated_key, elevated_enabled)
.apply()
.await
{
Ok(()) => {
self.config.set_windows_sandbox_globally(true);
self.config
.set_windows_elevated_sandbox_globally(elevated_enabled);
self.chat_widget
.set_feature_enabled(Feature::WindowsSandbox, true);
self.chat_widget.set_feature_enabled(
Feature::WindowsSandboxElevated,
elevated_enabled,
);
self.chat_widget.clear_forced_auto_mode_downgrade();
if let Some((sample_paths, extra_count, failed_scan)) =
self.chat_widget.world_writable_warning_details()
{
self.app_event_tx.send(
AppEvent::OpenWorldWritableWarningConfirmation {
preset: Some(preset.clone()),
sample_paths,
extra_count,
failed_scan,
},
);
} else {
self.app_event_tx.send(AppEvent::CodexOp(
Op::OverrideTurnContext {
cwd: None,
approval_policy: Some(preset.approval),
sandbox_policy: Some(preset.sandbox.clone()),
model: None,
effort: None,
summary: None,
},
));
self.app_event_tx
.send(AppEvent::UpdateAskForApprovalPolicy(preset.approval));
self.app_event_tx
.send(AppEvent::UpdateSandboxPolicy(preset.sandbox.clone()));
self.chat_widget.add_info_message(
match mode {
WindowsSandboxEnableMode::Elevated => {
"Enabled elevated agent sandbox.".to_string()
}
WindowsSandboxEnableMode::Legacy => {
"Enabled non-elevated agent sandbox.".to_string()
}
},
None,
);
}
}
Err(err) => {
tracing::error!(
error = %err,
"failed to enable Windows sandbox feature"
);
self.chat_widget.add_error_message(format!(
"Failed to enable the Windows sandbox feature: {err}"
));
}
}
}
#[cfg(not(target_os = "windows"))]
{
let _ = (preset, mode);
}
}
AppEvent::PersistModelSelection { model, effort } => {
let profile = self.active_profile.as_deref();
match ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(profile)
.set_model(Some(model.as_str()), effort)
.apply()
.await
{
Ok(()) => {
let mut message = format!("Model changed to {model}");
if let Some(label) = Self::reasoning_label_for(&model, effort) {
message.push(' ');
message.push_str(label);
}
if let Some(profile) = profile {
message.push_str(" for ");
message.push_str(profile);
message.push_str(" profile");
}
self.chat_widget.add_info_message(message, None);
}
Err(err) => {
tracing::error!(
error = %err,
"failed to persist model selection"
);
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save model for profile `{profile}`: {err}"
));
} else {
self.chat_widget
.add_error_message(format!("Failed to save default model: {err}"));
}
}
}
}
AppEvent::UpdateAskForApprovalPolicy(policy) => {
self.chat_widget.set_approval_policy(policy);
}
AppEvent::UpdateSandboxPolicy(policy) => {
#[cfg(target_os = "windows")]
let policy_is_workspace_write_or_ro = matches!(
&policy,
codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. }
| codex_core::protocol::SandboxPolicy::ReadOnly
);
if let Err(err) = self.config.sandbox_policy.set(policy.clone()) {
tracing::warn!(%err, "failed to set sandbox policy on app config");
self.chat_widget
.add_error_message(format!("Failed to set sandbox policy: {err}"));
return Ok(true);
}
#[cfg(target_os = "windows")]
if !matches!(&policy, codex_core::protocol::SandboxPolicy::ReadOnly)
|| codex_core::get_platform_sandbox().is_some()
{
self.config.forced_auto_mode_downgraded_on_windows = false;
}
if let Err(err) = self.chat_widget.set_sandbox_policy(policy) {
tracing::warn!(%err, "failed to set sandbox policy on chat config");
self.chat_widget
.add_error_message(format!("Failed to set sandbox policy: {err}"));
return Ok(true);
}
// If sandbox policy becomes workspace-write or read-only, run the Windows world-writable scan.
#[cfg(target_os = "windows")]
{
// One-shot suppression if the user just confirmed continue.
if self.skip_world_writable_scan_once {
self.skip_world_writable_scan_once = false;
return Ok(true);
}
let should_check = codex_core::get_platform_sandbox().is_some()
&& policy_is_workspace_write_or_ro
&& !self.chat_widget.world_writable_warning_hidden();
if should_check {
let cwd = self.config.cwd.clone();
let env_map: std::collections::HashMap<String, String> =
std::env::vars().collect();
let tx = self.app_event_tx.clone();
let logs_base_dir = self.config.codex_home.clone();
let sandbox_policy = self.config.sandbox_policy.get().clone();
Self::spawn_world_writable_scan(
cwd,
env_map,
logs_base_dir,
sandbox_policy,
tx,
);
}
}
}
AppEvent::UpdateFeatureFlags { updates } => {
if updates.is_empty() {
return Ok(true);
}
let mut builder = ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(self.active_profile.as_deref());
for (feature, enabled) in &updates {
let feature_key = feature.key();
if *enabled {
// Update the in-memory configs.
self.config.features.enable(*feature);
self.chat_widget.set_feature_enabled(*feature, true);
builder = builder.set_feature_enabled(feature_key, true);
} else {
// Update the in-memory configs.
self.config.features.disable(*feature);
self.chat_widget.set_feature_enabled(*feature, false);
if feature.default_enabled() {
builder = builder.set_feature_enabled(feature_key, false);
} else {
// If the feature already default to `false`, we drop the key
// in the config file so that the user does not miss the feature
// once it gets globally released.
builder = builder.with_edits(vec![ConfigEdit::ClearPath {
segments: vec!["features".to_string(), feature_key.to_string()],
}]);
}
}
}
if let Err(err) = builder.apply().await {
tracing::error!(error = %err, "failed to persist feature flags");
self.chat_widget.add_error_message(format!(
"Failed to update experimental features: {err}"
));
}
}
AppEvent::SkipNextWorldWritableScan => {
self.skip_world_writable_scan_once = true;
}
AppEvent::UpdateFullAccessWarningAcknowledged(ack) => {
self.chat_widget.set_full_access_warning_acknowledged(ack);
}
AppEvent::UpdateWorldWritableWarningAcknowledged(ack) => {
self.chat_widget
.set_world_writable_warning_acknowledged(ack);
}
AppEvent::UpdateRateLimitSwitchPromptHidden(hidden) => {
self.chat_widget.set_rate_limit_switch_prompt_hidden(hidden);
}
AppEvent::PersistFullAccessWarningAcknowledged => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.set_hide_full_access_warning(true)
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist full access warning acknowledgement"
);
self.chat_widget.add_error_message(format!(
"Failed to save full access confirmation preference: {err}"
));
}
}
AppEvent::PersistWorldWritableWarningAcknowledged => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.set_hide_world_writable_warning(true)
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist world-writable warning acknowledgement"
);
self.chat_widget.add_error_message(format!(
"Failed to save Agent mode warning preference: {err}"
));
}
}
AppEvent::PersistRateLimitSwitchPromptHidden => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.set_hide_rate_limit_model_nudge(true)
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist rate limit switch prompt preference"
);
self.chat_widget.add_error_message(format!(
"Failed to save rate limit reminder preference: {err}"
));
}
}
AppEvent::PersistModelMigrationPromptAcknowledged {
from_model,
to_model,
} => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.record_model_migration_seen(from_model.as_str(), to_model.as_str())
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist model migration prompt acknowledgement"
);
self.chat_widget.add_error_message(format!(
"Failed to save model migration prompt preference: {err}"
));
}
}
AppEvent::OpenApprovalsPopup => {
self.chat_widget.open_approvals_popup();
}
AppEvent::OpenReviewBranchPicker(cwd) => {
self.chat_widget.show_review_branch_picker(&cwd).await;
}
AppEvent::OpenReviewCommitPicker(cwd) => {
self.chat_widget.show_review_commit_picker(&cwd).await;
}
AppEvent::OpenReviewCustomPrompt => {
self.chat_widget.show_review_custom_prompt();
}
AppEvent::FullScreenApprovalRequest(request) => match request {
ApprovalRequest::ApplyPatch { cwd, changes, .. } => {
let _ = tui.enter_alt_screen();
let diff_summary = DiffSummary::new(changes, cwd);
self.overlay = Some(Overlay::new_static_with_renderables(
vec![diff_summary.into()],
"P A T C H".to_string(),
));
}
ApprovalRequest::Exec { command, .. } => {
let _ = tui.enter_alt_screen();
let full_cmd = strip_bash_lc_and_escape(&command);
let full_cmd_lines = highlight_bash_to_lines(&full_cmd);
self.overlay = Some(Overlay::new_static_with_lines(
full_cmd_lines,
"E X E C".to_string(),
));
}
ApprovalRequest::McpElicitation {
server_name,
message,
..
} => {
let _ = tui.enter_alt_screen();
let paragraph = Paragraph::new(vec![
Line::from(vec!["Server: ".into(), server_name.bold()]),
Line::from(""),
Line::from(message),
])
.wrap(Wrap { trim: false });
self.overlay = Some(Overlay::new_static_with_renderables(
vec![Box::new(paragraph)],
"E L I C I T A T I O N".to_string(),
));
}
},
}
Ok(true)
}
/// Map a reasoning-effort setting to the short label shown in UI messages.
///
/// Both an absent setting (`None`) and an explicit
/// `ReasoningEffortConfig::None` render as "default".
fn reasoning_label(reasoning_effort: Option<ReasoningEffortConfig>) -> &'static str {
    let Some(effort) = reasoning_effort else {
        return "default";
    };
    match effort {
        ReasoningEffortConfig::Minimal => "minimal",
        ReasoningEffortConfig::Low => "low",
        ReasoningEffortConfig::Medium => "medium",
        ReasoningEffortConfig::High => "high",
        ReasoningEffortConfig::XHigh => "xhigh",
        ReasoningEffortConfig::None => "default",
    }
}
/// Reasoning-effort label to display alongside `model`, or `None` for
/// models whose name starts with `codex-auto-` (no label is shown for them).
fn reasoning_label_for(
    model: &str,
    reasoning_effort: Option<ReasoningEffortConfig>,
) -> Option<&'static str> {
    if model.starts_with("codex-auto-") {
        None
    } else {
        Some(Self::reasoning_label(reasoning_effort))
    }
}
/// Current token usage as reported by the chat widget.
pub(crate) fn token_usage(&self) -> codex_core::protocol::TokenUsage {
    self.chat_widget.token_usage()
}
/// Apply a new reasoning-effort selection, keeping the chat widget and the
/// app-level config in sync.
fn on_update_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
    self.chat_widget.set_reasoning_effort(effort);
    self.config.model_reasoning_effort = effort;
}
/// Launch the user's external editor seeded with the current composer text,
/// then apply the edited result back to the composer.
///
/// The terminal is temporarily restored (keeping raw mode) while the editor
/// runs. Failures — no editor configured, or the editor process erroring —
/// are surfaced as error cells in the history. The external-editor UI state
/// is reset on every path, success or failure.
async fn launch_external_editor(&mut self, tui: &mut tui::Tui) {
    let editor_cmd = match external_editor::resolve_editor_command() {
        Ok(cmd) => cmd,
        Err(external_editor::EditorError::MissingEditor) => {
            self.chat_widget
                .add_to_history(history_cell::new_error_event(
                    "Cannot open external editor: set $VISUAL or $EDITOR".to_string(),
                ));
            self.reset_external_editor_state(tui);
            return;
        }
        Err(err) => {
            self.chat_widget
                .add_to_history(history_cell::new_error_event(format!(
                    "Failed to open editor: {err}",
                )));
            self.reset_external_editor_state(tui);
            return;
        }
    };
    // Capture the composer contents (including pending input) before
    // suspending the UI so the editor sees exactly what the user typed.
    let seed = self.chat_widget.composer_text_with_pending();
    let editor_result = tui
        .with_restored(tui::RestoreMode::KeepRaw, || async {
            external_editor::run_editor(&seed, &editor_cmd).await
        })
        .await;
    // Clear the "editor requested" state regardless of the editor outcome.
    self.reset_external_editor_state(tui);
    match editor_result {
        Ok(new_text) => {
            // Trim trailing whitespace
            let cleaned = new_text.trim_end().to_string();
            self.chat_widget.apply_external_edit(cleaned);
        }
        Err(err) => {
            self.chat_widget
                .add_to_history(history_cell::new_error_event(format!(
                    "Failed to open editor: {err}",
                )));
        }
    }
    tui.frame_requester().schedule_frame();
}
/// Mark the external editor as requested and show the corresponding footer
/// hint; a redraw is scheduled so the hint appears immediately.
fn request_external_editor_launch(&mut self, tui: &mut tui::Tui) {
    self.chat_widget
        .set_external_editor_state(ExternalEditorState::Requested);
    let hint = (EXTERNAL_EDITOR_HINT.to_string(), String::new());
    self.chat_widget.set_footer_hint_override(Some(vec![hint]));
    tui.frame_requester().schedule_frame();
}
/// Return the external-editor flow to its idle state: mark the editor
/// closed, clear the footer hint override, and request a redraw.
fn reset_external_editor_state(&mut self, tui: &mut tui::Tui) {
    self.chat_widget
        .set_external_editor_state(ExternalEditorState::Closed);
    self.chat_widget.set_footer_hint_override(None);
    tui.frame_requester().schedule_frame();
}
/// Route a terminal key event to app-level shortcuts (transcript overlay,
/// external editor, backtracking) or forward it to the chat widget.
///
/// Arm order matters: the specific Ctrl+T / Ctrl+G / Esc / guarded-Enter
/// arms must be tried before the catch-all Press|Repeat arm.
async fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) {
    match key_event {
        // Ctrl+T: open the transcript overlay.
        KeyEvent {
            code: KeyCode::Char('t'),
            modifiers: crossterm::event::KeyModifiers::CONTROL,
            kind: KeyEventKind::Press,
            ..
        } => {
            // Enter alternate screen and set viewport to full size.
            let _ = tui.enter_alt_screen();
            self.overlay = Some(Overlay::new_transcript(self.transcript_cells.clone()));
            tui.frame_requester().schedule_frame();
        }
        // Ctrl+G: request the external editor.
        KeyEvent {
            code: KeyCode::Char('g'),
            modifiers: crossterm::event::KeyModifiers::CONTROL,
            kind: KeyEventKind::Press,
            ..
        } => {
            // Only launch the external editor if there is no overlay and the bottom pane is not in use.
            // Note that it can be launched while a task is running to enable editing while the previous turn is ongoing.
            if self.overlay.is_none()
                && self.chat_widget.can_launch_external_editor()
                && self.chat_widget.external_editor_state() == ExternalEditorState::Closed
            {
                self.request_external_editor_launch(tui);
            }
        }
        // Esc primes/advances backtracking only in normal (not working) mode
        // with the composer focused and empty. In any other state, forward
        // Esc so the active UI (e.g. status indicator, modals, popups)
        // handles it.
        KeyEvent {
            code: KeyCode::Esc,
            kind: KeyEventKind::Press | KeyEventKind::Repeat,
            ..
        } => {
            if self.chat_widget.is_normal_backtrack_mode()
                && self.chat_widget.composer_is_empty()
            {
                self.handle_backtrack_esc_key(tui);
            } else {
                self.chat_widget.handle_key_event(key_event);
            }
        }
        // Enter confirms backtrack when primed + count > 0. Otherwise pass to widget.
        KeyEvent {
            code: KeyCode::Enter,
            kind: KeyEventKind::Press,
            ..
        } if self.backtrack.primed
            && self.backtrack.nth_user_message != usize::MAX
            && self.chat_widget.composer_is_empty() =>
        {
            if let Some(selection) = self.confirm_backtrack_from_main() {
                self.apply_backtrack_selection(tui, selection);
            }
        }
        KeyEvent {
            kind: KeyEventKind::Press | KeyEventKind::Repeat,
            ..
        } => {
            // Any non-Esc key press should cancel a primed backtrack.
            // This avoids stale "Esc-primed" state after the user starts typing
            // (even if they later backspace to empty).
            if key_event.code != KeyCode::Esc && self.backtrack.primed {
                self.reset_backtrack_state();
            }
            self.chat_widget.handle_key_event(key_event);
        }
        _ => {
            // Ignore Release key events.
        }
    };
}
/// Run the Windows world-writable scan on a blocking thread.
///
/// If the scan call returns an error, ask the UI to show the world-writable
/// warning with no example paths and `failed_scan: true`; on success no
/// event is emitted.
#[cfg(target_os = "windows")]
fn spawn_world_writable_scan(
    cwd: PathBuf,
    env_map: std::collections::HashMap<String, String>,
    logs_base_dir: PathBuf,
    sandbox_policy: codex_core::protocol::SandboxPolicy,
    tx: AppEventSender,
) {
    tokio::task::spawn_blocking(move || {
        let scan_result = codex_windows_sandbox::apply_world_writable_scan_and_denies(
            &logs_base_dir,
            &cwd,
            &env_map,
            &sandbox_policy,
            Some(logs_base_dir.as_path()),
        );
        if scan_result.is_ok() {
            return;
        }
        // Scan failed: warn without examples.
        tx.send(AppEvent::OpenWorldWritableWarningConfirmation {
            preset: None,
            sample_paths: Vec::new(),
            extra_count: 0usize,
            failed_scan: true,
        });
    });
}
}
/// Unit tests for app-level behavior: model-migration prompt eligibility,
/// reasoning-effort updates, backtracking against duplicated transcript
/// history, thread shutdown on new sessions, and session summaries.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::app_backtrack::BacktrackState;
    use crate::app_backtrack::user_count;
    use crate::chatwidget::tests::make_chatwidget_manual_with_sender;
    use crate::file_search::FileSearchManager;
    use crate::history_cell::AgentMessageCell;
    use crate::history_cell::HistoryCell;
    use crate::history_cell::UserHistoryCell;
    use crate::history_cell::new_session_info;
    use codex_core::AuthManager;
    use codex_core::CodexAuth;
    use codex_core::ThreadManager;
    use codex_core::config::ConfigBuilder;
    use codex_core::protocol::AskForApproval;
    use codex_core::protocol::Event;
    use codex_core::protocol::EventMsg;
    use codex_core::protocol::SandboxPolicy;
    use codex_core::protocol::SessionConfiguredEvent;
    use codex_protocol::ThreadId;
    use insta::assert_snapshot;
    use pretty_assertions::assert_eq;
    use ratatui::prelude::Line;
    use std::path::PathBuf;
    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;
    use tempfile::tempdir;

    // Build a minimal `App` backed by a manually-driven chat widget; the
    // event/op receivers are dropped, so use this when the test does not
    // need to observe outgoing events.
    async fn make_test_app() -> App {
        let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = "gpt-5.2-codex".to_string();
        let server = Arc::new(ThreadManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),
        ));
        let auth_manager =
            AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
        let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
        App {
            server,
            app_event_tx,
            chat_widget,
            auth_manager,
            config,
            current_model,
            active_profile: None,
            file_search,
            transcript_cells: Vec::new(),
            overlay: None,
            deferred_history_lines: Vec::new(),
            has_emitted_history_lines: false,
            enhanced_keys_supported: false,
            commit_anim_running: Arc::new(AtomicBool::new(false)),
            backtrack: BacktrackState::default(),
            feedback: codex_feedback::CodexFeedback::new(),
            pending_update_action: None,
            suppress_shutdown_complete: false,
            skip_world_writable_scan_once: false,
        }
    }

    // Like `make_test_app`, but also returns the app-event and op receivers
    // so tests can assert on what the app sends.
    async fn make_test_app_with_channels() -> (
        App,
        tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
        tokio::sync::mpsc::UnboundedReceiver<Op>,
    ) {
        let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = "gpt-5.2-codex".to_string();
        let server = Arc::new(ThreadManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),
        ));
        let auth_manager =
            AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
        let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
        (
            App {
                server,
                app_event_tx,
                chat_widget,
                auth_manager,
                config,
                current_model,
                active_profile: None,
                file_search,
                transcript_cells: Vec::new(),
                overlay: None,
                deferred_history_lines: Vec::new(),
                has_emitted_history_lines: false,
                enhanced_keys_supported: false,
                commit_anim_running: Arc::new(AtomicBool::new(false)),
                backtrack: BacktrackState::default(),
                feedback: codex_feedback::CodexFeedback::new(),
                pending_update_action: None,
                suppress_shutdown_complete: false,
                skip_world_writable_scan_once: false,
            },
            rx,
            op_rx,
        )
    }

    // Snapshot of the built-in model preset list used by the migration tests.
    fn all_model_presets() -> Vec<ModelPreset> {
        codex_core::models_manager::model_presets::all_model_presets().clone()
    }

    // Flatten migration copy (markdown if present, otherwise heading +
    // content spans) into plain text for snapshot assertions.
    fn model_migration_copy_to_plain_text(
        copy: &crate::model_migration::ModelMigrationCopy,
    ) -> String {
        if let Some(markdown) = copy.markdown.as_ref() {
            return markdown.clone();
        }
        let mut s = String::new();
        for span in &copy.heading {
            s.push_str(&span.content);
        }
        s.push('\n');
        s.push('\n');
        for line in &copy.content {
            for span in &line.spans {
                s.push_str(&span.content);
            }
            s.push('\n');
        }
        s
    }

    // Deprecated models (and upgrade hops) should prompt; same-model
    // "migrations" should not.
    #[tokio::test]
    async fn model_migration_prompt_only_shows_for_deprecated_models() {
        let seen = BTreeMap::new();
        assert!(should_show_model_migration_prompt(
            "gpt-5",
            "gpt-5.1",
            &seen,
            &all_model_presets()
        ));
        assert!(should_show_model_migration_prompt(
            "gpt-5-codex",
            "gpt-5.1-codex",
            &seen,
            &all_model_presets()
        ));
        assert!(should_show_model_migration_prompt(
            "gpt-5-codex-mini",
            "gpt-5.1-codex-mini",
            &seen,
            &all_model_presets()
        ));
        assert!(should_show_model_migration_prompt(
            "gpt-5.1-codex",
            "gpt-5.1-codex-max",
            &seen,
            &all_model_presets()
        ));
        assert!(!should_show_model_migration_prompt(
            "gpt-5.1-codex",
            "gpt-5.1-codex",
            &seen,
            &all_model_presets()
        ));
    }

    // A migration already recorded as seen, or one targeting the current
    // model itself, must not prompt again.
    #[tokio::test]
    async fn model_migration_prompt_respects_hide_flag_and_self_target() {
        let mut seen = BTreeMap::new();
        seen.insert("gpt-5".to_string(), "gpt-5.1".to_string());
        assert!(!should_show_model_migration_prompt(
            "gpt-5",
            "gpt-5.1",
            &seen,
            &all_model_presets()
        ));
        assert!(!should_show_model_migration_prompt(
            "gpt-5.1",
            "gpt-5.1",
            &seen,
            &all_model_presets()
        ));
    }

    // Eligibility can hold even when the upgrade target preset is absent;
    // target lookup then yields None.
    #[tokio::test]
    async fn model_migration_prompt_skips_when_target_missing() {
        let mut available = all_model_presets();
        let mut current = available
            .iter()
            .find(|preset| preset.model == "gpt-5-codex")
            .cloned()
            .expect("preset present");
        current.upgrade = Some(ModelUpgrade {
            id: "missing-target".to_string(),
            reasoning_effort_mapping: None,
            migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(),
            model_link: None,
            upgrade_copy: None,
            migration_markdown: None,
        });
        available.retain(|preset| preset.model != "gpt-5-codex");
        available.push(current.clone());
        assert!(should_show_model_migration_prompt(
            &current.model,
            "missing-target",
            &BTreeMap::new(),
            &available,
        ));
        assert!(target_preset_for_upgrade(&available, "missing-target").is_none());
    }

    // Models hidden from the picker still get a migration prompt; snapshot
    // the copy that would be shown.
    #[tokio::test]
    async fn model_migration_prompt_shows_for_hidden_model() {
        let codex_home = tempdir().expect("temp codex home");
        let config = ConfigBuilder::default()
            .codex_home(codex_home.path().to_path_buf())
            .build()
            .await
            .expect("config");
        let available_models = all_model_presets();
        let current = available_models
            .iter()
            .find(|preset| preset.model == "gpt-5.1-codex")
            .cloned()
            .expect("gpt-5.1-codex preset present");
        assert!(
            !current.show_in_picker,
            "expected gpt-5.1-codex to be hidden from picker for this test"
        );
        let upgrade = current.upgrade.as_ref().expect("upgrade configured");
        assert!(
            should_show_model_migration_prompt(
                &current.model,
                &upgrade.id,
                &config.notices.model_migrations,
                &available_models,
            ),
            "expected migration prompt to be eligible for hidden model"
        );
        let target = target_preset_for_upgrade(&available_models, &upgrade.id)
            .expect("upgrade target present");
        let target_description =
            (!target.description.is_empty()).then(|| target.description.clone());
        let can_opt_out = true;
        let copy = migration_copy_for_models(
            &current.model,
            &upgrade.id,
            upgrade.model_link.clone(),
            upgrade.upgrade_copy.clone(),
            upgrade.migration_markdown.clone(),
            target.display_name.clone(),
            target_description,
            can_opt_out,
        );
        // Snapshot the copy we would show; rendering is covered by model_migration snapshots.
        assert_snapshot!(
            "model_migration_prompt_shows_for_hidden_model",
            model_migration_copy_to_plain_text(&copy)
        );
    }

    // Changing reasoning effort updates both the app config and the widget's
    // view of it.
    #[tokio::test]
    async fn update_reasoning_effort_updates_config() {
        let mut app = make_test_app().await;
        app.config.model_reasoning_effort = Some(ReasoningEffortConfig::Medium);
        app.chat_widget
            .set_reasoning_effort(Some(ReasoningEffortConfig::Medium));
        app.on_update_reasoning_effort(Some(ReasoningEffortConfig::High));
        assert_eq!(
            app.config.model_reasoning_effort,
            Some(ReasoningEffortConfig::High)
        );
        assert_eq!(
            app.chat_widget.config_ref().model_reasoning_effort,
            Some(ReasoningEffortConfig::High)
        );
    }

    // With duplicated history (from prior backtracks), confirming a
    // backtrack must target the unique post-header turn and roll back the
    // right number of turns.
    #[tokio::test]
    async fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
        let (mut app, _app_event_rx, mut op_rx) = make_test_app_with_channels().await;
        let user_cell = |text: &str| -> Arc<dyn HistoryCell> {
            Arc::new(UserHistoryCell {
                message: text.to_string(),
            }) as Arc<dyn HistoryCell>
        };
        let agent_cell = |text: &str| -> Arc<dyn HistoryCell> {
            Arc::new(AgentMessageCell::new(
                vec![Line::from(text.to_string())],
                true,
            )) as Arc<dyn HistoryCell>
        };
        let make_header = |is_first| {
            let event = SessionConfiguredEvent {
                session_id: ThreadId::new(),
                model: "gpt-test".to_string(),
                model_provider_id: "test-provider".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,
                cwd: PathBuf::from("/home/user/project"),
                reasoning_effort: None,
                history_log_id: 0,
                history_entry_count: 0,
                initial_messages: None,
                rollout_path: PathBuf::new(),
            };
            Arc::new(new_session_info(
                app.chat_widget.config_ref(),
                app.current_model.as_str(),
                event,
                is_first,
            )) as Arc<dyn HistoryCell>
        };
        // Simulate a transcript with duplicated history (e.g., from prior backtracks)
        // and an edited turn appended after a session header boundary.
        app.transcript_cells = vec![
            make_header(true),
            user_cell("first question"),
            agent_cell("answer first"),
            user_cell("follow-up"),
            agent_cell("answer follow-up"),
            make_header(false),
            user_cell("first question"),
            agent_cell("answer first"),
            user_cell("follow-up (edited)"),
            agent_cell("answer edited"),
        ];
        assert_eq!(user_count(&app.transcript_cells), 2);
        let base_id = ThreadId::new();
        app.chat_widget.handle_codex_event(Event {
            id: String::new(),
            msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                session_id: base_id,
                model: "gpt-test".to_string(),
                model_provider_id: "test-provider".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,
                cwd: PathBuf::from("/home/user/project"),
                reasoning_effort: None,
                history_log_id: 0,
                history_entry_count: 0,
                initial_messages: None,
                rollout_path: PathBuf::new(),
            }),
        });
        app.backtrack.base_id = Some(base_id);
        app.backtrack.primed = true;
        app.backtrack.nth_user_message = user_count(&app.transcript_cells).saturating_sub(1);
        let selection = app
            .confirm_backtrack_from_main()
            .expect("backtrack selection");
        assert_eq!(selection.nth_user_message, 1);
        assert_eq!(selection.prefill, "follow-up (edited)");
        app.apply_backtrack_rollback(selection);
        let mut rollback_turns = None;
        while let Ok(op) = op_rx.try_recv() {
            if let Op::ThreadRollback { num_turns } = op {
                rollback_turns = Some(num_turns);
            }
        }
        assert_eq!(rollback_turns, Some(1));
    }

    // Shutting down the current thread must emit Op::Shutdown for the
    // previously configured conversation.
    #[tokio::test]
    async fn new_session_requests_shutdown_for_previous_conversation() {
        let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels().await;
        let thread_id = ThreadId::new();
        let event = SessionConfiguredEvent {
            session_id: thread_id,
            model: "gpt-test".to_string(),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            cwd: PathBuf::from("/home/user/project"),
            reasoning_effort: None,
            history_log_id: 0,
            history_entry_count: 0,
            initial_messages: None,
            rollout_path: PathBuf::new(),
        };
        app.chat_widget.handle_codex_event(Event {
            id: String::new(),
            msg: EventMsg::SessionConfigured(event),
        });
        // Drain any events produced by session configuration before asserting.
        while app_event_rx.try_recv().is_ok() {}
        while op_rx.try_recv().is_ok() {}
        app.shutdown_current_thread().await;
        match op_rx.try_recv() {
            Ok(Op::Shutdown) => {}
            Ok(other) => panic!("expected Op::Shutdown, got {other:?}"),
            Err(_) => panic!("expected shutdown op to be sent"),
        }
    }

    // Zero token usage should produce no session summary.
    #[tokio::test]
    async fn session_summary_skip_zero_usage() {
        assert!(session_summary(TokenUsage::default(), None).is_none());
    }

    // A non-zero summary includes the usage line and a resume command for
    // the given conversation id.
    #[tokio::test]
    async fn session_summary_includes_resume_hint() {
        let usage = TokenUsage {
            input_tokens: 10,
            output_tokens: 2,
            total_tokens: 12,
            ..Default::default()
        };
        let conversation = ThreadId::from_string("123e4567-e89b-12d3-a456-426614174000").unwrap();
        let summary = session_summary(usage, Some(conversation)).expect("summary");
        assert_eq!(
            summary.usage_line,
            "Token usage: total=12 input=10 output=2"
        );
        assert_eq!(
            summary.resume_command,
            Some("codex resume 123e4567-e89b-12d3-a456-426614174000".to_string())
        );
    }
}