Tui: use collaboration mode instead of model and effort (#9507)

- Only use collaboration modes in the TUI state to track model and
effort.
- No behavior change without the collaboration modes flag.
- Change model and effort on /model, /collab (behind a flag), and
shift+tab (behind a flag).
This commit is contained in:
Ahmed Ibrahim
2026-01-20 10:26:12 -08:00
committed by GitHub
parent 7b27aa7707
commit 5ae6e70801
15 changed files with 841 additions and 558 deletions

View File

@@ -5,7 +5,8 @@ use crate::history_cell::with_border_with_inner_width;
use crate::version::CODEX_CLI_VERSION;
use chrono::DateTime;
use chrono::Local;
use codex_common::create_config_summary_entries;
use codex_common::summarize_sandbox_policy;
use codex_core::WireApi;
use codex_core::config::Config;
use codex_core::protocol::NetworkAccess;
use codex_core::protocol::SandboxPolicy;
@@ -13,6 +14,7 @@ use codex_core::protocol::TokenUsage;
use codex_core::protocol::TokenUsageInfo;
use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::openai_models::ReasoningEffort;
use ratatui::prelude::*;
use ratatui::style::Stylize;
use std::collections::BTreeSet;
@@ -85,6 +87,7 @@ pub(crate) fn new_status_output(
now: DateTime<Local>,
model_name: &str,
collaboration_mode: Option<&str>,
reasoning_effort_override: Option<Option<ReasoningEffort>>,
) -> CompositeHistoryCell {
let command = PlainHistoryCell::new(vec!["/status".magenta().into()]);
let card = StatusHistoryCell::new(
@@ -99,6 +102,7 @@ pub(crate) fn new_status_output(
now,
model_name,
collaboration_mode,
reasoning_effort_override,
);
CompositeHistoryCell::new(vec![Box::new(command), Box::new(card)])
@@ -118,8 +122,29 @@ impl StatusHistoryCell {
now: DateTime<Local>,
model_name: &str,
collaboration_mode: Option<&str>,
reasoning_effort_override: Option<Option<ReasoningEffort>>,
) -> Self {
let config_entries = create_config_summary_entries(config, model_name);
let mut config_entries = vec![
("workdir", config.cwd.display().to_string()),
("model", model_name.to_string()),
("provider", config.model_provider_id.clone()),
("approval", config.approval_policy.value().to_string()),
(
"sandbox",
summarize_sandbox_policy(config.sandbox_policy.get()),
),
];
if config.model_provider.wire_api == WireApi::Responses {
let effort_value = reasoning_effort_override
.unwrap_or(None)
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string());
config_entries.push(("reasoning effort", effort_value));
config_entries.push((
"reasoning summaries",
config.model_reasoning_summary.to_string(),
));
}
let (model_name, model_details) = compose_model_display(model_name, &config_entries);
let approval = config_entries
.iter()

View File

@@ -95,7 +95,6 @@ async fn status_snapshot_includes_reasoning_details() {
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
config.model_reasoning_summary = ReasoningSummary::Detailed;
config
.sandbox_policy
@@ -141,6 +140,7 @@ async fn status_snapshot_includes_reasoning_details() {
let model_slug = ModelsManager::get_model_offline(config.model.as_deref());
let token_info = token_info_for(&model_slug, &config, &usage);
let reasoning_effort_override = Some(Some(ReasoningEffort::High));
let composite = new_status_output(
&config,
&auth_manager,
@@ -153,6 +153,7 @@ async fn status_snapshot_includes_reasoning_details() {
captured_at,
&model_slug,
None,
reasoning_effort_override,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -205,6 +206,7 @@ async fn status_snapshot_includes_forked_from() {
captured_at,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -263,6 +265,7 @@ async fn status_snapshot_includes_monthly_limit() {
captured_at,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -309,6 +312,7 @@ async fn status_snapshot_shows_unlimited_credits() {
captured_at,
&model_slug,
None,
None,
);
let rendered = render_lines(&composite.display_lines(120));
assert!(
@@ -354,6 +358,7 @@ async fn status_snapshot_shows_positive_credits() {
captured_at,
&model_slug,
None,
None,
);
let rendered = render_lines(&composite.display_lines(120));
assert!(
@@ -399,6 +404,7 @@ async fn status_snapshot_hides_zero_credits() {
captured_at,
&model_slug,
None,
None,
);
let rendered = render_lines(&composite.display_lines(120));
assert!(
@@ -442,6 +448,7 @@ async fn status_snapshot_hides_when_has_no_credits_flag() {
captured_at,
&model_slug,
None,
None,
);
let rendered = render_lines(&composite.display_lines(120));
assert!(
@@ -485,6 +492,7 @@ async fn status_card_token_usage_excludes_cached_tokens() {
now,
&model_slug,
None,
None,
);
let rendered = render_lines(&composite.display_lines(120));
@@ -500,7 +508,6 @@ async fn status_snapshot_truncates_in_narrow_terminal() {
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
config.model_reasoning_summary = ReasoningSummary::Detailed;
config.cwd = PathBuf::from("/workspace/tests");
@@ -531,6 +538,7 @@ async fn status_snapshot_truncates_in_narrow_terminal() {
let model_slug = ModelsManager::get_model_offline(config.model.as_deref());
let token_info = token_info_for(&model_slug, &config, &usage);
let reasoning_effort_override = Some(Some(ReasoningEffort::High));
let composite = new_status_output(
&config,
&auth_manager,
@@ -543,6 +551,7 @@ async fn status_snapshot_truncates_in_narrow_terminal() {
captured_at,
&model_slug,
None,
reasoning_effort_override,
);
let mut rendered_lines = render_lines(&composite.display_lines(70));
if cfg!(windows) {
@@ -590,6 +599,7 @@ async fn status_snapshot_shows_missing_limits_message() {
now,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -655,6 +665,7 @@ async fn status_snapshot_includes_credits_and_limits() {
captured_at,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -708,6 +719,7 @@ async fn status_snapshot_shows_empty_limits_message() {
captured_at,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -770,6 +782,7 @@ async fn status_snapshot_shows_stale_limits_message() {
now,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -836,6 +849,7 @@ async fn status_snapshot_cached_limits_hide_credits_without_flag() {
now,
&model_slug,
None,
None,
);
let mut rendered_lines = render_lines(&composite.display_lines(80));
if cfg!(windows) {
@@ -892,6 +906,7 @@ async fn status_context_window_uses_last_usage() {
now,
&model_slug,
None,
None,
);
let rendered_lines = render_lines(&composite.display_lines(80));
let context_line = rendered_lines