Remove the legacy TUI split (#15922)

This is part 1 of 2 PRs that will delete the `tui` /
`tui_app_server` split. This part simply deletes the existing `tui`
directory and marks the `tui_app_server` feature flag as removed. I left
the `tui_app_server` feature flag in place for now so its presence
doesn't result in an error. It is simply ignored.

Part 2 will rename the `tui_app_server` directory to `tui`. I did this as
two parts to reduce visible code churn.
This commit is contained in:
Eric Traut
2026-03-27 16:56:44 -06:00
committed by GitHub
parent 307e427a9b
commit d65deec617
856 changed files with 257 additions and 130872 deletions

View File

@@ -1,8 +0,0 @@
/// How the signed-in account is presented in `/status` output.
#[derive(Debug, Clone)]
pub(crate) enum StatusAccountDisplay {
    /// ChatGPT login; email and plan label are shown when known.
    ChatGpt {
        email: Option<String>,
        plan: Option<String>,
    },
    /// Authenticated with an API key rather than a ChatGPT login.
    ApiKey,
}

View File

@@ -1,585 +0,0 @@
use crate::history_cell::CompositeHistoryCell;
use crate::history_cell::HistoryCell;
use crate::history_cell::PlainHistoryCell;
use crate::history_cell::with_border_with_inner_width;
use crate::version::CODEX_CLI_VERSION;
use chrono::DateTime;
use chrono::Local;
use codex_core::WireApi;
use codex_core::config::Config;
use codex_protocol::ThreadId;
use codex_protocol::account::PlanType;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::NetworkAccess;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::protocol::TokenUsageInfo;
use codex_utils_sandbox_summary::summarize_sandbox_policy;
use ratatui::prelude::*;
use ratatui::style::Stylize;
use std::collections::BTreeSet;
use std::path::PathBuf;
use url::Url;
use super::account::StatusAccountDisplay;
use super::format::FieldFormatter;
use super::format::line_display_width;
use super::format::push_label;
use super::format::truncate_line_to_width;
use super::helpers::compose_account_display;
use super::helpers::compose_agents_summary;
use super::helpers::compose_model_display;
use super::helpers::format_directory_display;
use super::helpers::format_tokens_compact;
use super::rate_limits::RateLimitSnapshotDisplay;
use super::rate_limits::StatusRateLimitData;
use super::rate_limits::StatusRateLimitRow;
use super::rate_limits::StatusRateLimitValue;
use super::rate_limits::compose_rate_limit_data;
use super::rate_limits::compose_rate_limit_data_many;
use super::rate_limits::format_status_limit_summary;
use super::rate_limits::render_status_limit_progress_bar;
use crate::wrapping::RtOptions;
use crate::wrapping::adaptive_wrap_lines;
use codex_core::AuthManager;
/// Context-window statistics derived from the latest token usage.
#[derive(Debug, Clone)]
struct StatusContextWindowData {
    // Percent of the window still free.
    percent_remaining: i64,
    // Tokens currently occupying the context window.
    tokens_in_context: i64,
    // Total window size in tokens.
    window: i64,
}
/// Aggregate token counts shown on the status card.
#[derive(Debug, Clone)]
pub(crate) struct StatusTokenUsageData {
    // Blended session total (from `TokenUsage::blended_total`).
    total: i64,
    // Non-cached input tokens.
    input: i64,
    // Output tokens.
    output: i64,
    // Present only when a model context window size is known.
    context_window: Option<StatusContextWindowData>,
}
/// Pre-computed display model for the `/status` card; every field is
/// rendered verbatim by this type's `HistoryCell` implementation.
#[derive(Debug)]
struct StatusHistoryCell {
    model_name: String,
    // Parenthesized detail fragments appended after the model name.
    model_details: Vec<String>,
    directory: PathBuf,
    // "Default", "Full Access", or "Custom (sandbox, approval)".
    permissions: String,
    agents_summary: String,
    collaboration_mode: Option<String>,
    // None when the default OpenAI provider is in use.
    model_provider: Option<String>,
    account: Option<StatusAccountDisplay>,
    thread_name: Option<String>,
    session_id: Option<String>,
    forked_from: Option<String>,
    token_usage: StatusTokenUsageData,
    rate_limits: StatusRateLimitData,
}
/// Test-only convenience wrapper around [`new_status_output_with_rate_limits`]
/// that accepts at most one rate-limit snapshot.
#[cfg(test)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn new_status_output(
    config: &Config,
    auth_manager: &AuthManager,
    token_info: Option<&TokenUsageInfo>,
    total_usage: &TokenUsage,
    session_id: &Option<ThreadId>,
    thread_name: Option<String>,
    forked_from: Option<ThreadId>,
    rate_limits: Option<&RateLimitSnapshotDisplay>,
    plan_type: Option<PlanType>,
    now: DateTime<Local>,
    model_name: &str,
    collaboration_mode: Option<&str>,
    reasoning_effort_override: Option<Option<ReasoningEffort>>,
) -> CompositeHistoryCell {
    // An absent snapshot becomes an empty slice; a present one becomes a
    // one-element slice.
    let snapshots: &[RateLimitSnapshotDisplay] = match rate_limits {
        Some(snapshot) => std::slice::from_ref(snapshot),
        None => &[],
    };
    new_status_output_with_rate_limits(
        config,
        auth_manager,
        token_info,
        total_usage,
        session_id,
        thread_name,
        forked_from,
        snapshots,
        plan_type,
        now,
        model_name,
        collaboration_mode,
        reasoning_effort_override,
    )
}
/// Builds the `/status` output: an echoed command line followed by the
/// status card.
#[allow(clippy::too_many_arguments)]
pub(crate) fn new_status_output_with_rate_limits(
    config: &Config,
    auth_manager: &AuthManager,
    token_info: Option<&TokenUsageInfo>,
    total_usage: &TokenUsage,
    session_id: &Option<ThreadId>,
    thread_name: Option<String>,
    forked_from: Option<ThreadId>,
    rate_limits: &[RateLimitSnapshotDisplay],
    plan_type: Option<PlanType>,
    now: DateTime<Local>,
    model_name: &str,
    collaboration_mode: Option<&str>,
    reasoning_effort_override: Option<Option<ReasoningEffort>>,
) -> CompositeHistoryCell {
    // The echoed "/status" slash command shown above the card.
    let command = PlainHistoryCell::new(vec!["/status".magenta().into()]);
    let card = StatusHistoryCell::new(
        config,
        auth_manager,
        token_info,
        total_usage,
        session_id,
        thread_name,
        forked_from,
        rate_limits,
        plan_type,
        now,
        model_name,
        collaboration_mode,
        reasoning_effort_override,
    );
    // Command first, card second.
    CompositeHistoryCell::new(vec![Box::new(command), Box::new(card)])
}
impl StatusHistoryCell {
    /// Collects everything the `/status` card renders — model, permissions,
    /// account, token usage, and rate limits — into display-ready fields.
    #[allow(clippy::too_many_arguments)]
    fn new(
        config: &Config,
        auth_manager: &AuthManager,
        token_info: Option<&TokenUsageInfo>,
        total_usage: &TokenUsage,
        session_id: &Option<ThreadId>,
        thread_name: Option<String>,
        forked_from: Option<ThreadId>,
        rate_limits: &[RateLimitSnapshotDisplay],
        plan_type: Option<PlanType>,
        now: DateTime<Local>,
        model_name: &str,
        collaboration_mode: Option<&str>,
        reasoning_effort_override: Option<Option<ReasoningEffort>>,
    ) -> Self {
        // Key/value entries consumed by `compose_model_display` and the
        // "approval" lookup below.
        let mut config_entries = vec![
            ("workdir", config.cwd.display().to_string()),
            ("model", model_name.to_string()),
            ("provider", config.model_provider_id.clone()),
            (
                "approval",
                config.permissions.approval_policy.value().to_string(),
            ),
            (
                "sandbox",
                summarize_sandbox_policy(config.permissions.sandbox_policy.get()),
            ),
        ];
        // Reasoning settings are only surfaced for the Responses wire API.
        if config.model_provider.wire_api == WireApi::Responses {
            // Outer None = no override requested; inner None renders "none".
            let effort_value = reasoning_effort_override
                .unwrap_or(None)
                .map(|effort| effort.to_string())
                .unwrap_or_else(|| "none".to_string());
            config_entries.push(("reasoning effort", effort_value));
            config_entries.push((
                "reasoning summaries",
                config
                    .model_reasoning_summary
                    .map(|summary| summary.to_string())
                    .unwrap_or_else(|| "auto".to_string()),
            ));
        }
        let (model_name, model_details) = compose_model_display(model_name, &config_entries);
        let approval = config_entries
            .iter()
            .find(|(k, _)| *k == "approval")
            .map(|(_, v)| v.clone())
            .unwrap_or_else(|| "<unknown>".to_string());
        // Short sandbox label used inside the "Custom (...)" permissions text.
        let sandbox = match config.permissions.sandbox_policy.get() {
            SandboxPolicy::DangerFullAccess => "danger-full-access".to_string(),
            SandboxPolicy::ReadOnly { .. } => "read-only".to_string(),
            SandboxPolicy::WorkspaceWrite {
                network_access: true,
                ..
            } => "workspace-write with network access".to_string(),
            SandboxPolicy::WorkspaceWrite { .. } => "workspace-write".to_string(),
            SandboxPolicy::ExternalSandbox { network_access } => {
                if matches!(network_access, NetworkAccess::Enabled) {
                    "external-sandbox (network access enabled)".to_string()
                } else {
                    "external-sandbox".to_string()
                }
            }
        };
        // The two well-known policy combinations get friendly names; anything
        // else is shown as "Custom (sandbox, approval)".
        let permissions = if config.permissions.approval_policy.value() == AskForApproval::OnRequest
            && *config.permissions.sandbox_policy.get()
                == SandboxPolicy::new_workspace_write_policy()
        {
            "Default".to_string()
        } else if config.permissions.approval_policy.value() == AskForApproval::Never
            && *config.permissions.sandbox_policy.get() == SandboxPolicy::DangerFullAccess
        {
            "Full Access".to_string()
        } else {
            format!("Custom ({sandbox}, {approval})")
        };
        let agents_summary = compose_agents_summary(config);
        let model_provider = format_model_provider(config);
        let account = compose_account_display(auth_manager, plan_type);
        let session_id = session_id.as_ref().map(std::string::ToString::to_string);
        let forked_from = forked_from.map(|id| id.to_string());
        // Context-window stats come from the latest turn when available,
        // otherwise fall back to zero usage against the configured window.
        let default_usage = TokenUsage::default();
        let (context_usage, context_window) = match token_info {
            Some(info) => (&info.last_token_usage, info.model_context_window),
            None => (&default_usage, config.model_context_window),
        };
        let context_window = context_window.map(|window| StatusContextWindowData {
            percent_remaining: context_usage.percent_of_context_window_remaining(window),
            tokens_in_context: context_usage.tokens_in_context_window(),
            window,
        });
        let token_usage = StatusTokenUsageData {
            total: total_usage.blended_total(),
            input: total_usage.non_cached_input(),
            output: total_usage.output_tokens,
            context_window,
        };
        // Zero or one snapshot goes through the single-snapshot path.
        let rate_limits = if rate_limits.len() <= 1 {
            compose_rate_limit_data(rate_limits.first(), now)
        } else {
            compose_rate_limit_data_many(rate_limits, now)
        };
        Self {
            model_name,
            model_details,
            directory: config.cwd.to_path_buf(),
            permissions,
            agents_summary,
            collaboration_mode: collaboration_mode.map(ToString::to_string),
            model_provider,
            account,
            thread_name,
            session_id,
            forked_from,
            token_usage,
            rate_limits,
        }
    }

    /// Spans for the "Token usage" row: `TOTAL total (IN input + OUT output)`.
    fn token_usage_spans(&self) -> Vec<Span<'static>> {
        let total_fmt = format_tokens_compact(self.token_usage.total);
        let input_fmt = format_tokens_compact(self.token_usage.input);
        let output_fmt = format_tokens_compact(self.token_usage.output);
        vec![
            Span::from(total_fmt),
            Span::from(" total "),
            Span::from(" (").dim(),
            Span::from(input_fmt).dim(),
            Span::from(" input").dim(),
            Span::from(" + ").dim(),
            Span::from(output_fmt).dim(),
            Span::from(" output").dim(),
            Span::from(")").dim(),
        ]
    }

    /// Spans for the "Context window" row, or `None` when no window size is
    /// known.
    fn context_window_spans(&self) -> Option<Vec<Span<'static>>> {
        let context = self.token_usage.context_window.as_ref()?;
        let percent = context.percent_remaining;
        let used_fmt = format_tokens_compact(context.tokens_in_context);
        let window_fmt = format_tokens_compact(context.window);
        Some(vec![
            Span::from(format!("{percent}% left")),
            Span::from(" (").dim(),
            Span::from(used_fmt).dim(),
            Span::from(" used / ").dim(),
            Span::from(window_fmt).dim(),
            Span::from(")").dim(),
        ])
    }

    /// Renders the rate-limit section, including placeholder and staleness
    /// messaging.
    fn rate_limit_lines(
        &self,
        available_inner_width: usize,
        formatter: &FieldFormatter,
    ) -> Vec<Line<'static>> {
        match &self.rate_limits {
            StatusRateLimitData::Available(rows_data) => {
                if rows_data.is_empty() {
                    return vec![
                        formatter.line("Limits", vec![Span::from("data not available yet").dim()]),
                    ];
                }
                self.rate_limit_row_lines(rows_data, available_inner_width, formatter)
            }
            StatusRateLimitData::Stale(rows_data) => {
                // Stale data still renders, followed by a warning row.
                let mut lines =
                    self.rate_limit_row_lines(rows_data, available_inner_width, formatter);
                lines.push(formatter.line(
                    "Warning",
                    vec![Span::from("limits may be stale - start new turn to refresh.").dim()],
                ));
                lines
            }
            StatusRateLimitData::Missing => {
                vec![formatter.line("Limits", vec![Span::from("data not available yet").dim()])]
            }
        }
    }

    /// Renders each rate-limit row as a progress bar (or plain text), placing
    /// the reset note inline when it fits or on a continuation line otherwise.
    fn rate_limit_row_lines(
        &self,
        rows: &[StatusRateLimitRow],
        available_inner_width: usize,
        formatter: &FieldFormatter,
    ) -> Vec<Line<'static>> {
        // At most two lines per row: the bar line plus a wrapped reset note.
        let mut lines = Vec::with_capacity(rows.len().saturating_mul(2));
        for row in rows {
            match &row.value {
                StatusRateLimitValue::Window {
                    percent_used,
                    resets_at,
                } => {
                    // The bar shows what remains, not what was used.
                    let percent_remaining = (100.0 - percent_used).clamp(0.0, 100.0);
                    let value_spans = vec![
                        Span::from(render_status_limit_progress_bar(percent_remaining)),
                        Span::from(" "),
                        Span::from(format_status_limit_summary(percent_remaining)),
                    ];
                    let base_spans = formatter.full_spans(row.label.as_str(), value_spans);
                    let base_line = Line::from(base_spans.clone());
                    if let Some(resets_at) = resets_at.as_ref() {
                        let resets_span = Span::from(format!("(resets {resets_at})")).dim();
                        let mut inline_spans = base_spans.clone();
                        inline_spans.push(Span::from(" ").dim());
                        inline_spans.push(resets_span.clone());
                        // Inline only when the combined line fits the width.
                        if line_display_width(&Line::from(inline_spans.clone()))
                            <= available_inner_width
                        {
                            lines.push(Line::from(inline_spans));
                        } else {
                            lines.push(base_line);
                            lines.push(formatter.continuation(vec![resets_span]));
                        }
                    } else {
                        lines.push(base_line);
                    }
                }
                StatusRateLimitValue::Text(text) => {
                    let label = row.label.clone();
                    let spans =
                        formatter.full_spans(label.as_str(), vec![Span::from(text.clone())]);
                    lines.push(Line::from(spans));
                }
            }
        }
        lines
    }

    /// Records the labels the rate-limit section will use so the
    /// `FieldFormatter` can size its label column; `seen` deduplicates.
    fn collect_rate_limit_labels(&self, seen: &mut BTreeSet<String>, labels: &mut Vec<String>) {
        match &self.rate_limits {
            StatusRateLimitData::Available(rows) => {
                if rows.is_empty() {
                    push_label(labels, seen, "Limits");
                } else {
                    for row in rows {
                        push_label(labels, seen, row.label.as_str());
                    }
                }
            }
            StatusRateLimitData::Stale(rows) => {
                for row in rows {
                    push_label(labels, seen, row.label.as_str());
                }
                push_label(labels, seen, "Warning");
            }
            StatusRateLimitData::Missing => push_label(labels, seen, "Limits"),
        }
    }
}
impl HistoryCell for StatusHistoryCell {
    /// Renders the full `/status` card: header, usage note, aligned field
    /// rows, token/limit sections, and a surrounding border.
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        let mut lines: Vec<Line<'static>> = Vec::new();
        // Header: ">_ OpenAI Codex (vX.Y.Z)".
        lines.push(Line::from(vec![
            Span::from(format!("{}>_ ", FieldFormatter::INDENT)).dim(),
            Span::from("OpenAI Codex").bold(),
            Span::from(" ").dim(),
            Span::from(format!("(v{CODEX_CLI_VERSION})")).dim(),
        ]));
        lines.push(Line::from(Vec::<Span<'static>>::new()));
        // Reserve 4 cells for the border/padding; bail out if nothing fits.
        let available_inner_width = usize::from(width.saturating_sub(4));
        if available_inner_width == 0 {
            return Vec::new();
        }
        let account_value = self.account.as_ref().map(|account| match account {
            StatusAccountDisplay::ChatGpt { email, plan } => match (email, plan) {
                (Some(email), Some(plan)) => format!("{email} ({plan})"),
                (Some(email), None) => email.clone(),
                (None, Some(plan)) => plan.clone(),
                (None, None) => "ChatGPT".to_string(),
            },
            StatusAccountDisplay::ApiKey => {
                "API key configured (run codex login to use ChatGPT)".to_string()
            }
        });
        // Collect every label that may appear so the formatter can size the
        // label column before any row is rendered.
        let mut labels: Vec<String> = vec!["Model", "Directory", "Permissions", "Agents.md"]
            .into_iter()
            .map(str::to_string)
            .collect();
        let mut seen: BTreeSet<String> = labels.iter().cloned().collect();
        // Empty thread names are treated as absent.
        let thread_name = self.thread_name.as_deref().filter(|name| !name.is_empty());
        if self.model_provider.is_some() {
            push_label(&mut labels, &mut seen, "Model provider");
        }
        if account_value.is_some() {
            push_label(&mut labels, &mut seen, "Account");
        }
        if thread_name.is_some() {
            push_label(&mut labels, &mut seen, "Thread name");
        }
        if self.session_id.is_some() {
            push_label(&mut labels, &mut seen, "Session");
        }
        // "Forked from" is only shown alongside a session id.
        if self.session_id.is_some() && self.forked_from.is_some() {
            push_label(&mut labels, &mut seen, "Forked from");
        }
        if self.collaboration_mode.is_some() {
            push_label(&mut labels, &mut seen, "Collaboration mode");
        }
        push_label(&mut labels, &mut seen, "Token usage");
        if self.token_usage.context_window.is_some() {
            push_label(&mut labels, &mut seen, "Context window");
        }
        self.collect_rate_limit_labels(&mut seen, &mut labels);
        let formatter = FieldFormatter::from_labels(labels.iter().map(String::as_str));
        let value_width = formatter.value_width(available_inner_width);
        // Usage-dashboard note, wrapped to the available width.
        let note_first_line = Line::from(vec![
            Span::from("Visit ").cyan(),
            "https://chatgpt.com/codex/settings/usage"
                .cyan()
                .underlined(),
            Span::from(" for up-to-date").cyan(),
        ]);
        let note_second_line = Line::from(vec![
            Span::from("information on rate limits and credits").cyan(),
        ]);
        let note_lines = adaptive_wrap_lines(
            [note_first_line, note_second_line],
            RtOptions::new(available_inner_width),
        );
        lines.extend(note_lines);
        lines.push(Line::from(Vec::<Span<'static>>::new()));
        let mut model_spans = vec![Span::from(self.model_name.clone())];
        if !self.model_details.is_empty() {
            model_spans.push(Span::from(" (").dim());
            model_spans.push(Span::from(self.model_details.join(", ")).dim());
            model_spans.push(Span::from(")").dim());
        }
        let directory_value = format_directory_display(&self.directory, Some(value_width));
        lines.push(formatter.line("Model", model_spans));
        if let Some(model_provider) = self.model_provider.as_ref() {
            lines.push(formatter.line("Model provider", vec![Span::from(model_provider.clone())]));
        }
        lines.push(formatter.line("Directory", vec![Span::from(directory_value)]));
        lines.push(formatter.line("Permissions", vec![Span::from(self.permissions.clone())]));
        lines.push(formatter.line("Agents.md", vec![Span::from(self.agents_summary.clone())]));
        if let Some(account_value) = account_value {
            lines.push(formatter.line("Account", vec![Span::from(account_value)]));
        }
        if let Some(thread_name) = thread_name {
            lines.push(formatter.line("Thread name", vec![Span::from(thread_name.to_string())]));
        }
        if let Some(collab_mode) = self.collaboration_mode.as_ref() {
            lines.push(formatter.line("Collaboration mode", vec![Span::from(collab_mode.clone())]));
        }
        if let Some(session) = self.session_id.as_ref() {
            lines.push(formatter.line("Session", vec![Span::from(session.clone())]));
        }
        if self.session_id.is_some()
            && let Some(forked_from) = self.forked_from.as_ref()
        {
            lines.push(formatter.line("Forked from", vec![Span::from(forked_from.clone())]));
        }
        lines.push(Line::from(Vec::<Span<'static>>::new()));
        // Hide token usage only for ChatGPT subscribers
        if !matches!(self.account, Some(StatusAccountDisplay::ChatGpt { .. })) {
            lines.push(formatter.line("Token usage", self.token_usage_spans()));
        }
        if let Some(spans) = self.context_window_spans() {
            lines.push(formatter.line("Context window", spans));
        }
        lines.extend(self.rate_limit_lines(available_inner_width, &formatter));
        // Shrink the border to the widest content line, then truncate any
        // line that still overflows the inner width.
        let content_width = lines.iter().map(line_display_width).max().unwrap_or(0);
        let inner_width = content_width.min(available_inner_width);
        let truncated_lines: Vec<Line<'static>> = lines
            .into_iter()
            .map(|line| truncate_line_to_width(line, inner_width))
            .collect();
        with_border_with_inner_width(truncated_lines, inner_width)
    }
}
/// Builds the "Model provider" status value, or `None` when the default
/// OpenAI provider (with no custom base URL) is in use and the row should be
/// omitted.
fn format_model_provider(config: &Config) -> Option<String> {
    let provider = &config.model_provider;
    // Fall back to the provider id when the display name is blank.
    let trimmed_name = provider.name.trim();
    let provider_name = if trimmed_name.is_empty() {
        config.model_provider_id.as_str()
    } else {
        trimmed_name
    };
    let base_url = provider.base_url.as_deref().and_then(sanitize_base_url);
    // The stock OpenAI provider is implied, so its row is suppressed.
    if provider.is_openai() && base_url.is_none() {
        return None;
    }
    let display = match base_url {
        Some(base_url) => format!("{provider_name} - {base_url}"),
        None => provider_name.to_string(),
    };
    Some(display)
}
/// Normalizes a base URL for display: strips credentials, query, fragment,
/// and trailing slashes. Returns `None` for blank or unparsable input.
fn sanitize_base_url(raw: &str) -> Option<String> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return None;
    }
    let mut url = Url::parse(trimmed).ok()?;
    // Credentials must never appear in status output.
    let _ = url.set_username("");
    let _ = url.set_password(None);
    url.set_query(None);
    url.set_fragment(None);
    let rendered = url.to_string();
    let cleaned = rendered.trim_end_matches('/');
    if cleaned.is_empty() {
        None
    } else {
        Some(cleaned.to_string())
    }
}

View File

@@ -1,147 +0,0 @@
use ratatui::prelude::*;
use ratatui::style::Stylize;
use std::collections::BTreeSet;
use unicode_width::UnicodeWidthChar;
use unicode_width::UnicodeWidthStr;
/// Layout helper that renders aligned `label:   value` rows for the status
/// card.
#[derive(Debug, Clone)]
pub(crate) struct FieldFormatter {
    // Fixed leading indent applied before every label.
    indent: &'static str,
    // Display width of the widest label, used for column alignment.
    label_width: usize,
    // Column (in display cells) where values begin.
    value_offset: usize,
    // Cached whitespace used to align continuation lines with values.
    value_indent: String,
}
impl FieldFormatter {
    /// Leading indent applied before every label.
    pub(crate) const INDENT: &'static str = " ";

    /// Builds a formatter whose label column is sized to the widest of
    /// `labels`, so every row's value starts at the same column.
    pub(crate) fn from_labels<S>(labels: impl IntoIterator<Item = S>) -> Self
    where
        S: AsRef<str>,
    {
        let mut label_width = 0usize;
        for label in labels {
            label_width = label_width.max(UnicodeWidthStr::width(label.as_ref()));
        }
        // indent + widest label + ':' + three spaces of gutter.
        let value_offset = UnicodeWidthStr::width(Self::INDENT) + label_width + 1 + 3;
        Self {
            indent: Self::INDENT,
            label_width,
            value_offset,
            value_indent: " ".repeat(value_offset),
        }
    }

    /// Renders one aligned `label: value` row.
    pub(crate) fn line(
        &self,
        label: &'static str,
        value_spans: Vec<Span<'static>>,
    ) -> Line<'static> {
        Line::from(self.full_spans(label, value_spans))
    }

    /// Renders a continuation line whose content lines up with the value
    /// column of preceding rows.
    pub(crate) fn continuation(&self, spans: Vec<Span<'static>>) -> Line<'static> {
        let pad = Span::from(self.value_indent.clone()).dim();
        let all_spans: Vec<Span<'static>> = std::iter::once(pad).chain(spans).collect();
        Line::from(all_spans)
    }

    /// Display cells remaining for values after the label column.
    pub(crate) fn value_width(&self, available_inner_width: usize) -> usize {
        available_inner_width.saturating_sub(self.value_offset)
    }

    /// Prefixes `value_spans` with the padded, dimmed label span.
    pub(crate) fn full_spans(
        &self,
        label: &str,
        value_spans: Vec<Span<'static>>,
    ) -> Vec<Span<'static>> {
        let mut spans = vec![self.label_span(label)];
        spans.extend(value_spans);
        spans
    }

    /// Builds the `indent + label + ':' + padding` span, padded so every
    /// value column aligns regardless of label length.
    fn label_span(&self, label: &str) -> Span<'static> {
        let padding = 3 + self.label_width.saturating_sub(UnicodeWidthStr::width(label));
        let text = format!("{}{}:{}", self.indent, label, " ".repeat(padding));
        Span::from(text).dim()
    }
}
/// Appends `label` to `labels` unless it was already recorded in `seen`.
pub(crate) fn push_label(labels: &mut Vec<String>, seen: &mut BTreeSet<String>, label: &str) {
    // `insert` reports whether the value was newly added, which doubles as
    // the dedupe check.
    if seen.insert(label.to_string()) {
        labels.push(label.to_string());
    }
}
/// Sums the Unicode display width of every span in `line`.
pub(crate) fn line_display_width(line: &Line<'static>) -> usize {
    line.iter().fold(0usize, |acc, span| {
        acc + UnicodeWidthStr::width(span.content.as_ref())
    })
}
/// Truncates `line` to at most `max_width` display cells, splitting a span
/// mid-way on a character boundary if necessary.
///
/// Zero-width spans (e.g. pure style markers) encountered before the limit
/// is reached are preserved; everything after a partially-truncated span is
/// dropped.
pub(crate) fn truncate_line_to_width(line: Line<'static>, max_width: usize) -> Line<'static> {
    if max_width == 0 {
        return Line::from(Vec::<Span<'static>>::new());
    }
    // Display cells consumed so far.
    let mut used = 0usize;
    let mut spans_out: Vec<Span<'static>> = Vec::new();
    for span in line.spans {
        let text = span.content.into_owned();
        let style = span.style;
        let span_width = UnicodeWidthStr::width(text.as_str());
        if span_width == 0 {
            // Width-less spans never consume budget; keep them as-is.
            spans_out.push(Span::styled(text, style));
            continue;
        }
        if used >= max_width {
            break;
        }
        if used + span_width <= max_width {
            // The whole span fits.
            used += span_width;
            spans_out.push(Span::styled(text, style));
            continue;
        }
        // Partial fit: copy characters until the next would overflow.
        let mut truncated = String::new();
        for ch in text.chars() {
            let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0);
            if used + ch_width > max_width {
                break;
            }
            truncated.push(ch);
            used += ch_width;
        }
        if !truncated.is_empty() {
            spans_out.push(Span::styled(truncated, style));
        }
        // Nothing after a partial span can fit, so stop here.
        break;
    }
    Line::from(spans_out)
}

View File

@@ -1,244 +0,0 @@
use crate::exec_command::relativize_to_home;
use crate::text_formatting;
use chrono::DateTime;
use chrono::Local;
use codex_core::AuthManager;
use codex_core::auth::AuthMode as CoreAuthMode;
use codex_core::config::Config;
use codex_core::project_doc::discover_project_doc_paths;
use codex_protocol::account::PlanType;
use std::path::Path;
use unicode_width::UnicodeWidthStr;
use super::account::StatusAccountDisplay;
/// Renders a path for AGENTS.md display, letting `dunce` simplify it (e.g.
/// stripping Windows verbatim `\\?\` prefixes).
fn normalize_agents_display_path(path: &Path) -> String {
    let simplified = dunce::simplified(path);
    simplified.display().to_string()
}
/// Splits the model row into the model name plus parenthesized detail
/// fragments derived from the "reasoning effort" / "reasoning summaries"
/// config entries.
pub(crate) fn compose_model_display(
    model_name: &str,
    entries: &[(&str, String)],
) -> (String, Vec<String>) {
    // Finds the value for a given entry key, if present.
    let lookup = |key: &str| entries.iter().find(|(k, _)| *k == key).map(|(_, v)| v);
    let mut details: Vec<String> = Vec::new();
    if let Some(effort) = lookup("reasoning effort") {
        details.push(format!("reasoning {}", effort.to_ascii_lowercase()));
    }
    if let Some(summary) = lookup("reasoning summaries") {
        let summary = summary.trim();
        // "none"/"off" collapse to a fixed label; other non-empty values are
        // echoed in lowercase.
        if summary.eq_ignore_ascii_case("none") || summary.eq_ignore_ascii_case("off") {
            details.push("summaries off".to_string());
        } else if !summary.is_empty() {
            details.push(format!("summaries {}", summary.to_ascii_lowercase()));
        }
    }
    (model_name.to_string(), details)
}
/// Summarizes discovered AGENTS.md project-doc paths for the status card.
///
/// Each path is shown relative to the working directory when possible
/// (including `..`-style walks up to an ancestor); otherwise the full
/// normalized path is shown. Discovery errors and zero results both render
/// as "<none>".
pub(crate) fn compose_agents_summary(config: &Config) -> String {
    match discover_project_doc_paths(config) {
        Ok(paths) => {
            let mut rels: Vec<String> = Vec::new();
            for p in paths {
                let file_name = p
                    .file_name()
                    .map(|name| name.to_string_lossy().to_string())
                    .unwrap_or_else(|| "<unknown>".to_string());
                let display = if let Some(parent) = p.parent() {
                    if parent == config.cwd.as_path() {
                        // Lives directly in the cwd: just the file name.
                        file_name.clone()
                    } else {
                        // Walk up from cwd looking for the doc's parent, to
                        // build a "../../AGENTS.md"-style display path.
                        let mut cur = config.cwd.as_path();
                        let mut ups = 0usize;
                        let mut reached = false;
                        while let Some(c) = cur.parent() {
                            if cur == parent {
                                reached = true;
                                break;
                            }
                            cur = c;
                            ups += 1;
                        }
                        if reached {
                            let up = format!("..{}", std::path::MAIN_SEPARATOR);
                            format!("{}{}", up.repeat(ups), file_name)
                        } else if let Ok(stripped) = p.strip_prefix(&config.cwd) {
                            // Below the cwd: show the relative remainder.
                            normalize_agents_display_path(stripped)
                        } else {
                            // Unrelated location: fall back to the full path.
                            normalize_agents_display_path(&p)
                        }
                    }
                } else {
                    normalize_agents_display_path(&p)
                };
                rels.push(display);
            }
            if rels.is_empty() {
                "<none>".to_string()
            } else {
                rels.join(", ")
            }
        }
        // Best-effort: discovery failures are not surfaced as errors.
        Err(_) => "<none>".to_string(),
    }
}
/// Maps the cached auth state (plus an optional plan) to a status display
/// value. Returns `None` when no auth is cached.
pub(crate) fn compose_account_display(
    auth_manager: &AuthManager,
    plan: Option<PlanType>,
) -> Option<StatusAccountDisplay> {
    let auth = auth_manager.auth_cached()?;
    let display = match auth.auth_mode() {
        CoreAuthMode::ApiKey => StatusAccountDisplay::ApiKey,
        CoreAuthMode::Chatgpt | CoreAuthMode::ChatgptAuthTokens => {
            // An unknown plan still renders, labeled "Unknown".
            let plan_label = plan
                .map(plan_type_display_name)
                .unwrap_or_else(|| "Unknown".to_string());
            StatusAccountDisplay::ChatGpt {
                email: auth.get_account_email(),
                plan: Some(plan_label),
            }
        }
    };
    Some(display)
}
/// Maps a plan type to its user-facing label.
///
/// NOTE: the labels are deliberately remapped — team-like plans display as
/// "Business" and business-like plans display as "Enterprise" (pinned by the
/// `plan_type_display_name_remaps_display_labels` unit test). Other plans
/// fall back to a title-cased form of the variant's `Debug` name.
pub(crate) fn plan_type_display_name(plan_type: PlanType) -> String {
    if plan_type.is_team_like() {
        "Business".to_string()
    } else if plan_type.is_business_like() {
        "Enterprise".to_string()
    } else {
        title_case(format!("{plan_type:?}").as_str())
    }
}
/// Formats a token count compactly: raw digits below 1 000, otherwise scaled
/// to a K/M/B/T suffix with up to three significant figures and trailing
/// zeros trimmed. Negative inputs are clamped to zero.
pub(crate) fn format_tokens_compact(value: i64) -> String {
    let value = value.max(0);
    if value < 1_000 {
        // Covers zero as well; no scaling or suffix needed.
        return value.to_string();
    }
    const SCALES: [(i64, &str); 4] = [
        (1_000_000_000_000, "T"),
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    ];
    let (divisor, suffix) = SCALES
        .iter()
        .copied()
        .find(|(threshold, _)| value >= *threshold)
        .expect("value is at least 1_000, so the K threshold always matches");
    let scaled = value as f64 / divisor as f64;
    // Keep roughly three significant figures.
    let decimals = if scaled < 10.0 {
        2
    } else if scaled < 100.0 {
        1
    } else {
        0
    };
    let mut formatted = format!("{scaled:.decimals$}");
    // Trim trailing zeros and a dangling decimal point ("1.50" -> "1.5").
    if formatted.contains('.') {
        formatted = formatted
            .trim_end_matches('0')
            .trim_end_matches('.')
            .to_string();
    }
    format!("{formatted}{suffix}")
}
/// Renders a directory for display, substituting `~` for the home directory
/// and center-truncating when the result exceeds `max_width` display cells.
/// A `max_width` of zero yields an empty string.
pub(crate) fn format_directory_display(directory: &Path, max_width: Option<usize>) -> String {
    let formatted = match relativize_to_home(directory) {
        // Exactly the home directory.
        Some(rel) if rel.as_os_str().is_empty() => "~".to_string(),
        Some(rel) => format!("~{}{}", std::path::MAIN_SEPARATOR, rel.display()),
        None => directory.display().to_string(),
    };
    let Some(max_width) = max_width else {
        return formatted;
    };
    if max_width == 0 {
        return String::new();
    }
    if UnicodeWidthStr::width(formatted.as_str()) > max_width {
        text_formatting::center_truncate_path(&formatted, max_width)
    } else {
        formatted
    }
}
/// Formats a reset time as `HH:MM`, appending a date suffix (e.g. "on 3 Jan")
/// when the reset falls on a different calendar day than `captured_at`.
pub(crate) fn format_reset_timestamp(dt: DateTime<Local>, captured_at: DateTime<Local>) -> String {
    let same_day = dt.date_naive() == captured_at.date_naive();
    let time = dt.format("%H:%M").to_string();
    if same_day {
        time
    } else {
        format!("{time} on {}", dt.format("%-d %b"))
    }
}
/// Title-cases `s`: uppercases the first character (which may expand to
/// multiple characters under Unicode rules) and ASCII-lowercases the rest.
/// Returns an empty string for empty input.
pub(crate) fn title_case(s: &str) -> String {
    let mut chars = s.chars();
    // A `None` first char means the input was empty, so no separate
    // `is_empty` pre-check is needed (the old one was redundant).
    let Some(first) = chars.next() else {
        return String::new();
    };
    // Only ASCII letters are lowered; non-ASCII in the tail is untouched.
    let rest = chars.as_str().to_ascii_lowercase();
    first.to_uppercase().collect::<String>() + &rest
}
#[cfg(test)]
mod tests {
    use super::*;
    use codex_core::auth::CodexAuth;
    use pretty_assertions::assert_eq;

    /// Pins the intentional plan-label remapping (team-like -> "Business",
    /// business-like -> "Enterprise") plus the title-cased fallbacks.
    #[test]
    fn plan_type_display_name_remaps_display_labels() {
        let cases = [
            (PlanType::Free, "Free"),
            (PlanType::Go, "Go"),
            (PlanType::Plus, "Plus"),
            (PlanType::Pro, "Pro"),
            (PlanType::Team, "Business"),
            (PlanType::SelfServeBusinessUsageBased, "Business"),
            (PlanType::Business, "Enterprise"),
            (PlanType::EnterpriseCbpUsageBased, "Enterprise"),
            (PlanType::Enterprise, "Enterprise"),
            (PlanType::Edu, "Edu"),
            (PlanType::Unknown, "Unknown"),
        ];
        for (plan_type, expected) in cases {
            assert_eq!(plan_type_display_name(plan_type), expected);
        }
    }

    /// A ChatGPT auth with a team-like plan should surface the remapped
    /// "Business" label in the account display.
    #[test]
    fn compose_account_display_uses_remapped_plan_label() {
        let auth_manager =
            AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
        let display = compose_account_display(
            auth_manager.as_ref(),
            Some(PlanType::SelfServeBusinessUsageBased),
        );
        assert!(matches!(
            display,
            Some(StatusAccountDisplay::ChatGpt {
                email: None,
                plan: Some(ref plan),
            }) if plan == "Business"
        ));
    }
}

View File

@@ -1,27 +0,0 @@
//! Status output formatting and display adapters for the TUI.
//!
//! This module turns protocol-level snapshots into stable display structures used by `/status`
//! output and footer/status-line helpers, while keeping rendering concerns out of transport-facing
//! code.
//!
//! `rate_limits` is the main integration point for status-line usage-limit items: it converts raw
//! window snapshots into local-time labels and classifies data as available, stale, or missing.
mod account;
mod card;
mod format;
mod helpers;
mod rate_limits;
// Test-only variant that accepts at most one rate-limit snapshot.
#[cfg(test)]
pub(crate) use card::new_status_output;
pub(crate) use card::new_status_output_with_rate_limits;
pub(crate) use helpers::format_directory_display;
pub(crate) use helpers::format_tokens_compact;
pub(crate) use rate_limits::RateLimitSnapshotDisplay;
pub(crate) use rate_limits::RateLimitWindowDisplay;
// Test-only convenience wrapper that assumes the canonical "codex" limit.
#[cfg(test)]
pub(crate) use rate_limits::rate_limit_snapshot_display;
pub(crate) use rate_limits::rate_limit_snapshot_display_for_limit;
#[cfg(test)]
mod tests;

View File

@@ -1,440 +0,0 @@
//! Rate-limit and credits display shaping for status surfaces.
//!
//! This module maps `RateLimitSnapshot` protocol payloads into display-oriented rows that the TUI
//! can render in `/status` and status-line contexts without duplicating formatting logic.
//!
//! The key contract is that time-sensitive values are interpreted relative to a caller-provided
//! capture timestamp so stale detection and reset labels remain coherent for a given draw cycle.
use crate::chatwidget::get_limits_duration;
use crate::text_formatting::capitalize_first;
use super::helpers::format_reset_timestamp;
use chrono::DateTime;
use chrono::Duration as ChronoDuration;
use chrono::Local;
use chrono::Utc;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
// Number of cells in the /status usage-limit progress bar.
const STATUS_LIMIT_BAR_SEGMENTS: usize = 20;
// Glyphs used for filled/empty bar segments.
// NOTE(review): both literals appear empty here — the original block glyphs
// look lost in extraction/encoding; verify against the rendered progress bar.
const STATUS_LIMIT_BAR_FILLED: &str = "";
const STATUS_LIMIT_BAR_EMPTY: &str = "";
/// One renderable rate-limit row in `/status` output.
#[derive(Debug, Clone)]
pub(crate) struct StatusRateLimitRow {
    /// Human-readable row label, such as `"5h limit"` or `"Credits"`.
    pub label: String,
    /// Value payload for the row.
    pub value: StatusRateLimitValue,
}
/// Display value variants for a single rate-limit row.
#[derive(Debug, Clone)]
pub(crate) enum StatusRateLimitValue {
    /// Percent-based usage window with optional reset timestamp text.
    Window {
        /// Percent of the window that has been consumed.
        percent_used: f64,
        /// Localized reset string, or `None` when unknown.
        resets_at: Option<String>,
    },
    /// Plain text value used for non-window rows.
    Text(String),
}
/// Availability state for rate-limit data shown in status output.
#[derive(Debug, Clone)]
pub(crate) enum StatusRateLimitData {
    /// Snapshot data is recent enough for normal rendering.
    Available(Vec<StatusRateLimitRow>),
    /// Snapshot data exists but is older than the staleness threshold;
    /// rendering appends a "may be stale" warning row.
    Stale(Vec<StatusRateLimitRow>),
    /// No snapshot data is currently available.
    Missing,
}
/// Maximum age before a snapshot is considered stale in status output.
pub(crate) const RATE_LIMIT_STALE_THRESHOLD_MINUTES: i64 = 15;
/// Display-friendly representation of one usage window from a snapshot.
#[derive(Debug, Clone)]
pub(crate) struct RateLimitWindowDisplay {
    /// Percent used for the window.
    pub used_percent: f64,
    /// Human-readable local reset time (`HH:MM`, with a date suffix when it
    /// falls on a different day than the capture time).
    pub resets_at: Option<String>,
    /// Window length in minutes when provided by the server.
    pub window_minutes: Option<i64>,
}
impl RateLimitWindowDisplay {
    /// Converts a protocol window into display form, rendering the reset
    /// timestamp (Unix seconds) in local time relative to `captured_at`.
    fn from_window(window: &RateLimitWindow, captured_at: DateTime<Local>) -> Self {
        // Absent or unrepresentable timestamps simply yield no reset label.
        let resets_at = window
            .resets_at
            .and_then(|seconds| DateTime::<Utc>::from_timestamp(seconds, 0))
            .map(|utc| format_reset_timestamp(utc.with_timezone(&Local), captured_at));
        Self {
            used_percent: window.used_percent,
            resets_at,
            window_minutes: window.window_minutes,
        }
    }
}
/// Display-ready snapshot of rate-limit state for one named limit.
#[derive(Debug, Clone)]
pub(crate) struct RateLimitSnapshotDisplay {
    /// Canonical limit identifier (for example: `codex` or `codex_other`).
    pub limit_name: String,
    /// Local timestamp representing when this display snapshot was captured.
    pub captured_at: DateTime<Local>,
    /// Primary usage window (typically short duration).
    pub primary: Option<RateLimitWindowDisplay>,
    /// Secondary usage window (typically weekly).
    pub secondary: Option<RateLimitWindowDisplay>,
    /// Optional credits metadata when available.
    pub credits: Option<CreditsSnapshotDisplay>,
}
/// Display-ready credits state extracted from protocol snapshots.
#[derive(Debug, Clone)]
pub(crate) struct CreditsSnapshotDisplay {
    /// Whether credits tracking is enabled for the account.
    pub has_credits: bool,
    /// Whether the account has unlimited credits.
    pub unlimited: bool,
    /// Raw balance text as provided by the backend.
    pub balance: Option<String>,
}
/// Converts a protocol snapshot into UI-friendly display data, tagged with
/// the default `codex` limit bucket.
///
/// Pass the timestamp from the same observation point as `snapshot`; a
/// significantly older or newer `captured_at` can produce misleading reset
/// labels and stale classification.
#[cfg(test)]
pub(crate) fn rate_limit_snapshot_display(
    snapshot: &RateLimitSnapshot,
    captured_at: DateTime<Local>,
) -> RateLimitSnapshotDisplay {
    let default_limit = String::from("codex");
    rate_limit_snapshot_display_for_limit(snapshot, default_limit, captured_at)
}
pub(crate) fn rate_limit_snapshot_display_for_limit(
snapshot: &RateLimitSnapshot,
limit_name: String,
captured_at: DateTime<Local>,
) -> RateLimitSnapshotDisplay {
RateLimitSnapshotDisplay {
limit_name,
captured_at,
primary: snapshot
.primary
.as_ref()
.map(|window| RateLimitWindowDisplay::from_window(window, captured_at)),
secondary: snapshot
.secondary
.as_ref()
.map(|window| RateLimitWindowDisplay::from_window(window, captured_at)),
credits: snapshot.credits.as_ref().map(CreditsSnapshotDisplay::from),
}
}
impl From<&CoreCreditsSnapshot> for CreditsSnapshotDisplay {
    // Copies the protocol credits state into the display type; the balance
    // string is cloned so the display value owns its data.
    fn from(value: &CoreCreditsSnapshot) -> Self {
        Self {
            has_credits: value.has_credits,
            unlimited: value.unlimited,
            balance: value.balance.clone(),
        }
    }
}
/// Builds display rows from a snapshot and marks stale data by capture age.
///
/// Callers should pass `Local::now()` for `now` at render time; using a cached
/// timestamp can make fresh data appear stale or prevent stale warnings from
/// appearing.
pub(crate) fn compose_rate_limit_data(
    snapshot: Option<&RateLimitSnapshotDisplay>,
    now: DateTime<Local>,
) -> StatusRateLimitData {
    // Absence of a snapshot maps straight to `Missing`; otherwise delegate to
    // the multi-snapshot path with a one-element slice.
    let Some(snapshot) = snapshot else {
        return StatusRateLimitData::Missing;
    };
    compose_rate_limit_data_many(std::slice::from_ref(snapshot), now)
}
/// Builds display rows for every snapshot and classifies the result as
/// `Available`, `Stale`, or `Missing`.
///
/// A snapshot older than `RATE_LIMIT_STALE_THRESHOLD_MINUTES` marks the
/// entire result stale. Non-`codex` limit buckets are rendered either as a
/// group-header row followed by window rows, or — when they have exactly one
/// window — as a single combined row (see the unit tests below).
pub(crate) fn compose_rate_limit_data_many(
    snapshots: &[RateLimitSnapshotDisplay],
    now: DateTime<Local>,
) -> StatusRateLimitData {
    if snapshots.is_empty() {
        return StatusRateLimitData::Missing;
    }
    // Each snapshot contributes at most three rows: primary window,
    // secondary window, and a credits row.
    let mut rows = Vec::with_capacity(snapshots.len().saturating_mul(3));
    let mut stale = false;
    for snapshot in snapshots {
        // A single stale capture taints the whole result.
        stale |= now.signed_duration_since(snapshot.captured_at)
            > ChronoDuration::minutes(RATE_LIMIT_STALE_THRESHOLD_MINUTES);
        let limit_bucket_label = snapshot.limit_name.clone();
        // The default `codex` bucket is rendered without a bucket prefix.
        let show_limit_prefix = !limit_bucket_label.eq_ignore_ascii_case("codex");
        // Window labels derive from the server-provided duration, with
        // capitalized fallbacks of "5h" (primary) and "weekly" (secondary).
        let primary_label = snapshot
            .primary
            .as_ref()
            .map(|window| {
                window
                    .window_minutes
                    .map(get_limits_duration)
                    .unwrap_or_else(|| "5h".to_string())
            })
            .map(|label| capitalize_first(&label));
        let secondary_label = snapshot
            .secondary
            .as_ref()
            .map(|window| {
                window
                    .window_minutes
                    .map(get_limits_duration)
                    .unwrap_or_else(|| "weekly".to_string())
            })
            .map(|label| capitalize_first(&label));
        let window_count =
            usize::from(snapshot.primary.is_some()) + usize::from(snapshot.secondary.is_some());
        // A non-codex bucket with exactly one window collapses into a single
        // combined row (e.g. "codex-other 5h limit") instead of emitting a
        // group header plus a window row.
        let combine_non_codex_single_limit = show_limit_prefix && window_count == 1;
        if show_limit_prefix && !combine_non_codex_single_limit {
            // Group-header row: label only, empty value text.
            rows.push(StatusRateLimitRow {
                label: format!("{limit_bucket_label} limit"),
                value: StatusRateLimitValue::Text(String::new()),
            });
        }
        if let Some(primary) = snapshot.primary.as_ref() {
            let label = if combine_non_codex_single_limit {
                format!(
                    "{} {} limit",
                    limit_bucket_label,
                    primary_label.clone().unwrap_or_else(|| "5h".to_string())
                )
            } else {
                format!(
                    "{} limit",
                    primary_label.clone().unwrap_or_else(|| "5h".to_string())
                )
            };
            rows.push(StatusRateLimitRow {
                label,
                value: StatusRateLimitValue::Window {
                    percent_used: primary.used_percent,
                    resets_at: primary.resets_at.clone(),
                },
            });
        }
        if let Some(secondary) = snapshot.secondary.as_ref() {
            let label = if combine_non_codex_single_limit {
                format!(
                    "{} {} limit",
                    limit_bucket_label,
                    secondary_label
                        .clone()
                        .unwrap_or_else(|| "weekly".to_string())
                )
            } else {
                format!(
                    "{} limit",
                    secondary_label
                        .clone()
                        .unwrap_or_else(|| "weekly".to_string())
                )
            };
            rows.push(StatusRateLimitRow {
                label,
                value: StatusRateLimitValue::Window {
                    percent_used: secondary.used_percent,
                    resets_at: secondary.resets_at.clone(),
                },
            });
        }
        // Credits row is only emitted when credit tracking applies.
        if let Some(credits) = snapshot.credits.as_ref()
            && let Some(row) = credit_status_row(credits)
        {
            rows.push(row);
        }
    }
    // Snapshots present but producing no rows report `Available` with an
    // empty row list, which is distinct from `Missing` (no snapshots at all).
    if rows.is_empty() {
        StatusRateLimitData::Available(vec![])
    } else if stale {
        StatusRateLimitData::Stale(rows)
    } else {
        StatusRateLimitData::Available(rows)
    }
}
/// Renders a fixed-width progress bar from remaining percentage.
///
/// Expects a remaining value in the `0..=100` range and clamps out-of-range
/// input. Passing a *used* percentage by mistake will invert the bar and
/// mislead users.
pub(crate) fn render_status_limit_progress_bar(percent_remaining: f64) -> String {
    let fraction = (percent_remaining / 100.0).clamp(0.0, 1.0);
    // Round to the nearest whole segment, never exceeding the bar width.
    let filled = ((fraction * STATUS_LIMIT_BAR_SEGMENTS as f64).round() as usize)
        .min(STATUS_LIMIT_BAR_SEGMENTS);
    let empty = STATUS_LIMIT_BAR_SEGMENTS.saturating_sub(filled);
    let mut bar = String::from("[");
    bar.push_str(&STATUS_LIMIT_BAR_FILLED.repeat(filled));
    bar.push_str(&STATUS_LIMIT_BAR_EMPTY.repeat(empty));
    bar.push(']');
    bar
}
/// Formats a compact textual summary from remaining percentage,
/// rounded to a whole number (e.g. `42% left`).
pub(crate) fn format_status_limit_summary(percent_remaining: f64) -> String {
    let mut summary = format!("{percent_remaining:.0}");
    summary.push_str("% left");
    summary
}
/// Builds a single `StatusRateLimitRow` for credits when the snapshot
/// indicates that the account has credit tracking enabled. Unlimited credits
/// are called out explicitly; otherwise the rounded balance is rendered in
/// credits. Accounts without credit tracking — or with a missing, zero, or
/// unparseable balance — produce no row.
fn credit_status_row(credits: &CreditsSnapshotDisplay) -> Option<StatusRateLimitRow> {
    if !credits.has_credits {
        return None;
    }
    let value = if credits.unlimited {
        StatusRateLimitValue::Text("Unlimited".to_string())
    } else {
        // Both a missing balance and an unrenderable one suppress the row.
        let display_balance = format_credit_balance(credits.balance.as_ref()?)?;
        StatusRateLimitValue::Text(format!("{display_balance} credits"))
    };
    Some(StatusRateLimitRow {
        label: "Credits".to_string(),
        value,
    })
}
fn format_credit_balance(raw: &str) -> Option<String> {
let trimmed = raw.trim();
if trimmed.is_empty() {
return None;
}
if let Ok(int_value) = trimmed.parse::<i64>()
&& int_value > 0
{
return Some(int_value.to_string());
}
if let Ok(value) = trimmed.parse::<f64>()
&& value > 0.0
{
let rounded = value.round() as i64;
return Some(rounded.to_string());
}
None
}
/// Unit tests for rate-limit row composition in status output.
#[cfg(test)]
mod tests {
    use super::CreditsSnapshotDisplay;
    use super::RateLimitSnapshotDisplay;
    use super::RateLimitWindowDisplay;
    use super::StatusRateLimitData;
    use super::compose_rate_limit_data_many;
    use chrono::Local;
    use pretty_assertions::assert_eq;
    // Helper: a 300-minute (5h) window at the given usage percent.
    fn window(used_percent: f64) -> RateLimitWindowDisplay {
        RateLimitWindowDisplay {
            used_percent,
            resets_at: Some("soon".to_string()),
            window_minutes: Some(300),
        }
    }
    // A non-"codex" bucket with a single window collapses into one combined
    // row ("codex-other 5h limit") instead of a group header plus window row,
    // and each bucket keeps its own "Credits" row.
    #[test]
    fn non_codex_single_limit_renders_combined_row() {
        let now = Local::now();
        let codex = RateLimitSnapshotDisplay {
            limit_name: "codex".to_string(),
            captured_at: now,
            primary: Some(window(10.0)),
            secondary: None,
            credits: Some(CreditsSnapshotDisplay {
                has_credits: true,
                unlimited: false,
                balance: Some("25".to_string()),
            }),
        };
        let other = RateLimitSnapshotDisplay {
            limit_name: "codex-other".to_string(),
            captured_at: now,
            primary: Some(window(20.0)),
            secondary: None,
            credits: Some(CreditsSnapshotDisplay {
                has_credits: true,
                unlimited: false,
                balance: Some("99".to_string()),
            }),
        };
        let rows = match compose_rate_limit_data_many(&[codex, other], now) {
            StatusRateLimitData::Available(rows) => rows,
            other => panic!("unexpected status: {other:?}"),
        };
        let labels: Vec<String> = rows.iter().map(|row| row.label.clone()).collect();
        assert_eq!(
            labels,
            vec![
                "5h limit".to_string(),
                "Credits".to_string(),
                "codex-other 5h limit".to_string(),
                "Credits".to_string(),
            ]
        );
        assert_eq!(rows.iter().filter(|row| row.label == "Credits").count(), 2);
    }
    // A non-"codex" bucket with both windows keeps the group-header row
    // followed by one row per window.
    #[test]
    fn non_codex_multi_limit_keeps_group_row() {
        let now = Local::now();
        let other = RateLimitSnapshotDisplay {
            limit_name: "codex-other".to_string(),
            captured_at: now,
            primary: Some(RateLimitWindowDisplay {
                used_percent: 20.0,
                resets_at: Some("soon".to_string()),
                window_minutes: Some(60),
            }),
            secondary: Some(RateLimitWindowDisplay {
                used_percent: 40.0,
                resets_at: Some("later".to_string()),
                window_minutes: None,
            }),
            credits: None,
        };
        let rows = match compose_rate_limit_data_many(&[other], now) {
            StatusRateLimitData::Available(rows) => rows,
            other => panic!("unexpected status: {other:?}"),
        };
        let labels: Vec<String> = rows.iter().map(|row| row.label.clone()).collect();
        assert_eq!(
            labels,
            vec![
                "codex-other limit".to_string(),
                "1h limit".to_string(),
                "Weekly limit".to_string(),
            ]
        );
    }
}

View File

@@ -1,23 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭─────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 1.05K total (700 input + 350 output) │
│ Context window: 100% left (1.45K used / 272K) │
│ 5h limit: [████████░░░░░░░░░░░░] 40% left (resets 11:32) │
│ Weekly limit: [█████████████░░░░░░░] 65% left (resets 11:52) │
│ Warning: limits may be stale - start new turn to refresh. │
╰─────────────────────────────────────────────────────────────────────╯

View File

@@ -1,23 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 2K total (1.4K input + 600 output) │
│ Context window: 100% left (2.2K used / 272K) │
│ 5h limit: [███████████░░░░░░░░░] 55% left (resets 09:25) │
│ Weekly limit: [██████████████░░░░░░] 70% left (resets 09:55) │
│ Credits: 38 credits │
╰───────────────────────────────────────────────────────────────────╯

View File

@@ -1,23 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ Session: 0f0f3c13-6cf9-4aa4-8b80-7d49c2f1be2e │
│ Forked from: e9f18a88-8081-4e51-9d4e-8af5cde2d8dd │
│ │
│ Token usage: 1.2K total (800 input + 400 output) │
│ Context window: 100% left (1.2K used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────────────╯

View File

@@ -1,21 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭────────────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 1.2K total (800 input + 400 output) │
│ Context window: 100% left (1.2K used / 272K) │
│ Monthly limit: [██████████████████░░] 88% left (resets 07:08 on 7 May) │
╰────────────────────────────────────────────────────────────────────────────╯

View File

@@ -1,22 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning high, summaries detailed) │
│ Directory: [[workspace]] │
│ Permissions: Default │
│ Agents.md: <none> │
│ │
│ Token usage: 1.9K total (1K input + 900 output) │
│ Context window: 100% left (2.25K used / 272K) │
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │
│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │
╰───────────────────────────────────────────────────────────────────────────╯

View File

@@ -1,21 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 750 total (500 input + 250 output) │
│ Context window: 100% left (750 used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────────────╯

View File

@@ -1,21 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 750 total (500 input + 250 output) │
│ Context window: 100% left (750 used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────────────╯

View File

@@ -1,23 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 1.9K total (1K input + 900 output) │
│ Context window: 100% left (2.25K used / 272K) │
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │
│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │
│ Warning: limits may be stale - start new turn to refresh. │
╰───────────────────────────────────────────────────────────────────────╯

View File

@@ -1,21 +0,0 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning high, summaries de │
│ Directory: [[workspace]] │
│ Permissions: Custom (read-only, on-request) │
│ Agents.md: <none> │
│ │
│ Token usage: 1.9K total (1K input + 900 output) │
│ Context window: 100% left (2.25K used / 272K) │
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │
╰────────────────────────────────────────────────────────────────────╯

File diff suppressed because it is too large Load Diff