feat: show forked from session id in /status (#9330)

Summary:
- Add forked_from to SessionMeta/SessionConfiguredEvent and persist it
for forked sessions.
- Surface forked_from in /status for tui + tui2 and add snapshots.
This commit is contained in:
Anton Panasenko
2026-01-16 13:41:46 -08:00
committed by GitHub
parent f1653dd4d3
commit c26fe64539
19 changed files with 267 additions and 0 deletions

View File

@@ -66,6 +66,7 @@ struct StatusHistoryCell {
model_provider: Option<String>,
account: Option<StatusAccountDisplay>,
session_id: Option<String>,
forked_from: Option<String>,
token_usage: StatusTokenUsageData,
rate_limits: StatusRateLimitData,
}
@@ -77,6 +78,7 @@ pub(crate) fn new_status_output(
token_info: Option<&TokenUsageInfo>,
total_usage: &TokenUsage,
session_id: &Option<ThreadId>,
forked_from: Option<ThreadId>,
rate_limits: Option<&RateLimitSnapshotDisplay>,
plan_type: Option<PlanType>,
now: DateTime<Local>,
@@ -89,6 +91,7 @@ pub(crate) fn new_status_output(
token_info,
total_usage,
session_id,
forked_from,
rate_limits,
plan_type,
now,
@@ -106,6 +109,7 @@ impl StatusHistoryCell {
token_info: Option<&TokenUsageInfo>,
total_usage: &TokenUsage,
session_id: &Option<ThreadId>,
forked_from: Option<ThreadId>,
rate_limits: Option<&RateLimitSnapshotDisplay>,
plan_type: Option<PlanType>,
now: DateTime<Local>,
@@ -134,6 +138,7 @@ impl StatusHistoryCell {
let model_provider = format_model_provider(config);
let account = compose_account_display(auth_manager, plan_type);
let session_id = session_id.as_ref().map(std::string::ToString::to_string);
let forked_from = forked_from.map(|id| id.to_string());
let default_usage = TokenUsage::default();
let (context_usage, context_window) = match token_info {
Some(info) => (&info.last_token_usage, info.model_context_window),
@@ -163,6 +168,7 @@ impl StatusHistoryCell {
model_provider,
account,
session_id,
forked_from,
token_usage,
rate_limits,
}
@@ -351,6 +357,9 @@ impl HistoryCell for StatusHistoryCell {
if self.session_id.is_some() {
push_label(&mut labels, &mut seen, "Session");
}
if self.session_id.is_some() && self.forked_from.is_some() {
push_label(&mut labels, &mut seen, "Forked from");
}
push_label(&mut labels, &mut seen, "Token usage");
if self.token_usage.context_window.is_some() {
push_label(&mut labels, &mut seen, "Context window");
@@ -403,6 +412,11 @@ impl HistoryCell for StatusHistoryCell {
if let Some(session) = self.session_id.as_ref() {
lines.push(formatter.line("Session", vec![Span::from(session.clone())]));
}
if self.session_id.is_some()
&& let Some(forked_from) = self.forked_from.as_ref()
{
lines.push(formatter.line("Forked from", vec![Span::from(forked_from.clone())]));
}
lines.push(Line::from(Vec::<Span<'static>>::new()));
// Hide token usage only for ChatGPT subscribers

View File

@@ -0,0 +1,24 @@
---
source: tui/src/status/tests.rs
expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │
│ Agents.md: <none> │
│ Session: 0f0f3c13-6cf9-4aa4-8b80-7d49c2f1be2e │
│ Forked from: e9f18a88-8081-4e51-9d4e-8af5cde2d8dd │
│ │
│ Token usage: 1.2K total (800 input + 400 output) │
│ Context window: 100% left (1.2K used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────────────╯

View File

@@ -14,6 +14,7 @@ use codex_core::protocol::RateLimitWindow;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol::TokenUsage;
use codex_core::protocol::TokenUsageInfo;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use insta::assert_snapshot;
@@ -146,6 +147,7 @@ async fn status_snapshot_includes_reasoning_details() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -161,6 +163,57 @@ async fn status_snapshot_includes_reasoning_details() {
assert_snapshot!(sanitized);
}
#[tokio::test]
async fn status_snapshot_includes_forked_from() {
    // Verifies that a forked session renders a "Forked from" row (with the
    // parent thread id) directly under the "Session" row in the /status card.
    let home = TempDir::new().expect("temp home");
    let mut config = test_config(&home).await;
    config.model = Some("gpt-5.1-codex-max".to_string());
    config.model_provider_id = "openai".to_string();
    config.cwd = PathBuf::from("/workspace/tests");
    let auth_manager = test_auth_manager(&config);

    // Fixed token usage so the snapshot is deterministic.
    let usage = TokenUsage {
        input_tokens: 800,
        cached_input_tokens: 0,
        output_tokens: 400,
        reasoning_output_tokens: 0,
        total_tokens: 1_200,
    };
    // Pin the capture time as well; /status embeds time-derived output.
    let captured_at = chrono::Local
        .with_ymd_and_hms(2024, 8, 9, 10, 11, 12)
        .single()
        .expect("valid time");
    let model_slug = ModelsManager::get_model_offline(config.model.as_deref());
    let token_info = token_info_for(&model_slug, &config, &usage);

    // Current session plus the session it was forked from.
    let session_id =
        ThreadId::from_string("0f0f3c13-6cf9-4aa4-8b80-7d49c2f1be2e").expect("session id");
    let forked_from =
        ThreadId::from_string("e9f18a88-8081-4e51-9d4e-8af5cde2d8dd").expect("forked id");

    let composite = new_status_output(
        &config,
        &auth_manager,
        Some(&token_info),
        &usage,
        &Some(session_id),
        Some(forked_from),
        None,
        None,
        captured_at,
        &model_slug,
    );

    let mut rendered_lines = render_lines(&composite.display_lines(80));
    // Normalize path separators so the snapshot matches on Windows too.
    if cfg!(windows) {
        rendered_lines
            .iter_mut()
            .for_each(|line| *line = line.replace('\\', "/"));
    }
    let sanitized = sanitize_directory(rendered_lines).join("\n");
    assert_snapshot!(sanitized);
}
#[tokio::test]
async fn status_snapshot_includes_monthly_limit() {
let temp_home = TempDir::new().expect("temp home");
@@ -202,6 +255,7 @@ async fn status_snapshot_includes_monthly_limit() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -246,6 +300,7 @@ async fn status_snapshot_shows_unlimited_credits() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -289,6 +344,7 @@ async fn status_snapshot_shows_positive_credits() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -332,6 +388,7 @@ async fn status_snapshot_hides_zero_credits() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -373,6 +430,7 @@ async fn status_snapshot_hides_when_has_no_credits_flag() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -416,6 +474,7 @@ async fn status_card_token_usage_excludes_cached_tokens() {
&None,
None,
None,
None,
now,
&model_slug,
);
@@ -470,6 +529,7 @@ async fn status_snapshot_truncates_in_narrow_terminal() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -517,6 +577,7 @@ async fn status_snapshot_shows_missing_limits_message() {
&None,
None,
None,
None,
now,
&model_slug,
);
@@ -578,6 +639,7 @@ async fn status_snapshot_includes_credits_and_limits() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -629,6 +691,7 @@ async fn status_snapshot_shows_empty_limits_message() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
captured_at,
@@ -689,6 +752,7 @@ async fn status_snapshot_shows_stale_limits_message() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
now,
@@ -753,6 +817,7 @@ async fn status_snapshot_cached_limits_hide_credits_without_flag() {
Some(&token_info),
&usage,
&None,
None,
Some(&rate_display),
None,
now,
@@ -809,6 +874,7 @@ async fn status_context_window_uses_last_usage() {
&None,
None,
None,
None,
now,
&model_slug,
);