Compare commits

...

1 Commits

Author SHA1 Message Date
shijie-openai
195e23cd4b Feat: thread/resume with last turn context 2026-02-04 00:07:39 -08:00
3 changed files with 383 additions and 15 deletions

View File

@@ -147,7 +147,15 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted.
For response/default settings on resume (`model`, `modelProvider`, `cwd`, `approvalPolicy`, `sandbox`, `reasoningEffort`), values are resolved in this order:
1. Explicit `thread/resume` params.
2. The most recent recorded turn context in the resumed thread.
3. Fallback to current config-derived defaults when no turn context exists (for example, older rollouts).
You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
```json
{ "method": "thread/resume", "id": 11, "params": {

View File

@@ -205,6 +205,7 @@ use codex_protocol::protocol::McpServerRefreshConfig;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::TurnContextItem;
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_rmcp_client::perform_oauth_login_return_url;
@@ -2425,16 +2426,48 @@ impl CodexMessageProcessor {
}
};
let last_turn_context = last_turn_context_from_history(&thread_history);
let history_model_provider = history_model_provider(&thread_history);
let resolved_model = model.or_else(|| last_turn_context.map(|ctx| ctx.model.clone()));
let resolved_model_provider = model_provider.or(history_model_provider);
let resolved_cwd = cwd.or_else(|| {
last_turn_context.map(|ctx| ctx.cwd.as_os_str().to_string_lossy().into_owned())
});
let resolved_approval_policy =
approval_policy.or_else(|| last_turn_context.map(|ctx| ctx.approval_policy.into()));
let request_sandbox_is_none = sandbox.is_none();
let resolved_sandbox = sandbox.or_else(|| {
last_turn_context
.and_then(sandbox_mode_from_turn_context)
.map(Into::into)
});
let resolved_developer_instructions = developer_instructions
.or_else(|| last_turn_context.and_then(|ctx| ctx.developer_instructions.clone()));
let resolved_personality =
personality.or_else(|| last_turn_context.and_then(|ctx| ctx.personality.clone()));
let resume_context_overrides = last_turn_context.map(|ctx| {
(
ctx.effort,
ctx.summary,
if request_sandbox_is_none {
Some(ctx.sandbox_policy.clone())
} else {
None
},
)
});
let history_cwd = thread_history.session_cwd();
let typesafe_overrides = self.build_thread_config_overrides(
model,
model_provider,
cwd,
approval_policy,
sandbox,
resolved_model,
resolved_model_provider,
resolved_cwd,
resolved_approval_policy,
resolved_sandbox,
base_instructions,
developer_instructions,
personality,
resolved_developer_instructions,
resolved_personality,
);
// Derive a Config using the same logic as new conversation, honoring overrides if provided.
@@ -2469,7 +2502,7 @@ impl CodexMessageProcessor {
Ok(NewThread {
thread_id,
session_configured,
..
thread,
}) => {
let SessionConfiguredEvent {
rollout_path,
@@ -2496,6 +2529,34 @@ impl CodexMessageProcessor {
);
}
if let Some((effort, summary, sandbox_policy)) = resume_context_overrides {
if let Err(err) = thread
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy,
windows_sandbox_level: None,
model: None,
effort: Some(effort),
summary: Some(summary),
collaboration_mode: None,
personality: None,
})
.await
{
self.send_internal_error(
request_id,
format!(
"failed to apply resume turn context for thread {thread_id}: {err}"
),
)
.await;
return;
}
}
let config_snapshot = thread.config_snapshot().await;
let mut thread = match read_summary_from_rollout(
rollout_path.as_path(),
fallback_model_provider.as_str(),
@@ -2521,12 +2582,12 @@ impl CodexMessageProcessor {
let response = ThreadResumeResponse {
thread,
model: session_configured.model,
model_provider: session_configured.model_provider_id,
cwd: session_configured.cwd,
approval_policy: session_configured.approval_policy.into(),
sandbox: session_configured.sandbox_policy.into(),
reasoning_effort: session_configured.reasoning_effort,
model: config_snapshot.model,
model_provider: config_snapshot.model_provider_id,
cwd: config_snapshot.cwd,
approval_policy: config_snapshot.approval_policy.into(),
sandbox: config_snapshot.sandbox_policy.into(),
reasoning_effort: config_snapshot.reasoning_effort,
};
self.outgoing.send_response(request_id, response).await;
@@ -5146,6 +5207,50 @@ fn map_git_info(git_info: &CoreGitInfo) -> ConversationGitInfo {
}
}
/// Returns the most recently recorded `TurnContext` entry in `thread_history`,
/// or `None` when the history contains no turn context (for example, rollouts
/// written before turn contexts were recorded).
fn last_turn_context_from_history(thread_history: &InitialHistory) -> Option<&TurnContextItem> {
    // Walk the rollout backwards so the newest turn context wins.
    for item in history_rollout_items(thread_history).iter().rev() {
        if let RolloutItem::TurnContext(context) = item {
            return Some(context);
        }
    }
    None
}
/// Returns the model provider recorded in the thread's session metadata,
/// scanning from the start of the rollout. Entries whose `model_provider`
/// is `None` are skipped, matching the original `find_map` semantics.
fn history_model_provider(thread_history: &InitialHistory) -> Option<String> {
    for item in history_rollout_items(thread_history) {
        if let RolloutItem::SessionMeta(meta) = item {
            // Keep scanning past SessionMeta entries without a provider.
            if let Some(provider) = meta.meta.model_provider.as_ref() {
                return Some(provider.clone());
            }
        }
    }
    None
}
/// Borrows the raw rollout items backing `thread_history`.
///
/// A brand-new thread carries no recorded items, so `InitialHistory::New`
/// yields an empty slice; resumed and forked threads expose the items
/// captured in their respective histories.
fn history_rollout_items(thread_history: &InitialHistory) -> &[RolloutItem] {
    match thread_history {
        // Fresh threads have no rollout history to inspect.
        InitialHistory::New => &[],
        InitialHistory::Resumed(resumed) => resumed.history.as_slice(),
        InitialHistory::Forked(items) => items.as_slice(),
    }
}
/// Maps the sandbox policy recorded in a turn context onto the coarse
/// `SandboxMode` used for resume defaults.
///
/// Externally managed sandboxes have no corresponding mode, so they map to
/// `None` and the caller must fall back to other sources.
fn sandbox_mode_from_turn_context(
    context: &TurnContextItem,
) -> Option<codex_protocol::config_types::SandboxMode> {
    use codex_protocol::config_types::SandboxMode;
    use codex_protocol::protocol::SandboxPolicy;

    match context.sandbox_policy {
        SandboxPolicy::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
        SandboxPolicy::ReadOnly => Some(SandboxMode::ReadOnly),
        SandboxPolicy::WorkspaceWrite { .. } => Some(SandboxMode::WorkspaceWrite),
        // No `SandboxMode` exists for external sandboxes.
        SandboxPolicy::ExternalSandbox { .. } => None,
    }
}
fn parse_datetime(timestamp: Option<&str>) -> Option<DateTime<Utc>> {
timestamp.and_then(|ts| {
chrono::DateTime::parse_from_rfc3339(ts)
@@ -5251,6 +5356,80 @@ mod tests {
validate_dynamic_tools(&tools, &HashSet::new()).expect("valid schema");
}
#[test]
fn last_turn_context_from_history_uses_most_recent_turn_context() -> Result<()> {
    // Local shorthand for a rollout turn-context entry; only the fields that
    // differ between the two entries are parameters.
    fn turn_context_item(
        cwd: &str,
        approval_policy: codex_protocol::protocol::AskForApproval,
        sandbox_policy: codex_protocol::protocol::SandboxPolicy,
        model: &str,
        personality: Option<Personality>,
        effort: Option<codex_protocol::openai_models::ReasoningEffort>,
        summary: codex_protocol::config_types::ReasoningSummary,
        developer_instructions: Option<String>,
    ) -> RolloutItem {
        RolloutItem::TurnContext(TurnContextItem {
            cwd: PathBuf::from(cwd),
            approval_policy,
            sandbox_policy,
            model: model.to_string(),
            personality,
            collaboration_mode: None,
            effort,
            summary,
            user_instructions: None,
            developer_instructions,
            final_output_json_schema: None,
            truncation_policy: None,
        })
    }

    let session_id = ThreadId::from_string("3f941c35-29b3-493b-b0a4-e25800d9aeb0")?;
    // One SessionMeta followed by two turn contexts: the later one must win.
    let history = InitialHistory::Forked(vec![
        RolloutItem::SessionMeta(SessionMetaLine {
            meta: SessionMeta {
                id: session_id,
                model_provider: Some("provider-a".to_string()),
                ..Default::default()
            },
            git: None,
        }),
        turn_context_item(
            "/tmp/first",
            codex_protocol::protocol::AskForApproval::OnRequest,
            codex_protocol::protocol::SandboxPolicy::ReadOnly,
            "model-first",
            None,
            None,
            codex_protocol::config_types::ReasoningSummary::Auto,
            None,
        ),
        turn_context_item(
            "/tmp/second",
            codex_protocol::protocol::AskForApproval::Never,
            codex_protocol::protocol::SandboxPolicy::DangerFullAccess,
            "model-second",
            Some(Personality::Friendly),
            Some(codex_protocol::openai_models::ReasoningEffort::High),
            codex_protocol::config_types::ReasoningSummary::Detailed,
            Some("dev notes".to_string()),
        ),
    ]);

    let context = last_turn_context_from_history(&history).expect("last turn context");
    // The most recent turn context is selected, not the first.
    assert_eq!(context.model, "model-second");
    assert_eq!(context.cwd, PathBuf::from("/tmp/second"));
    // The provider comes from session metadata, independent of turn contexts.
    assert_eq!(
        history_model_provider(&history),
        Some("provider-a".to_string())
    );
    Ok(())
}
#[test]
fn sandbox_mode_from_turn_context_returns_none_for_external_sandbox() {
    // An externally managed sandbox has no `SandboxMode` equivalent, so the
    // conversion must yield `None` rather than guessing a mode.
    let external_policy = codex_protocol::protocol::SandboxPolicy::ExternalSandbox {
        network_access: codex_protocol::protocol::NetworkAccess::Enabled,
    };
    let context = TurnContextItem {
        cwd: PathBuf::from("/tmp"),
        approval_policy: codex_protocol::protocol::AskForApproval::OnRequest,
        sandbox_policy: external_policy,
        model: "model".to_string(),
        personality: None,
        collaboration_mode: None,
        effort: None,
        summary: codex_protocol::config_types::ReasoningSummary::Auto,
        user_instructions: None,
        developer_instructions: None,
        final_output_json_schema: None,
        truncation_policy: None,
    };
    assert!(sandbox_mode_from_turn_context(&context).is_none());
}
#[test]
fn extract_conversation_summary_prefers_plain_user_messages() -> Result<()> {
let conversation_id = ThreadId::from_string("3f941c35-29b3-493b-b0a4-e25800d9aeb0")?;

View File

@@ -7,6 +7,7 @@ use app_test_support::to_response;
use chrono::Utc;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxPolicy;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadResumeParams;
@@ -16,9 +17,11 @@ use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::config_types::Personality;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use core_test_support::responses;
@@ -261,6 +264,184 @@ async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Re
Ok(())
}
#[tokio::test]
async fn thread_resume_without_turn_context_falls_back_to_current_config_defaults() -> Result<()> {
    // A rollout written without any turn-context entries (the trailing `None`
    // below) models older sessions; resuming such a thread must fall back to
    // config-derived defaults instead of failing.
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;
    let conversation_id = create_fake_rollout_with_text_elements(
        codex_home.path(),
        "2025-01-05T12-00-00",
        "2025-01-05T12:00:00Z",
        "Saved user message",
        Vec::new(),
        Some("mock_provider"),
        None,
    )?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // Resume with no explicit overrides so only the fallback path is exercised.
    let resume_id = mcp
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: conversation_id,
            ..Default::default()
        })
        .await?;
    let resume_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
    )
    .await??;
    let ThreadResumeResponse {
        model,
        model_provider,
        ..
    } = to_response::<ThreadResumeResponse>(resume_resp)?;
    // The model falls back to a default (presumably the one configured by
    // `create_config_toml` — not visible here; TODO confirm), while the
    // provider recorded in the rollout's session meta is still honored.
    assert_eq!(model, "gpt-5.2-codex");
    assert_eq!(model_provider, "mock_provider");
    Ok(())
}
#[tokio::test]
async fn thread_resume_defaults_to_last_turn_context_and_preserves_override_precedence()
-> Result<()> {
    // End-to-end check of the resume resolution order:
    //   1. explicit `thread/resume` params
    //   2. the most recent recorded turn context
    //   3. config-derived defaults
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // Start a fresh thread so a turn can record a custom turn context.
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5.2-codex".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
    let resumed_cwd = codex_home.path().join("resume-overrides");
    std::fs::create_dir_all(&resumed_cwd)?;
    // Run one turn with every per-turn override set; these values become the
    // "most recent recorded turn context" for the resume below.
    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "Set custom turn context".to_string(),
                text_elements: Vec::new(),
            }],
            cwd: Some(resumed_cwd.clone()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::OnRequest),
            sandbox_policy: Some(SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![resumed_cwd.clone().try_into()?],
                network_access: true,
                exclude_tmpdir_env_var: true,
                exclude_slash_tmp: true,
            }),
            model: Some("mock-model-override".to_string()),
            effort: Some(ReasoningEffort::High),
            summary: Some(ReasoningSummary::Detailed),
            personality: Some(Personality::Friendly),
            output_schema: None,
            collaboration_mode: None,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    // Wait for the turn to complete so the turn context is recorded.
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    // Resume with no explicit params: values should come from the recorded
    // turn context (rule 2 above).
    let resume_id = mcp
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id.clone(),
            ..Default::default()
        })
        .await?;
    let resume_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
    )
    .await??;
    let resume = to_response::<ThreadResumeResponse>(resume_resp)?;
    assert_eq!(resume.model, "mock-model-override");
    assert_eq!(resume.model_provider, "mock_provider");
    assert_eq!(resume.cwd, resumed_cwd);
    assert_eq!(
        resume.approval_policy,
        codex_app_server_protocol::AskForApproval::OnRequest
    );
    assert_eq!(
        resume.sandbox,
        SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![resume.cwd.clone().try_into()?],
            network_access: true,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        }
    );
    assert_eq!(resume.reasoning_effort, Some(ReasoningEffort::High));
    // Run a turn with no overrides before the second resume — presumably so
    // the explicit-override resume below is tested against a turn context
    // recorded from the resumed defaults; TODO confirm intent.
    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![UserInput::Text {
                text: "No turn overrides".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    // Resume again with explicit params: they must take precedence over the
    // recorded turn context (rule 1 above).
    let resume_override_id = mcp
        .send_thread_resume_request(ThreadResumeParams {
            thread_id: thread.id,
            model: Some("forced-model".to_string()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox: Some(codex_app_server_protocol::SandboxMode::ReadOnly),
            ..Default::default()
        })
        .await?;
    let resume_override_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(resume_override_id)),
    )
    .await??;
    let resume_override = to_response::<ThreadResumeResponse>(resume_override_resp)?;
    assert_eq!(resume_override.model, "forced-model");
    assert_eq!(
        resume_override.approval_policy,
        codex_app_server_protocol::AskForApproval::Never
    );
    assert_eq!(resume_override.sandbox, SandboxPolicy::ReadOnly);
    Ok(())
}
#[tokio::test]
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;