Compare commits

..

1 Commit

Author SHA1 Message Date
rka-oai
c957b4bef1 Enhance migrate command 2025-11-05 23:13:00 -08:00
24 changed files with 410 additions and 1093 deletions

View File

@@ -133,18 +133,6 @@ client_request_definitions! {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
#[serde(rename = "turn/start")]
#[ts(rename = "turn/start")]
TurnStart {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
},
#[serde(rename = "turn/interrupt")]
#[ts(rename = "turn/interrupt")]
TurnInterrupt {
params: v2::TurnInterruptParams,
response: v2::TurnInterruptResponse,
},
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]

View File

@@ -5,10 +5,8 @@ use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use schemars::JsonSchema;
use serde::Deserialize;
@@ -367,13 +365,6 @@ pub struct Turn {
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -384,51 +375,15 @@ pub enum TurnStatus {
InProgress,
}
// Turn APIs
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartParams {
pub thread_id: String,
pub input: Vec<UserInput>,
/// Override the working directory for this turn and subsequent turns.
pub cwd: Option<PathBuf>,
/// Override the approval policy for this turn and subsequent turns.
pub approval_policy: Option<AskForApproval>,
/// Override the sandbox policy for this turn and subsequent turns.
pub sandbox_policy: Option<SandboxPolicy>,
/// Override the model for this turn and subsequent turns.
pub model: Option<String>,
/// Override the reasoning effort for this turn and subsequent turns.
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartResponse {
pub turn: Turn,
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptParams {
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptResponse {}
// User input types
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum UserInput {
Text { text: String },
@@ -436,19 +391,8 @@ pub enum UserInput {
LocalImage { path: PathBuf },
}
impl UserInput {
pub fn into_core(self) -> CoreUserInput {
match self {
UserInput::Text { text } => CoreUserInput::Text { text },
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum ThreadItem {
UserMessage {
@@ -572,7 +516,7 @@ pub struct TodoItem {
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -601,7 +545,6 @@ pub struct Usage {
#[ts(export_to = "v2/")]
pub struct TurnCompletedNotification {
pub turn: Turn,
// TODO: should usage be stored on the Turn object, and we return that instead?
pub usage: Usage,
}
@@ -619,7 +562,6 @@ pub struct ItemCompletedNotification {
pub item: ThreadItem,
}
// Item-specific progress notifications
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]

View File

@@ -13,7 +13,6 @@ use codex_app_server_protocol::ApplyPatchApprovalParams;
use codex_app_server_protocol::ApplyPatchApprovalResponse;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::ArchiveConversationResponse;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::AuthStatusChangeNotification;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -30,8 +29,6 @@ use codex_app_server_protocol::FeedbackUploadResponse;
use codex_app_server_protocol::FuzzyFileSearchParams;
use codex_app_server_protocol::FuzzyFileSearchResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::GetConversationSummaryParams;
use codex_app_server_protocol::GetConversationSummaryResponse;
use codex_app_server_protocol::GetUserAgentResponse;
@@ -48,8 +45,6 @@ use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
@@ -59,8 +54,6 @@ use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::Result as JsonRpcResult;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ResumeConversationResponse;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::SendUserTurnParams;
@@ -73,7 +66,6 @@ use codex_app_server_protocol::SetDefaultModelResponse;
use codex_app_server_protocol::Thread;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadResumeParams;
@@ -81,15 +73,7 @@ use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInfoResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_app_server_protocol::UserSavedConfig;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
@@ -152,9 +136,6 @@ use tracing::info;
use tracing::warn;
use uuid::Uuid;
type PendingInterruptQueue = Vec<(RequestId, ApiVersion)>;
type PendingInterrupts = Arc<Mutex<HashMap<ConversationId, PendingInterruptQueue>>>;
// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
struct ActiveLogin {
@@ -178,17 +159,11 @@ pub(crate) struct CodexMessageProcessor {
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
pending_interrupts: PendingInterrupts,
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
feedback: CodexFeedback,
}
#[derive(Clone, Copy, Debug)]
enum ApiVersion {
V1,
V2,
}
impl CodexMessageProcessor {
pub fn new(
auth_manager: Arc<AuthManager>,
@@ -217,7 +192,7 @@ impl CodexMessageProcessor {
ClientRequest::Initialize { .. } => {
panic!("Initialize should be handled in MessageProcessor");
}
// === v2 Thread/Turn APIs ===
// === v2 Thread APIs ===
ClientRequest::ThreadStart { request_id, params } => {
self.thread_start(request_id, params).await;
}
@@ -237,12 +212,6 @@ impl CodexMessageProcessor {
self.send_unimplemented_error(request_id, "thread/compact")
.await;
}
ClientRequest::TurnStart { request_id, params } => {
self.turn_start(request_id, params).await;
}
ClientRequest::TurnInterrupt { request_id, params } => {
self.turn_interrupt(request_id, params).await;
}
ClientRequest::NewConversation { request_id, params } => {
// Do not tokio::spawn() to process new_conversation()
// asynchronously because we need to ensure the conversation is
@@ -762,7 +731,10 @@ impl CodexMessageProcessor {
match self.logout_common().await {
Ok(current_auth_method) => {
self.outgoing
.send_response(request_id, LogoutChatGptResponse {})
.send_response(
request_id,
codex_app_server_protocol::LogoutChatGptResponse {},
)
.await;
let payload = AuthStatusChangeNotification {
@@ -782,7 +754,10 @@ impl CodexMessageProcessor {
match self.logout_common().await {
Ok(current_auth_method) => {
self.outgoing
.send_response(request_id, LogoutAccountResponse {})
.send_response(
request_id,
codex_app_server_protocol::LogoutAccountResponse {},
)
.await;
let payload_v2 = AccountUpdatedNotification {
@@ -798,7 +773,11 @@ impl CodexMessageProcessor {
}
}
async fn get_auth_status(&self, request_id: RequestId, params: GetAuthStatusParams) {
async fn get_auth_status(
&self,
request_id: RequestId,
params: codex_app_server_protocol::GetAuthStatusParams,
) {
let include_token = params.include_token.unwrap_or(false);
let do_refresh = params.refresh_token.unwrap_or(false);
@@ -812,7 +791,7 @@ impl CodexMessageProcessor {
let requires_openai_auth = self.config.model_provider.requires_openai_auth;
let response = if !requires_openai_auth {
GetAuthStatusResponse {
codex_app_server_protocol::GetAuthStatusResponse {
auth_method: None,
auth_token: None,
requires_openai_auth: Some(false),
@@ -832,13 +811,13 @@ impl CodexMessageProcessor {
(None, None)
}
};
GetAuthStatusResponse {
codex_app_server_protocol::GetAuthStatusResponse {
auth_method: reported_auth_method,
auth_token: token_opt,
requires_openai_auth: Some(true),
}
}
None => GetAuthStatusResponse {
None => codex_app_server_protocol::GetAuthStatusResponse {
auth_method: None,
auth_token: None,
requires_openai_auth: Some(true),
@@ -1122,8 +1101,12 @@ impl CodexMessageProcessor {
let overrides = ConfigOverrides {
model: params.model,
cwd: params.cwd.map(PathBuf::from),
approval_policy: params.approval_policy.map(AskForApproval::to_core),
sandbox_mode: params.sandbox.map(SandboxMode::to_core),
approval_policy: params
.approval_policy
.map(codex_app_server_protocol::AskForApproval::to_core),
sandbox_mode: params
.sandbox
.map(codex_app_server_protocol::SandboxMode::to_core),
model_provider: params.model_provider,
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
base_instructions: params.base_instructions,
@@ -1448,22 +1431,60 @@ impl CodexMessageProcessor {
let ListConversationsParams {
page_size,
cursor,
model_providers,
model_providers: model_provider,
} = params;
let page_size = page_size.unwrap_or(25).max(1);
match self
.list_conversations_common(page_size, cursor, model_providers)
.await
{
Ok((items, next_cursor)) => {
let response = ListConversationsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
let page_size = page_size.unwrap_or(25);
let cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
let cursor_ref = cursor_obj.as_ref();
let model_provider_filter = match model_provider {
Some(providers) => {
if providers.is_empty() {
None
} else {
Some(providers)
}
}
Err(error) => {
None => Some(vec![self.config.model_provider_id.clone()]),
};
let model_provider_slice = model_provider_filter.as_deref();
let fallback_provider = self.config.model_provider_id.clone();
let page = match RolloutRecorder::list_conversations(
&self.config.codex_home,
page_size,
cursor_ref,
INTERACTIVE_SESSION_SOURCES,
model_provider_slice,
fallback_provider.as_str(),
)
.await
{
Ok(p) => p,
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to list conversations: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let items = page
.items
.into_iter()
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
.collect();
// Encode next_cursor as a plain string
let next_cursor = page
.next_cursor
.and_then(|cursor| serde_json::to_value(&cursor).ok())
.and_then(|value| value.as_str().map(str::to_owned));
let response = ListConversationsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
async fn list_conversations_common(
@@ -1513,7 +1534,6 @@ impl CodexMessageProcessor {
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
.collect::<Vec<_>>();
// Encode next_cursor as a plain string
let next_cursor = page
.next_cursor
.and_then(|cursor| serde_json::to_value(&cursor).ok())
@@ -1524,8 +1544,7 @@ impl CodexMessageProcessor {
async fn list_models(&self, request_id: RequestId, params: ModelListParams) {
let ModelListParams { limit, cursor } = params;
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let models = supported_models(auth_mode);
let models = supported_models();
let total = models.len();
if total == 0 {
@@ -1537,8 +1556,8 @@ impl CodexMessageProcessor {
return;
}
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
let effective_limit = effective_limit.min(total);
let effective_page_size = limit.unwrap_or(total as u32).max(1) as usize;
let effective_page_size = effective_page_size.min(total);
let start = match cursor {
Some(cursor) => match cursor.parse::<usize>() {
Ok(idx) => idx,
@@ -1565,7 +1584,7 @@ impl CodexMessageProcessor {
return;
}
let end = start.saturating_add(effective_limit).min(total);
let end = start.saturating_add(effective_page_size).min(total);
let items = models[start..end].to_vec();
let next_cursor = if end < total {
Some(end.to_string())
@@ -1600,7 +1619,7 @@ impl CodexMessageProcessor {
profile,
cwd,
approval_policy,
sandbox: sandbox_mode,
sandbox,
config: cli_overrides,
base_instructions,
developer_instructions,
@@ -1613,7 +1632,7 @@ impl CodexMessageProcessor {
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy,
sandbox_mode,
sandbox_mode: sandbox,
model_provider,
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
base_instructions,
@@ -1739,7 +1758,7 @@ impl CodexMessageProcessor {
.map(|msgs| msgs.into_iter().collect());
// Reply with conversation id + model and initial messages (when present)
let response = ResumeConversationResponse {
let response = codex_app_server_protocol::ResumeConversationResponse {
conversation_id,
model: session_configured.model.clone(),
initial_messages,
@@ -2050,151 +2069,7 @@ impl CodexMessageProcessor {
// Record the pending interrupt so we can reply when TurnAborted arrives.
{
let mut map = self.pending_interrupts.lock().await;
map.entry(conversation_id)
.or_default()
.push((request_id, ApiVersion::V1));
}
// Submit the interrupt; we'll respond upon TurnAborted.
let _ = conversation.submit(Op::Interrupt).await;
}
async fn turn_start(&self, request_id: RequestId, params: TurnStartParams) {
// Resolve conversation id from v2 thread id string.
let conversation_id = match ConversationId::from_string(&params.thread_id) {
Ok(id) => id,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid thread id: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let Ok(conversation) = self
.conversation_manager
.get_conversation(conversation_id)
.await
else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("conversation not found: {conversation_id}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};
// Keep a copy of v2 inputs for the notification payload.
let v2_inputs_for_notif = params.input.clone();
// Map v2 input items to core input items.
let mapped_items: Vec<CoreInputItem> = params
.input
.into_iter()
.map(V2UserInput::into_core)
.collect();
let has_any_overrides = params.cwd.is_some()
|| params.approval_policy.is_some()
|| params.sandbox_policy.is_some()
|| params.model.is_some()
|| params.effort.is_some()
|| params.summary.is_some();
// If any overrides are provided, update the session turn context first.
if has_any_overrides {
let _ = conversation
.submit(Op::OverrideTurnContext {
cwd: params.cwd,
approval_policy: params.approval_policy.map(AskForApproval::to_core),
sandbox_policy: params.sandbox_policy.map(|p| p.to_core()),
model: params.model,
effort: params.effort.map(Some),
summary: params.summary,
})
.await;
}
// Start the turn by submitting the user input. Return its submission id as turn_id.
let turn_id = conversation
.submit(Op::UserInput {
items: mapped_items,
})
.await;
match turn_id {
Ok(turn_id) => {
let turn = Turn {
id: turn_id.clone(),
items: vec![ThreadItem::UserMessage {
id: turn_id,
content: v2_inputs_for_notif,
}],
status: TurnStatus::InProgress,
error: None,
};
let response = TurnStartResponse { turn: turn.clone() };
self.outgoing.send_response(request_id, response).await;
// Emit v2 turn/started notification.
let notif = TurnStartedNotification { turn };
self.outgoing
.send_server_notification(ServerNotification::TurnStarted(notif))
.await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to start turn: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn turn_interrupt(&mut self, request_id: RequestId, params: TurnInterruptParams) {
let TurnInterruptParams { thread_id, .. } = params;
// Resolve conversation id from v2 thread id string.
let conversation_id = match ConversationId::from_string(&thread_id) {
Ok(id) => id,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid thread id: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let Ok(conversation) = self
.conversation_manager
.get_conversation(conversation_id)
.await
else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("conversation not found: {conversation_id}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};
// Record the pending interrupt so we can reply when TurnAborted arrives.
{
let mut map = self.pending_interrupts.lock().await;
map.entry(conversation_id)
.or_default()
.push((request_id, ApiVersion::V2));
map.entry(conversation_id).or_default().push(request_id);
}
// Submit the interrupt; we'll respond upon TurnAborted.
@@ -2210,6 +2085,7 @@ impl CodexMessageProcessor {
conversation_id,
experimental_raw_events,
} = params;
match self
.attach_conversation_listener(conversation_id, experimental_raw_events)
.await
@@ -2336,6 +2212,7 @@ impl CodexMessageProcessor {
}
}
});
Ok(subscription_id)
}
@@ -2477,7 +2354,7 @@ async fn apply_bespoke_event_handling(
conversation_id: ConversationId,
conversation: Arc<CodexConversation>,
outgoing: Arc<OutgoingMessageSender>,
pending_interrupts: PendingInterrupts,
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
) {
let Event { id: event_id, msg } = event;
match msg {
@@ -2546,19 +2423,11 @@ async fn apply_bespoke_event_handling(
map.remove(&conversation_id).unwrap_or_default()
};
if !pending.is_empty() {
for (rid, ver) in pending {
match ver {
ApiVersion::V1 => {
let response = InterruptConversationResponse {
abort_reason: turn_aborted_event.reason.clone(),
};
outgoing.send_response(rid, response).await;
}
ApiVersion::V2 => {
let response = TurnInterruptResponse {};
outgoing.send_response(rid, response).await;
}
}
let response = InterruptConversationResponse {
abort_reason: turn_aborted_event.reason,
};
for rid in pending {
outgoing.send_response(rid, response.clone()).await;
}
}
}

View File

@@ -1,12 +1,11 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()

View File

@@ -22,17 +22,11 @@ use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
@@ -42,8 +36,13 @@ use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use std::process::Command as StdCommand;
use tokio::process::Command;
@@ -250,7 +249,7 @@ impl McpProcess {
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
pub async fn send_upload_feedback_request(
&mut self,
params: FeedbackUploadParams,
) -> anyhow::Result<i64> {
@@ -349,24 +348,6 @@ impl McpProcess {
self.send_request("loginChatGpt", None).await
}
/// Send a `turn/start` JSON-RPC request (v2).
pub async fn send_turn_start_request(
&mut self,
params: TurnStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/start", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
params: TurnInterruptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/interrupt", params).await
}
/// Send a `cancelLoginChatGpt` JSON-RPC request.
pub async fn send_cancel_login_chat_gpt_request(
&mut self,

View File

@@ -146,7 +146,7 @@ fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

View File

@@ -49,7 +49,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,

View File

@@ -1,7 +1,6 @@
// v2 test suite modules
mod account;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_start;
mod turn_interrupt;
mod turn_start;

View File

@@ -1,128 +0,0 @@
#![cfg(unix)]
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_interrupt_aborts_running_turn() -> Result<()> {
// Use a portable sleep command to keep the turn running.
#[cfg(target_os = "windows")]
let shell_command = vec![
"powershell".to_string(),
"-Command".to_string(),
"Start-Sleep -Seconds 10".to_string(),
];
#[cfg(not(target_os = "windows"))]
let shell_command = vec!["sleep".to_string(), "10".to_string()];
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
// Mock server: long-running shell command then (after abort) nothing else needed.
let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000),
"call_sleep",
)?])
.await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn that triggers a long-running command.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run sleep".to_string(),
}],
cwd: Some(working_directory.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Give the command a brief moment to start.
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Interrupt the in-progress turn by id (v2 API).
let interrupt_id = mcp
.send_turn_interrupt_request(TurnInterruptParams {
thread_id: thread.id,
turn_id: turn.id,
})
.await?;
let interrupt_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)),
)
.await??;
let _resp: TurnInterruptResponse = to_response::<TurnInterruptResponse>(interrupt_resp)?;
// No fields to assert on; successful deserialization confirms proper response shape.
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "workspace-write"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,486 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
    // Stand up a mock model server so model wiring resolves. The mock is hit
    // three times: once at session start and once per turn/start call below.
    let sse_responses = vec![
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
    ];
    let server = create_mock_chat_completions_server_unchecked(sse_responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri(), "never")?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Create a v2 thread and remember its id.
    let start_request = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_request)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_response)?;

    // First turn: plain text input with no overrides.
    let first_request = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Hello".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    let first_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(first_response)?;
    assert!(!turn.id.is_empty());

    // The server should announce the turn via a turn/started notification
    // whose payload reports the turn as in progress.
    let started_notification: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await??;
    let started: TurnStartedNotification =
        serde_json::from_value(started_notification.params.expect("params must be present"))?;
    assert_eq!(
        started.turn.status,
        codex_app_server_protocol::TurnStatus::InProgress
    );

    // Second turn: exercise the per-turn overrides path by swapping the model.
    let second_request = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Second".to_string(),
            }],
            model: Some("mock-model-override".to_string()),
            ..Default::default()
        })
        .await?;
    let second_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
    )
    .await??;
    let TurnStartResponse { turn: second_turn } =
        to_response::<TurnStartResponse>(second_response)?;
    assert!(!second_turn.id.is_empty());
    // Each turn must get its own id.
    assert_ne!(turn.id, second_turn.id);

    // A second turn/started notification should follow as well.
    let _second_started: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await??;

    // thread/start auto-attaches a legacy conversation listener, so the
    // task_complete event arrives without an explicit subscription here.
    let _task_complete: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
    // The mock model is hit twice: once at session start, once for turn/start.
    let sse_responses = vec![
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
    ];
    // The strict request matcher does not currently understand LocalImage
    // payloads, so use the unchecked server variant.
    let server = create_mock_chat_completions_server_unchecked(sse_responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri(), "never")?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Create a thread to attach the turn to.
    let start_request = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_request)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_response)?;

    // The file never has to exist on disk; this only exercises the input path.
    let image_path = codex_home.path().join("image.png");
    let turn_request = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::LocalImage { path: image_path }],
            ..Default::default()
        })
        .await?;
    let turn_response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_request)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_response)?;
    // Only validate that turn/start answered and returned a well-formed turn.
    assert!(!turn.id.is_empty());
    Ok(())
}
#[tokio::test]
async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    // Verifies that per-turn overrides control exec approvals: with the
    // default "untrusted" policy the first turn must elicit an
    // ExecCommandApproval request, while a second turn started with
    // approval_policy=never must complete without one.
    skip_if_no_network!(Ok(()));
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().to_path_buf();
    // Mock server: first turn requests a shell call (elicitation), then completes.
    // Second turn same, but we'll set approval_policy=never to avoid elicitation.
    let responses = vec![
        create_shell_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call2",
        )?,
        create_final_assistant_message_sse_response("done 2")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    // Default approval is untrusted to force elicitation on first turn.
    create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
    let mut mcp = McpProcess::new(codex_home.as_path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // thread/start
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
    // turn/start — expect ExecCommandApproval request from server
    let first_turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    // Acknowledge RPC
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
    )
    .await??;
    // Receive elicitation
    let server_req = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_request_message(),
    )
    .await??;
    let ServerRequest::ExecCommandApproval { request_id, params } = server_req else {
        panic!("expected ExecCommandApproval request");
    };
    // The approval must reference the first mock tool call and carry the
    // parsed command for display to the user.
    assert_eq!(params.call_id, "call1");
    assert_eq!(
        params.parsed_cmd,
        vec![ParsedCommand::Unknown {
            cmd: "python3 -c 'print(42)'".to_string()
        }]
    );
    // Approve and wait for task completion
    mcp.send_response(
        request_id,
        serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
    )
    .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    // Second turn with approval_policy=never should not elicit approval
    let second_turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python again".to_string(),
            }],
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
    )
    .await??;
    // Ensure we do NOT receive an ExecCommandApproval request before task completes
    // NOTE(review): this relies on the second turn reaching task_complete without
    // any interleaved server request; confirm the stream reader would surface one.
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    // Verifies that cwd and sandbox_policy overrides supplied on turn/start
    // apply per turn: the exec launched by the second turn must begin in the
    // second turn's cwd, not the first's.
    // When returning Result from a test, pass an Ok(()) to the skip macro
    // so the early return type matches. The no-arg form returns unit.
    skip_if_no_network!(Ok(()));
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");
    std::fs::create_dir(&codex_home)?;
    // Two sibling working directories, one per turn.
    let workspace_root = tmp.path().join("workspace");
    std::fs::create_dir(&workspace_root)?;
    let first_cwd = workspace_root.join("turn1");
    let second_cwd = workspace_root.join("turn2");
    std::fs::create_dir(&first_cwd)?;
    std::fs::create_dir(&second_cwd)?;
    // Mock model: each turn issues one shell call, then a final message.
    let responses = vec![
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo first turn".to_string(),
            ],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo second turn".to_string(),
            ],
            None,
            Some(5000),
            "call-second",
        )?,
        create_final_assistant_message_sse_response("done second")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(&codex_home, &server.uri(), "untrusted")?;
    let mut mcp = McpProcess::new(&codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // thread/start
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
    // first turn with workspace-write sandbox and first_cwd
    let first_turn = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "first turn".to_string(),
            }],
            cwd: Some(first_cwd.clone()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![first_cwd.clone()],
                network_access: false,
                exclude_tmpdir_env_var: false,
                exclude_slash_tmp: false,
            }),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn)),
    )
    .await??;
    // Let the first turn run to completion before changing the cwd.
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    // second turn with workspace-write and second_cwd, ensure exec begins in second_cwd
    let second_turn = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "second turn".to_string(),
            }],
            cwd: Some(second_cwd.clone()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn)),
    )
    .await??;
    // The exec_command_begin event carries the cwd the command starts in.
    let exec_begin_notification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
    )
    .await??;
    let params = exec_begin_notification
        .params
        .clone()
        .expect("exec_command_begin params");
    let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
    let exec_begin = match event.msg {
        EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
        other => panic!("expected ExecCommandBegin event, got {other:?}"),
    };
    // The override must have taken effect: the second turn's command starts
    // in second_cwd and is the second mock shell call.
    assert_eq!(exec_begin.cwd, second_cwd);
    assert_eq!(
        exec_begin.command,
        vec![
            "bash".to_string(),
            "-lc".to_string(),
            "echo second turn".to_string()
        ]
    );
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
/// Write a minimal `config.toml` into `codex_home` that routes the default
/// model through the mock chat-completions server used by these tests.
///
/// `approval_policy` is interpolated verbatim (e.g. "never" or "untrusted");
/// retries are disabled so a misbehaving mock fails fast.
fn create_config_toml(
    codex_home: &Path,
    server_uri: &str,
    approval_policy: &str,
) -> std::io::Result<()> {
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}

View File

@@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for codex.",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
@@ -52,24 +52,6 @@ const PRESETS: &[ModelPreset] = &[
],
is_default: true,
},
ModelPreset {
id: "gpt-5-codex-mini",
model: "gpt-5-codex-mini",
display_name: "gpt-5-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
ModelPreset {
id: "gpt-5",
model: "gpt-5",
@@ -98,13 +80,8 @@ const PRESETS: &[ModelPreset] = &[
},
];
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
let allow_codex_mini = matches!(auth_mode, Some(AuthMode::ChatGPT));
PRESETS
.iter()
.filter(|preset| allow_codex_mini || preset.id != "gpt-5-codex-mini")
.copied()
.collect()
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]

View File

@@ -680,33 +680,6 @@ fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
headers.get(name)?.to_str().ok()
}
async fn emit_completed(
tx_event: &mpsc::Sender<Result<ResponseEvent>>,
otel_event_manager: &OtelEventManager,
completed: ResponseCompleted,
) {
if let Some(token_usage) = &completed.usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id: completed.id.clone(),
token_usage: completed.usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
async fn process_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent>>,
@@ -719,7 +692,7 @@ async fn process_sse<S>(
// If the stream stays completely silent for an extended period treat it as disconnected.
// The response id returned from the "complete" message.
let response_completed: Option<ResponseCompleted> = None;
let mut response_completed: Option<ResponseCompleted> = None;
let mut response_error: Option<CodexErr> = None;
loop {
@@ -738,8 +711,30 @@ async fn process_sse<S>(
}
Ok(None) => {
match response_completed {
Some(completed) => {
emit_completed(&tx_event, &otel_event_manager, completed).await
Some(ResponseCompleted {
id: response_id,
usage,
}) => {
if let Some(token_usage) = &usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id,
token_usage: usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
None => {
let error = response_error.unwrap_or(CodexErr::Stream(
@@ -869,8 +864,7 @@ async fn process_sse<S>(
if let Some(resp_val) = event.response {
match serde_json::from_value::<ResponseCompleted>(resp_val) {
Ok(r) => {
emit_completed(&tx_event, &otel_event_manager, r).await;
return;
response_completed = Some(r);
}
Err(e) => {
let error = format!("failed to parse ResponseCompleted: {e}");

View File

@@ -0,0 +1,16 @@
# Migration journal {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Created: {{CREATED_AT}}
Use this log for async updates, agent hand-offs, and to publish what was learned during each workstream. Keep entries concise and focused on signals other collaborators need.
## Logging guidance
- Start each entry with a timestamp and author/agent/workstream name.
- Capture what changed, how it was validated, links to diffs/tests, and any open questions.
- Highlight blockers, decisions needed, or knowledge that other agents should adopt.
- Update the plan (`plan.md`) when scope changes; use this journal for progress + lessons.
| Timestamp | Agent / Workstream | Update / Learnings | Blockers & Risks | Next action / owner |
| --------- | ------------------ | ------------------ | ---------------- | ------------------- |
| | | | | |

View File

@@ -0,0 +1,35 @@
# Migration plan {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Generated: {{CREATED_AT}}
Use this document as the single source of truth for the migration effort. Keep it updated so any engineer (or agent) can jump in mid-flight.
## 1. Context & stakes
- Current state snapshot
- Target end state and deadline/launch windows
- Guardrails, SLAs, compliance/regulatory constraints
## 2. Incremental plan (numbered)
1. `[Step name]` — Purpose, scope, primary owner/skillset, upstream/downstream dependencies, validation & rollback signals.
2. `…`
Each step must explain:
- Preconditions & artifacts required before starting
- Specific code/data/infrastructure changes
- Telemetry, tests, or dry-runs that prove success
## 3. Parallel workstreams
| Workstream | Objective | Inputs & dependencies | Ownership / skills | Progress & telemetry hooks |
| ---------- | --------- | --------------------- | ------------------ | ------------------------- |
| _Fill in during planning_ | | | | |
## 4. Data + rollout considerations
- Data migration / backfill plan
- Environment readiness, feature flags, or config toggles
- Rollout plan (phases, smoke tests, canaries) and explicit rollback/kill-switch criteria
## 5. Risks, decisions, and follow-ups
- Top risks with mitigation owners
- Open questions / decisions with DRI and due date
- Handoff expectations (reference `journal.md` for ongoing updates)

View File

@@ -0,0 +1,25 @@
You are the migration showrunner orchestrating "{{MIGRATION_SUMMARY}}".
Workspace root: {{WORKSPACE_PATH}}
Plan document to maintain: {{PLAN_PATH}}
Shared journal for progress + learnings: {{JOURNAL_PATH}}
Before writing any code, study the repository layout, dependencies, release process, deployment gates, data stores, and cross-team stakeholders.
Deliverables:
1. Start with a concise **Executive Overview** summarizing the target state, key risks, and the biggest unknowns.
2. Provide an **Incremental plan** as a numbered list (1., 2., 3., …). Each item must include:
- Objective and concrete changes required.
- Dependencies or prerequisites, including approvals and data/state migrations.
- Ownership expectations (skillsets/teams) and automation hooks.
- Validation signals, telemetry, and rollback/kill-switch instructions.
3. Add a **Parallel workstreams** table. Group tasks that can run concurrently, show how learnings are exchanged, and specify when progress should be published to {{JOURNAL_PATH}}.
4. Capture **Coordination & learning loops**: how agents collaborate, what artifacts live in {{PLAN_PATH}} vs. {{JOURNAL_PATH}}, and how to keep successors unblocked.
5. Outline a **Risk / data / rollout** section covering backfills, environment sequencing, feature flags, monitoring, and fallback criteria.
General guidance:
- Call out missing information explicitly and request the files, owners, or metrics needed to proceed.
- Highlight dependencies on other teams, scheduled freezes, or compliance gates.
- Emphasize opportunities for automation, reuse of tooling, and knowledge sharing so multiple agents can contribute safely.
After sharing the plan in chat, mirror the same structure into {{PLAN_PATH}} (using apply_patch or editing tools) so it remains the canonical artifact. Encourage collaborators to keep {{JOURNAL_PATH}} updated with progress, blockers, and learnings.

View File

@@ -450,6 +450,9 @@ impl App {
AppEvent::OpenReviewCustomPrompt => {
self.chat_widget.show_review_custom_prompt();
}
AppEvent::StartMigration { summary } => {
self.chat_widget.start_migration(summary);
}
AppEvent::FullScreenApprovalRequest(request) => match request {
ApprovalRequest::ApplyPatch { cwd, changes, .. } => {
let _ = tui.enter_alt_screen();

View File

@@ -102,6 +102,11 @@ pub(crate) enum AppEvent {
/// Open the custom prompt option from the review popup.
OpenReviewCustomPrompt,
/// Kick off the `/migrate` workflow after the user names the migration.
StartMigration {
summary: String,
},
/// Open the approval popup.
FullScreenApprovalRequest(ApprovalRequest),

View File

@@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
@@ -120,10 +121,14 @@ use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_file_search::FileMatch;
use codex_protocol::plan_tool::UpdatePlanArgs;
use pathdiff::diff_paths;
use strum::IntoEnumIterator;
const USER_SHELL_COMMAND_HELP_TITLE: &str = "Prefix a command with ! to run it locally";
const USER_SHELL_COMMAND_HELP_HINT: &str = "Example: !ls";
const MIGRATION_PROMPT_TEMPLATE: &str = include_str!("../prompt_for_migrate_command.md");
const MIGRATION_PLAN_TEMPLATE: &str = include_str!("../migration_plan_template.md");
const MIGRATION_JOURNAL_TEMPLATE: &str = include_str!("../migration_journal_template.md");
// Track information about an in-flight exec command.
struct RunningCommand {
command: Vec<String>,
@@ -132,7 +137,7 @@ struct RunningCommand {
}
const RATE_LIMIT_WARNING_THRESHOLDS: [f64; 3] = [75.0, 90.0, 95.0];
const NUDGE_MODEL_SLUG: &str = "gpt-5-codex-mini";
const NUDGE_MODEL_SLUG: &str = "gpt-5-codex";
const RATE_LIMIT_SWITCH_PROMPT_THRESHOLD: f64 = 90.0;
#[derive(Default)]
@@ -1234,6 +1239,9 @@ impl ChatWidget {
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
self.submit_user_message(INIT_PROMPT.to_string().into());
}
SlashCommand::Migrate => {
self.open_migrate_prompt();
}
SlashCommand::Compact => {
self.clear_token_usage();
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
@@ -1773,7 +1781,9 @@ impl ChatWidget {
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Approaching rate limits".to_string()),
subtitle: Some(format!("Switch to {display_name} for lower credit usage?")),
subtitle: Some(format!(
"You've used over 90% of your limit. Switch to {display_name} for lower credit usage?"
)),
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
@@ -1796,7 +1806,6 @@ impl ChatWidget {
};
let is_current = preset.model == current_model;
let preset_for_action = preset;
let single_supported_effort = preset_for_action.supported_reasoning_efforts.len() == 1;
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::OpenReasoningPopup {
model: preset_for_action,
@@ -1807,7 +1816,7 @@ impl ChatWidget {
description,
is_current,
actions,
dismiss_on_select: single_supported_effort,
dismiss_on_select: false,
..Default::default()
});
}
@@ -1846,15 +1855,6 @@ impl ChatWidget {
});
}
if choices.len() == 1 {
if let Some(effort) = choices.first().and_then(|c| c.stored) {
self.apply_model_and_effort(preset.model.to_string(), Some(effort));
} else {
self.apply_model_and_effort(preset.model.to_string(), None);
}
return;
}
let default_choice: Option<ReasoningEffortConfig> = choices
.iter()
.any(|choice| choice.stored == Some(default_effort))
@@ -1893,7 +1893,7 @@ impl ChatWidget {
let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
let show_warning =
preset.model.starts_with("gpt-5-codex") && effort == ReasoningEffortConfig::High;
preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
let selected_description = show_warning.then(|| {
description
.as_ref()
@@ -1950,32 +1950,6 @@ impl ChatWidget {
});
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some(model.clone()),
effort: Some(effort),
summary: None,
}));
self.app_event_tx.send(AppEvent::UpdateModel(model.clone()));
self.app_event_tx
.send(AppEvent::UpdateReasoningEffort(effort));
self.app_event_tx.send(AppEvent::PersistModelSelection {
model: model.clone(),
effort,
});
tracing::info!(
"Selected model: {}, Selected effort: {}",
model,
effort
.map(|e| e.to_string())
.unwrap_or_else(|| "default".to_string())
);
}
/// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy).
pub(crate) fn open_approvals_popup(&mut self) {
let current_approval = self.config.approval_policy;
@@ -2456,6 +2430,132 @@ impl ChatWidget {
self.bottom_pane.show_view(Box::new(view));
}
    /// Handle the `/migrate` workflow once the user has named the migration:
    /// create the workspace on disk, post an info message pointing at the
    /// generated plan/journal, and submit the showrunner prompt as a user
    /// message. Blank summaries are ignored; workspace-creation failures are
    /// logged and surfaced as an error message instead.
    pub(crate) fn start_migration(&mut self, summary: String) {
        let summary = summary.trim();
        if summary.is_empty() {
            return;
        }
        match self.create_migration_workspace(summary) {
            Ok(workspace) => {
                // Display cwd-relative paths so the hint stays short.
                let dir_display = self.relative_display_path(&workspace.dir_path);
                let plan_display = self.relative_display_path(&workspace.plan_path);
                let journal_display = self.relative_display_path(&workspace.journal_path);
                let prompt = self.build_migration_prompt(
                    summary,
                    &dir_display,
                    &plan_display,
                    &journal_display,
                );
                let hint = format!("Plan: {plan_display}\nJournal: {journal_display}",);
                self.add_info_message(
                    format!(
                        "Created migration workspace `{}` at {dir_display}.",
                        workspace.dir_name
                    ),
                    Some(hint),
                );
                self.submit_user_message(prompt.into());
            }
            Err(err) => {
                tracing::error!("failed to prepare migration workspace: {err}");
                self.add_error_message(format!("Failed to prepare migration workspace: {err}"));
            }
        }
    }
    /// Show the `/migrate` entry prompt: a text view asking the user to
    /// describe the migration. Submitting a non-empty description emits
    /// `AppEvent::StartMigration`, whose handler calls `start_migration`.
    fn open_migrate_prompt(&mut self) {
        let tx = self.app_event_tx.clone();
        let view = CustomPromptView::new(
            "Describe the migration".to_string(),
            "Example: Phase 2 move billing to Postgres".to_string(),
            Some(
                "We'll create migration_<slug> with plan.md and journal.md once you press Enter."
                    .to_string(),
            ),
            // Invoked when the prompt is submitted; blank input is dropped.
            Box::new(move |prompt: String| {
                let trimmed = prompt.trim().to_string();
                if trimmed.is_empty() {
                    return;
                }
                tx.send(AppEvent::StartMigration { summary: trimmed });
            }),
        );
        self.bottom_pane.show_view(Box::new(view));
    }
    /// Create the on-disk workspace for a migration: a `migration_<slug>`
    /// directory under the session cwd containing `plan.md` and `journal.md`
    /// rendered from the bundled templates.
    ///
    /// Returns the created paths and directory name so callers can display
    /// them and build the follow-up prompt.
    fn create_migration_workspace(
        &self,
        summary: &str,
    ) -> Result<MigrationWorkspace, std::io::Error> {
        let slug = sanitize_migration_slug(summary);
        let base_name = format!("migration_{slug}");
        // Avoid clobbering an existing workspace by picking a free name.
        let (dir_path, dir_name) = self.next_available_migration_dir(&base_name);
        fs::create_dir_all(&dir_path)?;
        let created_at = Local::now().format("%Y-%m-%d %H:%M %Z").to_string();
        let plan_path = dir_path.join("plan.md");
        let journal_path = dir_path.join("journal.md");
        // Both templates share the same placeholder set.
        let plan_contents = fill_template(
            MIGRATION_PLAN_TEMPLATE,
            &[
                ("{{MIGRATION_SUMMARY}}", summary),
                ("{{WORKSPACE_NAME}}", dir_name.as_str()),
                ("{{CREATED_AT}}", created_at.as_str()),
            ],
        );
        let journal_contents = fill_template(
            MIGRATION_JOURNAL_TEMPLATE,
            &[
                ("{{MIGRATION_SUMMARY}}", summary),
                ("{{WORKSPACE_NAME}}", dir_name.as_str()),
                ("{{CREATED_AT}}", created_at.as_str()),
            ],
        );
        fs::write(&plan_path, plan_contents)?;
        fs::write(&journal_path, journal_contents)?;
        Ok(MigrationWorkspace {
            dir_path,
            dir_name,
            plan_path,
            journal_path,
        })
    }
    /// Render the migration "showrunner" prompt from its template,
    /// substituting the user's summary and the display paths for the
    /// workspace directory, plan, and journal.
    fn build_migration_prompt(
        &self,
        summary: &str,
        dir_display: &str,
        plan_display: &str,
        journal_display: &str,
    ) -> String {
        let replacements = [
            ("{{MIGRATION_SUMMARY}}", summary),
            ("{{WORKSPACE_PATH}}", dir_display),
            ("{{PLAN_PATH}}", plan_display),
            ("{{JOURNAL_PATH}}", journal_display),
        ];
        fill_template(MIGRATION_PROMPT_TEMPLATE, &replacements)
    }
    /// Find an unused directory name under the session cwd: tries
    /// `base_name` first, then `base_name_02`, `base_name_03`, … until a
    /// path that does not already exist is found.
    fn next_available_migration_dir(&self, base_name: &str) -> (PathBuf, String) {
        let mut candidate_name = base_name.to_string();
        let mut candidate_path = self.config.cwd.join(&candidate_name);
        let mut suffix = 2;
        while candidate_path.exists() {
            candidate_name = format!("{base_name}_{suffix:02}");
            candidate_path = self.config.cwd.join(&candidate_name);
            suffix += 1;
        }
        (candidate_path, candidate_name)
    }
    /// Render `path` relative to the session cwd for display, falling back
    /// to the path as given when no relative form can be computed (e.g. the
    /// paths have different prefixes).
    fn relative_display_path(&self, path: &Path) -> String {
        diff_paths(path, &self.config.cwd)
            .unwrap_or_else(|| path.to_path_buf())
            .display()
            .to_string()
    }
pub(crate) fn token_usage(&self) -> TokenUsage {
self.token_info
.as_ref()
@@ -2614,6 +2714,48 @@ fn extract_first_bold(s: &str) -> Option<String> {
None
}
/// Filesystem artifacts produced when a `/migrate` workspace is created.
struct MigrationWorkspace {
    // Path of the `migration_<slug>` directory (joined onto the session cwd).
    dir_path: PathBuf,
    // Final directory name; may carry a numeric suffix on collision.
    dir_name: String,
    // Path to the generated `plan.md` inside the workspace.
    plan_path: PathBuf,
    // Path to the generated `journal.md` inside the workspace.
    journal_path: PathBuf,
}
/// Substitute each `needle` in `replacements` with its paired value.
/// Substitutions are applied sequentially, in the order given.
fn fill_template(template: &str, replacements: &[(&str, &str)]) -> String {
    replacements
        .iter()
        .fold(template.to_string(), |rendered, (needle, value)| {
            rendered.replace(needle, value)
        })
}
/// Build a filesystem-friendly slug from a human migration summary.
///
/// Lowercases the input, keeps ASCII alphanumerics, collapses every other
/// run of characters into a single `-`, strips leading/trailing dashes, and
/// caps the result at 48 characters. When nothing usable remains, falls
/// back to a timestamp-derived name.
fn sanitize_migration_slug(summary: &str) -> String {
    let mut slug = String::new();
    // Start "true" so leading separators never emit a dash.
    let mut previous_was_separator = true;
    for ch in summary.trim().to_lowercase().chars() {
        if ch.is_ascii_alphanumeric() {
            slug.push(ch);
            previous_was_separator = false;
        } else if !previous_was_separator {
            slug.push('-');
            previous_was_separator = true;
        }
    }
    let mut cleaned = slug.trim_matches('-').to_string();
    if cleaned.len() > 48 {
        // Truncation may expose a trailing dash; trim again afterwards.
        cleaned = cleaned
            .chars()
            .take(48)
            .collect::<String>()
            .trim_matches('-')
            .to_string();
    }
    if cleaned.is_empty() {
        Local::now().format("plan-%Y%m%d-%H%M%S").to_string()
    } else {
        cleaned
    }
}
#[cfg(test)]
pub(crate) fn show_review_commit_picker_with_entries(
chat: &mut ChatWidget,

View File

@@ -5,7 +5,7 @@ expression: popup
Select Model and Effort
Switch the model for this and future Codex CLI sessions
1. gpt-5-codex (current) Optimized for codex.
1. gpt-5-codex (current) Optimized for coding tasks with many tools.
2. gpt-5 Broad world knowledge with strong general
reasoning.

View File

@@ -1,12 +1,12 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 474
expression: popup
---
Approaching rate limits
Switch to gpt-5-codex-mini for lower credit usage?
You've used over 90% of your limit. Switch to gpt-5-codex for lower credit u
1. Switch to gpt-5-codex-mini Optimized for codex. Cheaper, faster, but
less capable.
1. Switch to gpt-5-codex Optimized for coding tasks with many tools.
2. Keep current model
Press enter to confirm or esc to go back

View File

@@ -5,8 +5,6 @@ use crate::test_backend::VT100Backend;
use crate::tui::FrameRequester;
use assert_matches::assert_matches;
use codex_common::approval_presets::builtin_approval_presets;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::config::Config;
@@ -411,8 +409,6 @@ fn test_rate_limit_warnings_monthly() {
#[test]
fn rate_limit_switch_prompt_skips_when_on_lower_cost_model() {
let (mut chat, _, _) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.config.model = NUDGE_MODEL_SLUG.to_string();
chat.on_rate_limit_snapshot(Some(snapshot(95.0)));
@@ -425,10 +421,8 @@ fn rate_limit_switch_prompt_skips_when_on_lower_cost_model() {
#[test]
fn rate_limit_switch_prompt_shows_once_per_session() {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let (mut chat, _, _) = make_chatwidget_manual();
chat.config.model = "gpt-5".to_string();
chat.auth_manager = AuthManager::from_auth_for_testing(auth);
chat.on_rate_limit_snapshot(Some(snapshot(90.0)));
assert!(
@@ -450,10 +444,8 @@ fn rate_limit_switch_prompt_shows_once_per_session() {
#[test]
fn rate_limit_switch_prompt_defers_until_task_complete() {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let (mut chat, _, _) = make_chatwidget_manual();
chat.config.model = "gpt-5".to_string();
chat.auth_manager = AuthManager::from_auth_for_testing(auth);
chat.bottom_pane.set_task_running(true);
chat.on_rate_limit_snapshot(Some(snapshot(90.0)));
@@ -473,8 +465,6 @@ fn rate_limit_switch_prompt_defers_until_task_complete() {
#[test]
fn rate_limit_switch_prompt_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.config.model = "gpt-5".to_string();
chat.on_rate_limit_snapshot(Some(snapshot(92.0)));
@@ -1501,44 +1491,6 @@ fn model_reasoning_selection_popup_snapshot() {
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn single_reasoning_option_skips_selection() {
    // When a model preset offers exactly one reasoning level, the selection
    // popup should be bypassed entirely and that level applied automatically.
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
    static ONLY_HIGH: [ReasoningEffortPreset; 1] = [ReasoningEffortPreset {
        effort: ReasoningEffortConfig::High,
        description: "Maximizes reasoning depth for complex or ambiguous problems",
    }];
    let single_effort_preset = ModelPreset {
        id: "model-with-single-reasoning",
        model: "model-with-single-reasoning",
        display_name: "model-with-single-reasoning",
        description: "",
        default_reasoning_effort: ReasoningEffortConfig::High,
        supported_reasoning_efforts: &ONLY_HIGH,
        is_default: false,
    };
    chat.open_reasoning_popup(single_effort_preset);

    // The bottom pane must not show the reasoning selection UI.
    let rendered = render_bottom_popup(&chat, 80);
    assert!(
        !rendered.contains("Select Reasoning Level"),
        "expected reasoning selection popup to be skipped"
    );

    // Drain every pending app event and confirm the sole effort was applied.
    let events: Vec<_> = std::iter::from_fn(|| rx.try_recv().ok()).collect();
    assert!(
        events
            .iter()
            .any(|ev| matches!(ev, AppEvent::UpdateReasoningEffort(Some(effort)) if *effort == ReasoningEffortConfig::High)),
        "expected reasoning effort to be applied automatically; events: {events:?}"
    );
}
#[test]
fn feedback_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();

View File

@@ -17,6 +17,7 @@ pub enum SlashCommand {
Review,
New,
Init,
Migrate,
Compact,
Undo,
Diff,
@@ -39,6 +40,7 @@ impl SlashCommand {
SlashCommand::New => "start a new chat during a conversation",
SlashCommand::Init => "create an AGENTS.md file with instructions for Codex",
SlashCommand::Compact => "summarize conversation to prevent hitting the context limit",
SlashCommand::Migrate => "spin up a migration workspace and planning prompt",
SlashCommand::Review => "review my current changes and find issues",
SlashCommand::Undo => "ask Codex to undo a turn",
SlashCommand::Quit | SlashCommand::Exit => "exit Codex",
@@ -65,6 +67,7 @@ impl SlashCommand {
match self {
SlashCommand::New
| SlashCommand::Init
| SlashCommand::Migrate
| SlashCommand::Compact
| SlashCommand::Undo
| SlashCommand::Model

View File

@@ -12,7 +12,7 @@ Because Codex is written in Rust, it honors the `RUST_LOG` environment variable
The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info,codex_rmcp_client=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written:
```bash
tail -F ~/.codex/log/codex-tui.log
```
@@ -67,7 +67,7 @@ Use the MCP inspector and `codex mcp-server` to build a simple tic-tac-toe game
**approval-policy:** never
**prompt:** Implement a simple tic-tac-toe game with HTML, JavaScript, and CSS. Write the game in a single file called index.html.
**sandbox:** workspace-write

View File

@@ -17,6 +17,7 @@ Control Codexs behavior during an interactive session with slash commands.
| `/review` | review my current changes and find issues |
| `/new` | start a new chat during a conversation |
| `/init` | create an AGENTS.md file with instructions for Codex |
| `/migrate` | spin up a migration workspace and planning prompt |
| `/compact` | summarize conversation to prevent hitting the context limit |
| `/undo` | ask Codex to undo a turn |
| `/diff` | show git diff (including untracked files) |