Compare commits

..

1 Commit

Author SHA1 Message Date
rka-oai
a9c7852206 Integrate migrate-cli workflow 2025-11-06 10:34:15 -08:00
35 changed files with 1283 additions and 1093 deletions

View File

@@ -80,6 +80,7 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Migrations**](./docs/migrations.md)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)

3
codex-rs/Cargo.lock generated
View File

@@ -967,6 +967,7 @@ dependencies = [
"anyhow",
"assert_cmd",
"assert_matches",
"chrono",
"clap",
"clap_complete",
"codex-app-server",
@@ -989,8 +990,10 @@ dependencies = [
"codex-windows-sandbox",
"ctor 0.5.0",
"owo-colors",
"pathdiff",
"predicates",
"pretty_assertions",
"serde",
"serde_json",
"supports-color",
"tempfile",

View File

@@ -133,18 +133,6 @@ client_request_definitions! {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
#[serde(rename = "turn/start")]
#[ts(rename = "turn/start")]
TurnStart {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
},
#[serde(rename = "turn/interrupt")]
#[ts(rename = "turn/interrupt")]
TurnInterrupt {
params: v2::TurnInterruptParams,
response: v2::TurnInterruptResponse,
},
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]

View File

@@ -5,10 +5,8 @@ use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use schemars::JsonSchema;
use serde::Deserialize;
@@ -367,13 +365,6 @@ pub struct Turn {
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -384,51 +375,15 @@ pub enum TurnStatus {
InProgress,
}
// Turn APIs
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartParams {
pub thread_id: String,
pub input: Vec<UserInput>,
/// Override the working directory for this turn and subsequent turns.
pub cwd: Option<PathBuf>,
/// Override the approval policy for this turn and subsequent turns.
pub approval_policy: Option<AskForApproval>,
/// Override the sandbox policy for this turn and subsequent turns.
pub sandbox_policy: Option<SandboxPolicy>,
/// Override the model for this turn and subsequent turns.
pub model: Option<String>,
/// Override the reasoning effort for this turn and subsequent turns.
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartResponse {
pub turn: Turn,
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptParams {
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptResponse {}
// User input types
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum UserInput {
Text { text: String },
@@ -436,19 +391,8 @@ pub enum UserInput {
LocalImage { path: PathBuf },
}
impl UserInput {
pub fn into_core(self) -> CoreUserInput {
match self {
UserInput::Text { text } => CoreUserInput::Text { text },
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum ThreadItem {
UserMessage {
@@ -572,7 +516,7 @@ pub struct TodoItem {
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -601,7 +545,6 @@ pub struct Usage {
#[ts(export_to = "v2/")]
pub struct TurnCompletedNotification {
pub turn: Turn,
// TODO: should usage be stored on the Turn object, and we return that instead?
pub usage: Usage,
}
@@ -619,7 +562,6 @@ pub struct ItemCompletedNotification {
pub item: ThreadItem,
}
// Item-specific progress notifications
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]

View File

@@ -13,7 +13,6 @@ use codex_app_server_protocol::ApplyPatchApprovalParams;
use codex_app_server_protocol::ApplyPatchApprovalResponse;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::ArchiveConversationResponse;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::AuthStatusChangeNotification;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -30,8 +29,6 @@ use codex_app_server_protocol::FeedbackUploadResponse;
use codex_app_server_protocol::FuzzyFileSearchParams;
use codex_app_server_protocol::FuzzyFileSearchResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::GetConversationSummaryParams;
use codex_app_server_protocol::GetConversationSummaryResponse;
use codex_app_server_protocol::GetUserAgentResponse;
@@ -48,8 +45,6 @@ use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
@@ -59,8 +54,6 @@ use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::Result as JsonRpcResult;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ResumeConversationResponse;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::SendUserTurnParams;
@@ -73,7 +66,6 @@ use codex_app_server_protocol::SetDefaultModelResponse;
use codex_app_server_protocol::Thread;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadResumeParams;
@@ -81,15 +73,7 @@ use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInfoResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_app_server_protocol::UserSavedConfig;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
@@ -152,9 +136,6 @@ use tracing::info;
use tracing::warn;
use uuid::Uuid;
type PendingInterruptQueue = Vec<(RequestId, ApiVersion)>;
type PendingInterrupts = Arc<Mutex<HashMap<ConversationId, PendingInterruptQueue>>>;
// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
struct ActiveLogin {
@@ -178,17 +159,11 @@ pub(crate) struct CodexMessageProcessor {
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
pending_interrupts: PendingInterrupts,
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
feedback: CodexFeedback,
}
#[derive(Clone, Copy, Debug)]
enum ApiVersion {
V1,
V2,
}
impl CodexMessageProcessor {
pub fn new(
auth_manager: Arc<AuthManager>,
@@ -217,7 +192,7 @@ impl CodexMessageProcessor {
ClientRequest::Initialize { .. } => {
panic!("Initialize should be handled in MessageProcessor");
}
// === v2 Thread/Turn APIs ===
// === v2 Thread APIs ===
ClientRequest::ThreadStart { request_id, params } => {
self.thread_start(request_id, params).await;
}
@@ -237,12 +212,6 @@ impl CodexMessageProcessor {
self.send_unimplemented_error(request_id, "thread/compact")
.await;
}
ClientRequest::TurnStart { request_id, params } => {
self.turn_start(request_id, params).await;
}
ClientRequest::TurnInterrupt { request_id, params } => {
self.turn_interrupt(request_id, params).await;
}
ClientRequest::NewConversation { request_id, params } => {
// Do not tokio::spawn() to process new_conversation()
// asynchronously because we need to ensure the conversation is
@@ -762,7 +731,10 @@ impl CodexMessageProcessor {
match self.logout_common().await {
Ok(current_auth_method) => {
self.outgoing
.send_response(request_id, LogoutChatGptResponse {})
.send_response(
request_id,
codex_app_server_protocol::LogoutChatGptResponse {},
)
.await;
let payload = AuthStatusChangeNotification {
@@ -782,7 +754,10 @@ impl CodexMessageProcessor {
match self.logout_common().await {
Ok(current_auth_method) => {
self.outgoing
.send_response(request_id, LogoutAccountResponse {})
.send_response(
request_id,
codex_app_server_protocol::LogoutAccountResponse {},
)
.await;
let payload_v2 = AccountUpdatedNotification {
@@ -798,7 +773,11 @@ impl CodexMessageProcessor {
}
}
async fn get_auth_status(&self, request_id: RequestId, params: GetAuthStatusParams) {
async fn get_auth_status(
&self,
request_id: RequestId,
params: codex_app_server_protocol::GetAuthStatusParams,
) {
let include_token = params.include_token.unwrap_or(false);
let do_refresh = params.refresh_token.unwrap_or(false);
@@ -812,7 +791,7 @@ impl CodexMessageProcessor {
let requires_openai_auth = self.config.model_provider.requires_openai_auth;
let response = if !requires_openai_auth {
GetAuthStatusResponse {
codex_app_server_protocol::GetAuthStatusResponse {
auth_method: None,
auth_token: None,
requires_openai_auth: Some(false),
@@ -832,13 +811,13 @@ impl CodexMessageProcessor {
(None, None)
}
};
GetAuthStatusResponse {
codex_app_server_protocol::GetAuthStatusResponse {
auth_method: reported_auth_method,
auth_token: token_opt,
requires_openai_auth: Some(true),
}
}
None => GetAuthStatusResponse {
None => codex_app_server_protocol::GetAuthStatusResponse {
auth_method: None,
auth_token: None,
requires_openai_auth: Some(true),
@@ -1122,8 +1101,12 @@ impl CodexMessageProcessor {
let overrides = ConfigOverrides {
model: params.model,
cwd: params.cwd.map(PathBuf::from),
approval_policy: params.approval_policy.map(AskForApproval::to_core),
sandbox_mode: params.sandbox.map(SandboxMode::to_core),
approval_policy: params
.approval_policy
.map(codex_app_server_protocol::AskForApproval::to_core),
sandbox_mode: params
.sandbox
.map(codex_app_server_protocol::SandboxMode::to_core),
model_provider: params.model_provider,
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
base_instructions: params.base_instructions,
@@ -1448,22 +1431,60 @@ impl CodexMessageProcessor {
let ListConversationsParams {
page_size,
cursor,
model_providers,
model_providers: model_provider,
} = params;
let page_size = page_size.unwrap_or(25).max(1);
match self
.list_conversations_common(page_size, cursor, model_providers)
.await
{
Ok((items, next_cursor)) => {
let response = ListConversationsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
let page_size = page_size.unwrap_or(25);
let cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
let cursor_ref = cursor_obj.as_ref();
let model_provider_filter = match model_provider {
Some(providers) => {
if providers.is_empty() {
None
} else {
Some(providers)
}
}
Err(error) => {
None => Some(vec![self.config.model_provider_id.clone()]),
};
let model_provider_slice = model_provider_filter.as_deref();
let fallback_provider = self.config.model_provider_id.clone();
let page = match RolloutRecorder::list_conversations(
&self.config.codex_home,
page_size,
cursor_ref,
INTERACTIVE_SESSION_SOURCES,
model_provider_slice,
fallback_provider.as_str(),
)
.await
{
Ok(p) => p,
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to list conversations: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let items = page
.items
.into_iter()
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
.collect();
// Encode next_cursor as a plain string
let next_cursor = page
.next_cursor
.and_then(|cursor| serde_json::to_value(&cursor).ok())
.and_then(|value| value.as_str().map(str::to_owned));
let response = ListConversationsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
async fn list_conversations_common(
@@ -1513,7 +1534,6 @@ impl CodexMessageProcessor {
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
.collect::<Vec<_>>();
// Encode next_cursor as a plain string
let next_cursor = page
.next_cursor
.and_then(|cursor| serde_json::to_value(&cursor).ok())
@@ -1524,8 +1544,7 @@ impl CodexMessageProcessor {
async fn list_models(&self, request_id: RequestId, params: ModelListParams) {
let ModelListParams { limit, cursor } = params;
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let models = supported_models(auth_mode);
let models = supported_models();
let total = models.len();
if total == 0 {
@@ -1537,8 +1556,8 @@ impl CodexMessageProcessor {
return;
}
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
let effective_limit = effective_limit.min(total);
let effective_page_size = limit.unwrap_or(total as u32).max(1) as usize;
let effective_page_size = effective_page_size.min(total);
let start = match cursor {
Some(cursor) => match cursor.parse::<usize>() {
Ok(idx) => idx,
@@ -1565,7 +1584,7 @@ impl CodexMessageProcessor {
return;
}
let end = start.saturating_add(effective_limit).min(total);
let end = start.saturating_add(effective_page_size).min(total);
let items = models[start..end].to_vec();
let next_cursor = if end < total {
Some(end.to_string())
@@ -1600,7 +1619,7 @@ impl CodexMessageProcessor {
profile,
cwd,
approval_policy,
sandbox: sandbox_mode,
sandbox,
config: cli_overrides,
base_instructions,
developer_instructions,
@@ -1613,7 +1632,7 @@ impl CodexMessageProcessor {
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy,
sandbox_mode,
sandbox_mode: sandbox,
model_provider,
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
base_instructions,
@@ -1739,7 +1758,7 @@ impl CodexMessageProcessor {
.map(|msgs| msgs.into_iter().collect());
// Reply with conversation id + model and initial messages (when present)
let response = ResumeConversationResponse {
let response = codex_app_server_protocol::ResumeConversationResponse {
conversation_id,
model: session_configured.model.clone(),
initial_messages,
@@ -2050,151 +2069,7 @@ impl CodexMessageProcessor {
// Record the pending interrupt so we can reply when TurnAborted arrives.
{
let mut map = self.pending_interrupts.lock().await;
map.entry(conversation_id)
.or_default()
.push((request_id, ApiVersion::V1));
}
// Submit the interrupt; we'll respond upon TurnAborted.
let _ = conversation.submit(Op::Interrupt).await;
}
/// Handle the v2 `turn/start` JSON-RPC request.
///
/// Resolves the thread id to a conversation, applies any per-turn overrides
/// to the session context, submits the user input as a new turn, and replies
/// with the created `Turn` followed by a `turn/started` notification.
///
/// Error reporting (all via JSON-RPC errors on `request_id`):
/// - unparsable thread id or unknown conversation -> INVALID_REQUEST
/// - failure to submit the user input -> INTERNAL_ERROR
async fn turn_start(&self, request_id: RequestId, params: TurnStartParams) {
// Resolve conversation id from v2 thread id string.
let conversation_id = match ConversationId::from_string(&params.thread_id) {
Ok(id) => id,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid thread id: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
// Look up the live conversation; a missing one is a client error.
let Ok(conversation) = self
.conversation_manager
.get_conversation(conversation_id)
.await
else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("conversation not found: {conversation_id}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};
// Keep a copy of v2 inputs for the notification payload.
let v2_inputs_for_notif = params.input.clone();
// Map v2 input items to core input items.
let mapped_items: Vec<CoreInputItem> = params
.input
.into_iter()
.map(V2UserInput::into_core)
.collect();
// A turn may carry overrides that persist for subsequent turns as well.
let has_any_overrides = params.cwd.is_some()
|| params.approval_policy.is_some()
|| params.sandbox_policy.is_some()
|| params.model.is_some()
|| params.effort.is_some()
|| params.summary.is_some();
// If any overrides are provided, update the session turn context first.
// NOTE(review): the submission result is deliberately discarded (`let _`),
// so a failed override is silently ignored before the turn starts.
if has_any_overrides {
let _ = conversation
.submit(Op::OverrideTurnContext {
cwd: params.cwd,
approval_policy: params.approval_policy.map(AskForApproval::to_core),
sandbox_policy: params.sandbox_policy.map(|p| p.to_core()),
model: params.model,
effort: params.effort.map(Some),
summary: params.summary,
})
.await;
}
// Start the turn by submitting the user input. Return its submission id as turn_id.
let turn_id = conversation
.submit(Op::UserInput {
items: mapped_items,
})
.await;
match turn_id {
Ok(turn_id) => {
// Echo the submitted input back as the turn's initial user-message
// item; the submission id doubles as both turn id and item id.
let turn = Turn {
id: turn_id.clone(),
items: vec![ThreadItem::UserMessage {
id: turn_id,
content: v2_inputs_for_notif,
}],
status: TurnStatus::InProgress,
error: None,
};
let response = TurnStartResponse { turn: turn.clone() };
self.outgoing.send_response(request_id, response).await;
// Emit v2 turn/started notification.
let notif = TurnStartedNotification { turn };
self.outgoing
.send_server_notification(ServerNotification::TurnStarted(notif))
.await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to start turn: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn turn_interrupt(&mut self, request_id: RequestId, params: TurnInterruptParams) {
let TurnInterruptParams { thread_id, .. } = params;
// Resolve conversation id from v2 thread id string.
let conversation_id = match ConversationId::from_string(&thread_id) {
Ok(id) => id,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid thread id: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let Ok(conversation) = self
.conversation_manager
.get_conversation(conversation_id)
.await
else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("conversation not found: {conversation_id}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};
// Record the pending interrupt so we can reply when TurnAborted arrives.
{
let mut map = self.pending_interrupts.lock().await;
map.entry(conversation_id)
.or_default()
.push((request_id, ApiVersion::V2));
map.entry(conversation_id).or_default().push(request_id);
}
// Submit the interrupt; we'll respond upon TurnAborted.
@@ -2210,6 +2085,7 @@ impl CodexMessageProcessor {
conversation_id,
experimental_raw_events,
} = params;
match self
.attach_conversation_listener(conversation_id, experimental_raw_events)
.await
@@ -2336,6 +2212,7 @@ impl CodexMessageProcessor {
}
}
});
Ok(subscription_id)
}
@@ -2477,7 +2354,7 @@ async fn apply_bespoke_event_handling(
conversation_id: ConversationId,
conversation: Arc<CodexConversation>,
outgoing: Arc<OutgoingMessageSender>,
pending_interrupts: PendingInterrupts,
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
) {
let Event { id: event_id, msg } = event;
match msg {
@@ -2546,19 +2423,11 @@ async fn apply_bespoke_event_handling(
map.remove(&conversation_id).unwrap_or_default()
};
if !pending.is_empty() {
for (rid, ver) in pending {
match ver {
ApiVersion::V1 => {
let response = InterruptConversationResponse {
abort_reason: turn_aborted_event.reason.clone(),
};
outgoing.send_response(rid, response).await;
}
ApiVersion::V2 => {
let response = TurnInterruptResponse {};
outgoing.send_response(rid, response).await;
}
}
let response = InterruptConversationResponse {
abort_reason: turn_aborted_event.reason,
};
for rid in pending {
outgoing.send_response(rid, response.clone()).await;
}
}
}

View File

@@ -1,12 +1,11 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()

View File

@@ -22,17 +22,11 @@ use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
@@ -42,8 +36,13 @@ use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use std::process::Command as StdCommand;
use tokio::process::Command;
@@ -250,7 +249,7 @@ impl McpProcess {
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
pub async fn send_upload_feedback_request(
&mut self,
params: FeedbackUploadParams,
) -> anyhow::Result<i64> {
@@ -349,24 +348,6 @@ impl McpProcess {
self.send_request("loginChatGpt", None).await
}
/// Send a `turn/start` JSON-RPC request (v2).
pub async fn send_turn_start_request(
&mut self,
params: TurnStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/start", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
params: TurnInterruptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/interrupt", params).await
}
/// Send a `cancelLoginChatGpt` JSON-RPC request.
pub async fn send_cancel_login_chat_gpt_request(
&mut self,

View File

@@ -146,7 +146,7 @@ fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

View File

@@ -49,7 +49,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,

View File

@@ -1,7 +1,6 @@
// v2 test suite modules
mod account;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_start;
mod turn_interrupt;
mod turn_start;

View File

@@ -1,128 +0,0 @@
#![cfg(unix)]
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
/// End-to-end check of the v2 `turn/interrupt` API: start a thread, start a
/// turn whose tool call is a long-running sleep, then interrupt it by id and
/// expect a well-formed (empty) `TurnInterruptResponse`.
async fn turn_interrupt_aborts_running_turn() -> Result<()> {
// Use a portable sleep command to keep the turn running.
#[cfg(target_os = "windows")]
let shell_command = vec![
"powershell".to_string(),
"-Command".to_string(),
"Start-Sleep -Seconds 10".to_string(),
];
#[cfg(not(target_os = "windows"))]
let shell_command = vec!["sleep".to_string(), "10".to_string()];
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
// Mock server: long-running shell command then (after abort) nothing else needed.
let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000),
"call_sleep",
)?])
.await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn that triggers a long-running command.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run sleep".to_string(),
}],
cwd: Some(working_directory.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Give the command a brief moment to start.
// NOTE(review): fixed 1s sleep is a race on slow CI; the interrupt path
// should still succeed either way, but confirm if flakes appear.
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Interrupt the in-progress turn by id (v2 API).
let interrupt_id = mcp
.send_turn_interrupt_request(TurnInterruptParams {
thread_id: thread.id,
turn_id: turn.id,
})
.await?;
let interrupt_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)),
)
.await??;
let _resp: TurnInterruptResponse = to_response::<TurnInterruptResponse>(interrupt_resp)?;
// No fields to assert on; successful deserialization confirms proper response shape.
Ok(())
}
/// Write a `config.toml` into `codex_home` that routes the test session to the
/// mock chat-completions server at `server_uri`.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "workspace-write"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}

View File

@@ -1,486 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Verifies that turn/start (v2) returns a fresh turn id per call, emits a
// `turn/started` notification for each turn, and accepts a per-turn model
// override without breaking the auto-attached legacy event stream.
#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
    // Provide a mock server and config so model wiring is valid.
    // Three Codex turns hit the mock model (session start + two turn/start calls).
    let responses = vec![
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
    ];
    let server = create_mock_chat_completions_server_unchecked(responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri(), "never")?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // Start a thread (v2) and capture its id.
    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
    // Start a turn with only input and thread_id set (no overrides).
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Hello".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
    assert!(!turn.id.is_empty());
    // Expect a turn/started notification.
    let notif: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await??;
    let started: TurnStartedNotification =
        serde_json::from_value(notif.params.expect("params must be present"))?;
    // A freshly started turn must be reported as in-progress.
    assert_eq!(
        started.turn.status,
        codex_app_server_protocol::TurnStatus::InProgress
    );
    // Send a second turn that exercises the overrides path: change the model.
    let turn_req2 = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Second".to_string(),
            }],
            model: Some("mock-model-override".to_string()),
            ..Default::default()
        })
        .await?;
    let turn_resp2: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req2)),
    )
    .await??;
    let TurnStartResponse { turn: turn2 } = to_response::<TurnStartResponse>(turn_resp2)?;
    assert!(!turn2.id.is_empty());
    // Ensure the second turn has a different id than the first.
    assert_ne!(turn.id, turn2.id);
    // Expect a second turn/started notification as well.
    let _notif2: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await??;
    // And we should ultimately get a task_complete without having to add a
    // legacy conversation listener explicitly (auto-attached by thread/start).
    let _task_complete: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
// Verifies that turn/start (v2) accepts a `LocalImage` input item and still
// responds with a valid turn. Only the request/response path is exercised;
// the image file itself is never created or read.
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
    // Two Codex turns hit the mock model (session start + turn/start).
    let responses = vec![
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
    ];
    // Use the unchecked variant because the request payload includes a LocalImage
    // which the strict matcher does not currently cover.
    let server = create_mock_chat_completions_server_unchecked(responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri(), "never")?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
    let image_path = codex_home.path().join("image.png");
    // No need to actually write the file; we just exercise the input path.
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::LocalImage { path: image_path }],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
    assert!(!turn.id.is_empty());
    // This test only validates that turn/start responds and returns a turn.
    Ok(())
}
// Verifies per-turn approval-policy overrides: with the default "untrusted"
// policy the first shell call triggers an ExecCommandApproval elicitation,
// while a second turn that overrides approval_policy to `Never` (plus a
// full-access sandbox) runs to completion without any approval round-trip.
#[tokio::test]
async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().to_path_buf();
    // Mock server: first turn requests a shell call (elicitation), then completes.
    // Second turn same, but we'll set approval_policy=never to avoid elicitation.
    let responses = vec![
        create_shell_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call2",
        )?,
        create_final_assistant_message_sse_response("done 2")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    // Default approval is untrusted to force elicitation on first turn.
    create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
    let mut mcp = McpProcess::new(codex_home.as_path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // thread/start
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
    // turn/start — expect ExecCommandApproval request from server
    let first_turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    // Acknowledge RPC
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
    )
    .await??;
    // Receive elicitation
    let server_req = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_request_message(),
    )
    .await??;
    let ServerRequest::ExecCommandApproval { request_id, params } = server_req else {
        panic!("expected ExecCommandApproval request");
    };
    // The elicitation must carry the mock call id and the parsed command.
    assert_eq!(params.call_id, "call1");
    assert_eq!(
        params.parsed_cmd,
        vec![ParsedCommand::Unknown {
            cmd: "python3 -c 'print(42)'".to_string()
        }]
    );
    // Approve and wait for task completion
    mcp.send_response(
        request_id,
        serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
    )
    .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    // Second turn with approval_policy=never should not elicit approval
    let second_turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python again".to_string(),
            }],
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
            ..Default::default()
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
    )
    .await??;
    // Ensure we do NOT receive an ExecCommandApproval request before task completes
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
// Verifies that sandbox policy and cwd can change between turns of the same
// thread: the second turn's exec_command_begin event must report the second
// turn's cwd and command, proving the per-turn overrides took effect.
#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    // When returning Result from a test, pass an Ok(()) to the skip macro
    // so the early return type matches. The no-arg form returns unit.
    skip_if_no_network!(Ok(()));
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");
    std::fs::create_dir(&codex_home)?;
    // Two sibling working directories, one per turn.
    let workspace_root = tmp.path().join("workspace");
    std::fs::create_dir(&workspace_root)?;
    let first_cwd = workspace_root.join("turn1");
    let second_cwd = workspace_root.join("turn2");
    std::fs::create_dir(&first_cwd)?;
    std::fs::create_dir(&second_cwd)?;
    let responses = vec![
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo first turn".to_string(),
            ],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo second turn".to_string(),
            ],
            None,
            Some(5000),
            "call-second",
        )?,
        create_final_assistant_message_sse_response("done second")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(&codex_home, &server.uri(), "untrusted")?;
    let mut mcp = McpProcess::new(&codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    // thread/start
    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
    // first turn with workspace-write sandbox and first_cwd
    let first_turn = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "first turn".to_string(),
            }],
            cwd: Some(first_cwd.clone()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![first_cwd.clone()],
                network_access: false,
                exclude_tmpdir_env_var: false,
                exclude_slash_tmp: false,
            }),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    // second turn with workspace-write and second_cwd, ensure exec begins in second_cwd
    let second_turn = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "second turn".to_string(),
            }],
            cwd: Some(second_cwd.clone()),
            approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
            sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn)),
    )
    .await??;
    let exec_begin_notification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
    )
    .await??;
    let params = exec_begin_notification
        .params
        .clone()
        .expect("exec_command_begin params");
    let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
    let exec_begin = match event.msg {
        EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
        other => panic!("expected ExecCommandBegin event, got {other:?}"),
    };
    // The exec must have started in the second turn's cwd with its command.
    assert_eq!(exec_begin.cwd, second_cwd);
    assert_eq!(
        exec_begin.command,
        vec![
            "bash".to_string(),
            "-lc".to_string(),
            "echo second turn".to_string()
        ]
    );
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}
/// Write a `config.toml` into `codex_home` that routes the "mock-model"
/// through a chat-completions provider served at `server_uri`, with the
/// given approval policy and retries disabled for deterministic tests.
fn create_config_toml(
    codex_home: &Path,
    server_uri: &str,
    approval_policy: &str,
) -> std::io::Result<()> {
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}

View File

@@ -7,6 +7,10 @@ version = { workspace = true }
name = "codex"
path = "src/main.rs"
[[bin]]
name = "migrate-cli"
path = "src/bin/migrate_cli.rs"
[lib]
name = "codex_cli"
path = "src/lib.rs"
@@ -16,6 +20,7 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive"] }
clap_complete = { workspace = true }
codex-app-server = { workspace = true }
@@ -37,6 +42,8 @@ codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
ctor = { workspace = true }
owo-colors = { workspace = true }
pathdiff = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
toml = { workspace = true }

View File

@@ -0,0 +1,9 @@
use clap::Parser;
use codex_cli::migrate::MigrateCli;
/// Binary entry point: parse `migrate-cli` arguments, run the selected
/// subcommand, and exit non-zero on failure after printing the error.
fn main() {
    match MigrateCli::parse().run() {
        Ok(()) => {}
        Err(err) => {
            // `{err:?}` includes the anyhow context chain.
            eprintln!("{err:?}");
            std::process::exit(1);
        }
    }
}

View File

@@ -1,6 +1,7 @@
pub mod debug_sandbox;
mod exit_status;
pub mod login;
pub mod migrate;
use clap::Parser;
use codex_common::CliConfigOverrides;

View File

@@ -28,6 +28,7 @@ use supports_color::Stream;
mod mcp_cmd;
use crate::mcp_cmd::McpCli;
use codex_cli::migrate::MigrateCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::features::is_known_feature_key;
@@ -73,6 +74,9 @@ enum Subcommand {
/// Remove stored authentication credentials.
Logout(LogoutCommand),
/// Manage Codex migration workstreams.
Migrate(MigrateCli),
/// [experimental] Run Codex as an MCP server and manage MCP servers.
Mcp(McpCli),
@@ -443,6 +447,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
);
run_logout(logout_cli.config_overrides).await;
}
Some(Subcommand::Migrate(migrate_cli)) => {
migrate_cli.run()?;
}
Some(Subcommand::Completion(completion_cli)) => {
print_completion(completion_cli);
}

782
codex-rs/cli/src/migrate.rs Normal file
View File

@@ -0,0 +1,782 @@
use std::fs;
use std::fs::OpenOptions;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use anyhow::Context;
use anyhow::Result;
use chrono::Local;
use chrono::Utc;
use clap::Parser;
use clap::Subcommand;
use clap::ValueEnum;
use codex_tui::migration::MigrationWorkspace;
use codex_tui::migration::create_migration_workspace;
use pathdiff::diff_paths;
use serde::Deserialize;
use serde::Serialize;
/// Directory (relative to the repo root) holding cross-migration state.
const STATE_DIR: &str = ".codex/migrate";
/// Index of all known migrations, stored inside `STATE_DIR`.
const INDEX_FILE: &str = "index.json";
/// Directory under the root where individual migration workspaces live.
const MIGRATIONS_DIR: &str = "migrations";
/// Per-workspace task-graph file.
const TASKS_FILE: &str = "tasks.json";
/// Per-workspace directory for generated runbooks.
const RUNS_DIR: &str = "runs";
/// Schema version written into each workspace's tasks file.
const STATE_VERSION: u32 = 1;
/// Schema version written into the migration index.
const INDEX_VERSION: u32 = 1;
/// Top-level CLI for managing migration workspaces and their task graphs.
#[derive(Debug, Parser)]
pub struct MigrateCli {
    /// Root of the repository / workspace that owns the migration artifacts.
    #[arg(long = "root", value_name = "DIR", default_value = ".")]
    root: PathBuf,
    // Which operation to run (plan / execute).
    #[command(subcommand)]
    command: MigrateCommand,
}
/// Subcommands supported by `migrate-cli`.
#[derive(Debug, Subcommand)]
enum MigrateCommand {
    /// Create a migration workspace and seed a task graph.
    Plan(PlanCommand),
    /// Execute or update a migration task.
    Execute(ExecuteCommand),
}
/// Arguments for `migrate-cli plan`.
#[derive(Debug, Parser)]
struct PlanCommand {
    /// Short description for the migration (used to name the workspace).
    #[arg(value_name = "DESCRIPTION")]
    summary: String,
    /// How many explorer workstreams should be created for parallel agents.
    #[arg(long = "parallel", value_name = "COUNT", default_value_t = 2)]
    parallel_scouts: usize,
}
/// Arguments for `migrate-cli execute`.
#[derive(Debug, Parser)]
struct ExecuteCommand {
    /// Specific task id to update. Omit to pick the next runnable task.
    #[arg(value_name = "TASK_ID")]
    task_id: Option<String>,
    /// Name (or path) of the migration workspace to operate on.
    #[arg(long = "workspace", value_name = "PATH")]
    workspace: Option<String>,
    /// Explicitly set a task's status instead of starting it.
    #[arg(long = "status", value_enum, requires = "task_id")]
    status: Option<TaskStatus>,
    /// Append a short note to journal.md after updating the task.
    #[arg(long = "note", value_name = "TEXT")]
    note: Option<String>,
}
impl MigrateCli {
pub fn run(self) -> Result<()> {
let root = self
.root
.canonicalize()
.unwrap_or_else(|_| self.root.clone());
match self.command {
MigrateCommand::Plan(cmd) => run_plan(&root, cmd),
MigrateCommand::Execute(cmd) => run_execute(&root, cmd),
}
}
}
/// Handle `migrate-cli plan`: create a new migration workspace under
/// `<root>/migrations`, seed its task graph, and register it in the index.
fn run_plan(root: &Path, cmd: PlanCommand) -> Result<()> {
    // Ensure the shared state directory exists before we touch the index.
    fs::create_dir_all(package_dir(root))?;
    let migrations_dir = root.join(MIGRATIONS_DIR);
    let workspace = create_migration_workspace(&migrations_dir, cmd.summary.as_str())
        .with_context(|| {
            format!(
                "failed to create migration workspace inside {}",
                migrations_dir.display()
            )
        })?;
    // Cap parallel workstreams to a sane range (at least 1, at most 8).
    let parallel = cmd.parallel_scouts.clamp(1, 8);
    let state = MigrationState::new(cmd.summary.clone(), &workspace, parallel);
    state.save()?;
    write_workspace_readme(&workspace, cmd.summary.as_str())?;
    // Report the workspace path relative to the root when possible.
    let workspace_rel = diff_paths(&workspace.dir_path, root)
        .unwrap_or_else(|| workspace.dir_path.clone())
        .display()
        .to_string();
    refresh_index(root, &state)?;
    println!(
        "Created migration workspace `{}` in {workspace_rel}",
        workspace.dir_name
    );
    println!("- Plan: {}", rel_to_root(&workspace.plan_path, root));
    println!("- Journal: {}", rel_to_root(&workspace.journal_path, root));
    println!(
        "Next: open this repo in Codex, run /migrate, and let the agent follow up with `migrate-cli execute` to begin running tasks."
    );
    Ok(())
}
/// Handle `migrate-cli execute`: start the next runnable task (printing a
/// brief and generating a runbook), or — when `--status` is given — just
/// record the requested status for an explicit task id.
fn run_execute(root: &Path, cmd: ExecuteCommand) -> Result<()> {
    let workspace_dir = resolve_workspace(root, cmd.workspace.as_deref())?;
    let mut state = MigrationState::load(workspace_dir)?;
    // Pick the explicit task id, falling back to the next runnable task.
    let task_id = match cmd.task_id {
        Some(id) => id,
        None => match state.next_runnable_task_id() {
            Some(id) => id,
            None => {
                println!("All tasks are complete. Specify --task-id to override.");
                return Ok(());
            }
        },
    };
    // Blocked tasks may only be touched via an explicit --status override.
    if !state.can_start(&task_id) && cmd.status.is_none() {
        anyhow::bail!(
            "Task `{task_id}` is blocked by its dependencies. Complete the prerequisites or pass --status to override."
        );
    }
    // No --status means "start the task": describe it and prepare a runbook.
    let describe_task = cmd.status.is_none();
    let task_snapshot = if describe_task {
        Some(
            state
                .task(&task_id)
                .cloned()
                .with_context(|| format!("unknown task id `{task_id}`"))?,
        )
    } else {
        None
    };
    let new_status = cmd.status.unwrap_or(TaskStatus::Running);
    state.set_status(&task_id, new_status)?;
    // When describing, `new_status` is always `Running` (no --status given),
    // so a runbook is generated exactly on the describe path.
    let run_file = if describe_task {
        Some(write_run_file(root, &state, &task_id)?)
    } else {
        None
    };
    state.save()?;
    if let Some(note) = cmd.note {
        append_journal(&state, &task_id, new_status, note.as_str())?;
    }
    refresh_index(root, &state)?;
    if describe_task {
        if let Some(task) = task_snapshot.as_ref() {
            print_task_brief(&state, task, root);
        }
        if let Some(path) = run_file {
            println!("Runbook prepared at {path}");
        }
        println!(
            "When you finish, mark it done with `migrate-cli execute --task-id {task_id} --status done --note \"<summary>\"` and run `migrate-cli execute` again for the next task."
        );
    } else {
        // Explicit status override: no runbook is ever generated here
        // (the previous dead "Runbook prepared" print has been removed).
        println!("Task `{task_id}` status -> {new_status}");
    }
    Ok(())
}
/// Locate the migration workspace to operate on.
///
/// An explicit `provided` value may be an absolute path, a root-relative
/// path, or a bare workspace name under `<root>/migrations`; it must contain
/// a tasks file. Without one, the most recently updated entry in the
/// migration index wins.
fn resolve_workspace(root: &Path, provided: Option<&str>) -> Result<PathBuf> {
    if let Some(input) = provided {
        let direct = PathBuf::from(input);
        let candidate = if direct.is_absolute() {
            direct
        } else {
            // Prefer a root-relative match; otherwise treat the input as a
            // workspace name under the migrations directory.
            let joined = root.join(&direct);
            if joined.join(TASKS_FILE).exists() {
                joined
            } else {
                root.join(MIGRATIONS_DIR).join(&direct)
            }
        };
        return if candidate.join(TASKS_FILE).exists() {
            Ok(candidate)
        } else {
            Err(anyhow::anyhow!(
                "No migration workspace found at {}",
                candidate.display()
            ))
        };
    }
    // No workspace given: fall back to the newest indexed migration.
    let index = load_index(&index_path(root))?;
    let latest = index
        .migrations
        .iter()
        .max_by_key(|entry| entry.updated_at_epoch)
        .context("No recorded migrations. Run `migrate-cli plan` first.")?;
    let rel = PathBuf::from(&latest.workspace);
    Ok(if rel.is_absolute() { rel } else { root.join(rel) })
}
/// Write a `README.md` into the workspace describing its layout and how to
/// advance tasks with `migrate-cli execute`.
fn write_workspace_readme(workspace: &MigrationWorkspace, summary: &str) -> Result<()> {
    let contents = format!(
        "# {name}\n\n{summary}\n\n- `plan.md` canonical blueprint\n- `journal.md` publish progress + hand-offs\n- `tasks.json` orchestration metadata\n- `runs/` generated runbooks per task\n\nUse `migrate-cli execute --workspace {name}` to advance tasks or open this folder in Codex and run `/migrate`.\n",
        name = workspace.dir_name,
        summary = summary
    );
    fs::write(workspace.dir_path.join("README.md"), contents)?;
    Ok(())
}
/// Append one Markdown-table row to the workspace journal recording a task's
/// status change plus the operator-supplied note.
fn append_journal(
    state: &MigrationState,
    task_id: &str,
    status: TaskStatus,
    note: &str,
) -> Result<()> {
    let journal = state.journal_path();
    let mut file = OpenOptions::new()
        .append(true)
        .open(&journal)
        .with_context(|| format!("failed to open {}", journal.display()))?;
    let timestamp = Local::now().format("%Y-%m-%d %H:%M %Z");
    let row = format!("| {timestamp} | migrate::execute | Task {task_id} -> {status} | | {note} |");
    writeln!(file, "{row}")?;
    Ok(())
}
/// Generate a timestamped Markdown runbook for `task_id` under the
/// workspace's `runs/` directory and return its root-relative path.
fn write_run_file(root: &Path, state: &MigrationState, task_id: &str) -> Result<String> {
    let task = state
        .task(task_id)
        .with_context(|| format!("unknown task id `{task_id}`"))?;
    let runs_dir = state.workspace_dir().join(RUNS_DIR);
    fs::create_dir_all(&runs_dir)?;
    // UTC timestamp keeps file names sortable and unique per invocation.
    let timestamp = Utc::now().format("%Y%m%d-%H%M%S");
    let file_name = format!("{task_id}-{timestamp}.md");
    let path = runs_dir.join(&file_name);
    let plan = rel_to_root(&state.plan_path(), root);
    let journal = rel_to_root(&state.journal_path(), root);
    let mut body = format!(
        "# Task {task_id}: {}\n\n{}\n\n## Checkpoints\n",
        task.title, task.description
    );
    for checkpoint in &task.checkpoints {
        body.push_str(&format!("- {checkpoint}\n"));
    }
    body.push_str(&format!(
        "\nPublish updates to `{journal}`. Mirror final scope into `{plan}` when it changes.\n"
    ));
    fs::write(&path, body)?;
    Ok(rel_to_root(&path, root))
}
/// Print a human-readable brief for `task` to stdout: workspace locations,
/// the task's description, and any dependencies, ownership hints, publish
/// targets, and checkpoints recorded in the task graph.
fn print_task_brief(state: &MigrationState, task: &MigrationTask, root: &Path) {
    println!("--- migrate::execute ---");
    println!("Workspace: {}", state.workspace_dir_string(root));
    println!("Plan: {}", rel_to_root(&state.plan_path(), root));
    println!("Journal: {}", rel_to_root(&state.journal_path(), root));
    println!();
    println!("Task `{}` {}", task.id, task.title);
    println!("{}", task.description);
    // Optional metadata is only printed when present to keep the brief tight.
    if !task.depends_on.is_empty() {
        println!("Depends on: {}", task.depends_on.join(", "));
    }
    if let Some(group) = &task.parallel_group {
        println!("Parallel track: {group}");
    }
    if let Some(owner) = &task.owner_hint {
        println!("Suggested owner: {owner}");
    }
    if !task.publish_to.is_empty() {
        println!("Publish updates to: {}", task.publish_to.join(", "));
    }
    if !task.checkpoints.is_empty() {
        println!("Checkpoints:");
        for checkpoint in &task.checkpoints {
            println!("  - {checkpoint}");
        }
    }
    println!(
        "Document findings in journal.md, reflect scope changes back into plan.md, and keep runbooks inside runs/."
    );
}
/// `<root>/.codex/migrate` — shared state directory for all migrations.
fn package_dir(root: &Path) -> PathBuf {
    root.join(STATE_DIR)
}
/// Path of the cross-migration index file inside the state directory.
fn index_path(root: &Path) -> PathBuf {
    package_dir(root).join(INDEX_FILE)
}
/// Render `path` relative to `root` when a relative form exists, falling
/// back to the path as given (e.g. different prefixes on Windows).
fn rel_to_root(path: &Path, root: &Path) -> String {
    match diff_paths(path, root) {
        Some(rel) => rel.display().to_string(),
        None => path.display().to_string(),
    }
}
/// Serialize `value` as pretty-printed JSON and write it to `path`,
/// replacing any existing file.
fn write_pretty_json(path: &Path, value: &impl Serialize) -> Result<()> {
    fs::write(path, serde_json::to_string_pretty(value)?)?;
    Ok(())
}
/// One migration's summary row in the cross-migration index.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct MigrationIndexEntry {
    // Workspace directory name; doubles as the migration's identifier.
    slug: String,
    summary: String,
    // Paths below are rendered relative to the root when possible.
    workspace: String,
    plan: String,
    journal: String,
    tasks_path: String,
    // Task counts by status (done tasks are implied by the totals).
    pending_tasks: usize,
    running_tasks: usize,
    blocked_tasks: usize,
    // Pending parallel-group tasks whose dependencies are satisfied.
    ready_parallel_tasks: Vec<String>,
    status: IndexStatus,
    // RFC 3339 timestamp plus epoch seconds (used to pick the latest entry).
    updated_at: String,
    updated_at_epoch: i64,
}
/// Coarse lifecycle phase of a migration, derived from its task counts.
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
#[serde(rename_all = "snake_case")]
enum IndexStatus {
    Planning,
    Executing,
    Complete,
}
/// On-disk index of all migrations known under the state directory.
#[derive(Debug, Serialize, Deserialize)]
struct MigrationIndex {
    version: u32,
    migrations: Vec<MigrationIndexEntry>,
}
impl Default for MigrationIndex {
    // Manual impl so `version` defaults to the current schema version
    // rather than zero.
    fn default() -> Self {
        Self {
            version: INDEX_VERSION,
            migrations: Vec::new(),
        }
    }
}
/// Load the migration index from `path`, treating a missing file as an
/// empty index.
///
/// Matches on `ErrorKind::NotFound` from the read itself instead of a
/// separate `exists()` probe, which avoids a check-then-read race and one
/// extra filesystem call.
fn load_index(path: &Path) -> Result<MigrationIndex> {
    match fs::read_to_string(path) {
        Ok(text) => Ok(serde_json::from_str(&text)?),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(MigrationIndex::default()),
        Err(err) => Err(err.into()),
    }
}
/// Upsert `state`'s entry in the migration index and rewrite it to disk.
fn refresh_index(root: &Path, state: &MigrationState) -> Result<()> {
    fs::create_dir_all(package_dir(root))?;
    let mut index = load_index(&index_path(root))?;
    let entry = state.to_index_entry(root);
    // Drop any prior entry for the same slug+workspace before re-adding,
    // so repeated refreshes do not accumulate duplicates.
    index
        .migrations
        .retain(|existing| existing.slug != entry.slug || existing.workspace != entry.workspace);
    index.migrations.push(entry);
    write_pretty_json(&index_path(root), &index)
}
/// Lifecycle state of a single migration task; also a `--status` CLI value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
#[serde(rename_all = "snake_case")]
#[derive(Default)]
enum TaskStatus {
    // New tasks start pending until explicitly started.
    #[default]
    Pending,
    Running,
    Blocked,
    Done,
}
impl std::fmt::Display for TaskStatus {
    /// Render the status in the same lowercase form serde uses.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            TaskStatus::Pending => "pending",
            TaskStatus::Running => "running",
            TaskStatus::Blocked => "blocked",
            TaskStatus::Done => "done",
        })
    }
}
/// One node in a migration's task graph, persisted in `tasks.json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct MigrationTask {
    id: String,
    title: String,
    description: String,
    // Serde defaults keep older tasks files loadable as fields are added.
    #[serde(default)]
    status: TaskStatus,
    // Ids of tasks that must be Done before this one can start.
    #[serde(default)]
    depends_on: Vec<String>,
    // Tasks sharing a group may be worked on by parallel agents.
    #[serde(default)]
    parallel_group: Option<String>,
    #[serde(default)]
    owner_hint: Option<String>,
    // Files (e.g. plan.md/journal.md) progress should be published to.
    #[serde(default)]
    publish_to: Vec<String>,
    #[serde(default)]
    checkpoints: Vec<String>,
}
/// On-disk shape of a workspace's `tasks.json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct MigrationStateFile {
    version: u32,
    summary: String,
    slug: String,
    // Paths relative to the workspace directory.
    plan_path: String,
    journal_path: String,
    tasks: Vec<MigrationTask>,
}
/// In-memory handle to one migration workspace: the parsed tasks file plus
/// the absolute directory it was loaded from.
struct MigrationState {
    file: MigrationStateFile,
    workspace_dir: PathBuf,
}
impl MigrationState {
    /// Build a fresh state for a newly created workspace with the default
    /// seeded task graph.
    fn new(summary: String, workspace: &MigrationWorkspace, parallel: usize) -> Self {
        let tasks = seed_tasks(&summary, parallel);
        Self {
            file: MigrationStateFile {
                version: STATE_VERSION,
                summary,
                slug: workspace.dir_name.clone(),
                plan_path: "plan.md".to_string(),
                journal_path: "journal.md".to_string(),
                tasks,
            },
            workspace_dir: workspace.dir_path.clone(),
        }
    }
    /// Load and parse `tasks.json` from an existing workspace directory.
    fn load(workspace_dir: PathBuf) -> Result<Self> {
        let data_path = workspace_dir.join(TASKS_FILE);
        let text = fs::read_to_string(&data_path)
            .with_context(|| format!("missing tasks file at {}", data_path.display()))?;
        let file: MigrationStateFile = serde_json::from_str(&text)?;
        Ok(Self {
            file,
            workspace_dir,
        })
    }
    /// Persist the current state back to the workspace's `tasks.json`.
    fn save(&self) -> Result<()> {
        write_pretty_json(&self.workspace_dir.join(TASKS_FILE), &self.file)
    }
    /// Absolute path of the workspace directory.
    fn workspace_dir(&self) -> &Path {
        &self.workspace_dir
    }
    /// Absolute path of the workspace's plan file.
    fn plan_path(&self) -> PathBuf {
        self.workspace_dir.join(&self.file.plan_path)
    }
    /// Absolute path of the workspace's journal file.
    fn journal_path(&self) -> PathBuf {
        self.workspace_dir.join(&self.file.journal_path)
    }
    /// Look up a task by id.
    fn task(&self, id: &str) -> Option<&MigrationTask> {
        self.file.tasks.iter().find(|task| task.id == id)
    }
    /// Mutable lookup of a task by id.
    fn task_mut(&mut self, id: &str) -> Option<&mut MigrationTask> {
        self.file.tasks.iter_mut().find(|task| task.id == id)
    }
    /// Set a task's status; errors when the id is unknown.
    fn set_status(&mut self, id: &str, status: TaskStatus) -> Result<()> {
        let task = self
            .task_mut(id)
            .with_context(|| format!("unknown task id `{id}`"))?;
        task.status = status;
        Ok(())
    }
    /// First pending task (in file order) whose dependencies are all done.
    fn next_runnable_task_id(&self) -> Option<String> {
        self.file
            .tasks
            .iter()
            .find(|task| task.status == TaskStatus::Pending && self.dependencies_met(task))
            .map(|task| task.id.clone())
    }
    /// True when every dependency of `task` exists and is `Done`.
    /// Unknown dependency ids count as unmet.
    fn dependencies_met(&self, task: &MigrationTask) -> bool {
        task.depends_on.iter().all(|dep| {
            self.file
                .tasks
                .iter()
                .find(|t| &t.id == dep)
                .map(|t| t.status == TaskStatus::Done)
                .unwrap_or(false)
        })
    }
    /// True when the task exists and its dependencies are satisfied.
    fn can_start(&self, id: &str) -> bool {
        self.task(id)
            .map(|task| self.dependencies_met(task))
            .unwrap_or(false)
    }
    /// Workspace directory rendered relative to `root` when possible.
    fn workspace_dir_string(&self, root: &Path) -> String {
        rel_to_root(&self.workspace_dir, root)
    }
    /// Ids of pending parallel-group tasks whose dependencies are met —
    /// i.e. tasks that could be dispatched to agents right now.
    fn ready_parallel_tasks(&self) -> Vec<String> {
        self.file
            .tasks
            .iter()
            .filter(|task| task.parallel_group.is_some())
            .filter(|task| task.status == TaskStatus::Pending)
            .filter(|task| self.dependencies_met(task))
            .map(|task| task.id.clone())
            .collect()
    }
    /// Count tasks by status: (pending, running, blocked, done).
    fn status_counts(&self) -> (usize, usize, usize, usize) {
        let mut pending = 0;
        let mut running = 0;
        let mut blocked = 0;
        let mut done = 0;
        for task in &self.file.tasks {
            match task.status {
                TaskStatus::Pending => pending += 1,
                TaskStatus::Running => running += 1,
                TaskStatus::Blocked => blocked += 1,
                TaskStatus::Done => done += 1,
            }
        }
        (pending, running, blocked, done)
    }
    /// Snapshot this workspace into an index entry, deriving the coarse
    /// migration status from the task counts.
    fn to_index_entry(&self, root: &Path) -> MigrationIndexEntry {
        let (pending, running, blocked, _done) = self.status_counts();
        let ready_parallel_tasks = self.ready_parallel_tasks();
        // Everything done => Complete; anything running => Executing;
        // otherwise still Planning.
        let status = if pending == 0 && running == 0 && blocked == 0 {
            IndexStatus::Complete
        } else if running > 0 {
            IndexStatus::Executing
        } else {
            IndexStatus::Planning
        };
        let now = Utc::now();
        MigrationIndexEntry {
            slug: self.file.slug.clone(),
            summary: self.file.summary.clone(),
            workspace: self.workspace_dir_string(root),
            plan: rel_to_root(&self.plan_path(), root),
            journal: rel_to_root(&self.journal_path(), root),
            tasks_path: rel_to_root(&self.workspace_dir.join(TASKS_FILE), root),
            pending_tasks: pending,
            running_tasks: running,
            blocked_tasks: blocked,
            ready_parallel_tasks,
            status,
            updated_at: now.to_rfc3339(),
            updated_at_epoch: now.timestamp(),
        }
    }
}
/// Build the default task graph for a new migration: three sequential
/// planning tasks, `parallel` exploration scouts plus two stabilization
/// tasks fanned out after the blueprint, and a final cutover task gated on
/// everything else.
fn seed_tasks(summary: &str, parallel: usize) -> Vec<MigrationTask> {
    let mut tasks = Vec::new();
    let plan_targets = vec!["plan.md".to_string(), "journal.md".to_string()];
    tasks.push(MigrationTask {
        id: "plan-baseline".to_string(),
        title: "Map current + target states".to_string(),
        description: format!(
            "Capture why `{summary}` is needed, current system contracts, and the desired end state in `plan.md`."
        ),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Document repositories, services, and owners".to_string(),
            "List non-negotiable constraints".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "plan-guardrails".to_string(),
        title: "Design guardrails + approvals".to_string(),
        description: "Spell out kill-switches, approvals, and telemetry gating.".to_string(),
        depends_on: vec!["plan-baseline".to_string()],
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Define approval owners".to_string(),
            "List telemetry + alerting hooks".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "plan-blueprint".to_string(),
        title: "Lock incremental rollout plan".to_string(),
        description: "Lay out the numbered steps and decision records for the migration."
            .to_string(),
        depends_on: vec!["plan-guardrails".to_string()],
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Identify sequencing + dependencies".to_string(),
            "Assign owners to each increment".to_string(),
        ],
        ..Default::default()
    });
    // `1..=parallel.max(1)` always yields at least one element, so the old
    // `if sources.is_empty()` fallback was unreachable and has been removed.
    let sources: Vec<String> = (1..=parallel.max(1))
        .map(|i| format!("workstream #{i}"))
        .collect();
    for (idx, source) in sources.iter().enumerate() {
        tasks.push(MigrationTask {
            id: format!("parallel-scout-{}", idx + 1),
            title: format!("Deep-dive: {source}"),
            description: format!(
                "Inventory blockers, data contracts, and automation opportunities for `{source}`. Feed findings into journal.md and update plan.md if scope shifts."
            ),
            depends_on: vec!["plan-blueprint".to_string()],
            parallel_group: Some("exploration".to_string()),
            owner_hint: Some("domain expert".to_string()),
            publish_to: plan_targets.clone(),
            checkpoints: vec![
                "Publish progress + artifacts to journal.md".to_string(),
                "Flag shared learnings for other workstreams".to_string(),
            ],
            ..Default::default()
        });
    }
    tasks.push(MigrationTask {
        id: "parallel-telemetry".to_string(),
        title: "Build shared telemetry + rehearsal harness".to_string(),
        description:
            "Codify validation scripts, load tests, and dashboards each workstream will reuse."
                .to_string(),
        depends_on: vec!["plan-blueprint".to_string()],
        parallel_group: Some("stabilization".to_string()),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Link dashboards in journal.md".to_string(),
            "Tag required signals per task".to_string(),
        ],
        ..Default::default()
    });
    tasks.push(MigrationTask {
        id: "parallel-backfill".to_string(),
        title: "Design data backfill + rollback story".to_string(),
        description: "Document backfill tooling, rehearsal cadence, and rollback triggers."
            .to_string(),
        depends_on: vec!["plan-blueprint".to_string()],
        parallel_group: Some("stabilization".to_string()),
        publish_to: plan_targets.clone(),
        checkpoints: vec![
            "Note dry-run schedule in journal.md".to_string(),
            "List reversibility safeguards".to_string(),
        ],
        ..Default::default()
    });
    // Cutover waits on every planning, scout, and stabilization task.
    let mut cutover_dependencies = vec![
        "plan-baseline".to_string(),
        "plan-guardrails".to_string(),
        "plan-blueprint".to_string(),
        "parallel-telemetry".to_string(),
        "parallel-backfill".to_string(),
    ];
    cutover_dependencies.extend(
        sources
            .iter()
            .enumerate()
            .map(|(idx, _)| format!("parallel-scout-{}", idx + 1)),
    );
    tasks.push(MigrationTask {
        id: "plan-cutover".to_string(),
        title: "Execute rollout + capture learnings".to_string(),
        description: "Drive the migration, capture deviations, and publish the final hand-off."
            .to_string(),
        depends_on: cutover_dependencies,
        publish_to: plan_targets,
        checkpoints: vec![
            "Attach final verification evidence".to_string(),
            "Document kill-switch + rollback state".to_string(),
        ],
        ..Default::default()
    });
    tasks
}
impl Default for MigrationTask {
fn default() -> Self {
Self {
id: String::new(),
title: String::new(),
description: String::new(),
status: TaskStatus::Pending,
depends_on: Vec::new(),
parallel_group: None,
owner_hint: None,
publish_to: Vec::new(),
checkpoints: Vec::new(),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Builds a throwaway workspace rooted at `tmp` with stub `plan.md` and
    /// `journal.md` files so `MigrationState` can be constructed without the
    /// real scaffolding. Extracted to remove the copy-pasted setup that both
    /// tests previously duplicated.
    fn test_workspace(tmp: &TempDir) -> MigrationWorkspace {
        let workspace = MigrationWorkspace {
            dir_path: tmp.path().to_path_buf(),
            dir_name: "migration_demo".to_string(),
            plan_path: tmp.path().join("plan.md"),
            journal_path: tmp.path().join("journal.md"),
        };
        fs::write(&workspace.plan_path, "plan").unwrap();
        fs::write(&workspace.journal_path, "journal").unwrap();
        workspace
    }

    #[test]
    fn next_task_unlocked_after_dependencies_complete() {
        let tmp = TempDir::new().unwrap();
        let workspace = test_workspace(&tmp);
        let mut state = MigrationState::new("Demo".to_string(), &workspace, 1);
        // Only the baseline task is runnable until its dependencies clear.
        assert_eq!(
            state.next_runnable_task_id().as_deref(),
            Some("plan-baseline")
        );
        state.set_status("plan-baseline", TaskStatus::Done).unwrap();
        state
            .set_status("plan-guardrails", TaskStatus::Done)
            .unwrap();
        // With both prerequisites done, the blueprint task unlocks.
        assert_eq!(
            state.next_runnable_task_id().as_deref(),
            Some("plan-blueprint")
        );
    }

    #[test]
    fn ready_parallel_tasks_wait_for_blueprint() {
        let tmp = TempDir::new().unwrap();
        let workspace = test_workspace(&tmp);
        let mut state = MigrationState::new("Demo".to_string(), &workspace, 2);
        // Nothing may run in parallel before the planning phase completes.
        assert!(state.ready_parallel_tasks().is_empty());
        for id in ["plan-baseline", "plan-guardrails", "plan-blueprint"] {
            state.set_status(id, TaskStatus::Done).unwrap();
        }
        // Two scouts (parallel = 2) plus the two fixed stabilization tasks.
        let ready_set: std::collections::HashSet<_> =
            state.ready_parallel_tasks().into_iter().collect();
        let expected = std::collections::HashSet::from([
            "parallel-scout-1".to_string(),
            "parallel-scout-2".to_string(),
            "parallel-telemetry".to_string(),
            "parallel-backfill".to_string(),
        ]);
        assert_eq!(ready_set, expected);
    }
}

View File

@@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for codex.",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
@@ -52,24 +52,6 @@ const PRESETS: &[ModelPreset] = &[
],
is_default: true,
},
ModelPreset {
id: "gpt-5-codex-mini",
model: "gpt-5-codex-mini",
display_name: "gpt-5-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
ModelPreset {
id: "gpt-5",
model: "gpt-5",
@@ -98,13 +80,8 @@ const PRESETS: &[ModelPreset] = &[
},
];
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
let allow_codex_mini = matches!(auth_mode, Some(AuthMode::ChatGPT));
PRESETS
.iter()
.filter(|preset| allow_codex_mini || preset.id != "gpt-5-codex-mini")
.copied()
.collect()
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]

View File

@@ -680,33 +680,6 @@ fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
headers.get(name)?.to_str().ok()
}
async fn emit_completed(
tx_event: &mpsc::Sender<Result<ResponseEvent>>,
otel_event_manager: &OtelEventManager,
completed: ResponseCompleted,
) {
if let Some(token_usage) = &completed.usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id: completed.id.clone(),
token_usage: completed.usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
async fn process_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent>>,
@@ -719,7 +692,7 @@ async fn process_sse<S>(
// If the stream stays completely silent for an extended period treat it as disconnected.
// The response id returned from the "complete" message.
let response_completed: Option<ResponseCompleted> = None;
let mut response_completed: Option<ResponseCompleted> = None;
let mut response_error: Option<CodexErr> = None;
loop {
@@ -738,8 +711,30 @@ async fn process_sse<S>(
}
Ok(None) => {
match response_completed {
Some(completed) => {
emit_completed(&tx_event, &otel_event_manager, completed).await
Some(ResponseCompleted {
id: response_id,
usage,
}) => {
if let Some(token_usage) = &usage {
otel_event_manager.sse_event_completed(
token_usage.input_tokens,
token_usage.output_tokens,
token_usage
.input_tokens_details
.as_ref()
.map(|d| d.cached_tokens),
token_usage
.output_tokens_details
.as_ref()
.map(|d| d.reasoning_tokens),
token_usage.total_tokens,
);
}
let event = ResponseEvent::Completed {
response_id,
token_usage: usage.map(Into::into),
};
let _ = tx_event.send(Ok(event)).await;
}
None => {
let error = response_error.unwrap_or(CodexErr::Stream(
@@ -869,8 +864,7 @@ async fn process_sse<S>(
if let Some(resp_val) = event.response {
match serde_json::from_value::<ResponseCompleted>(resp_val) {
Ok(r) => {
emit_completed(&tx_event, &otel_event_manager, r).await;
return;
response_completed = Some(r);
}
Err(e) => {
let error = format!("failed to parse ResponseCompleted: {e}");

View File

@@ -0,0 +1,16 @@
# Migration journal {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Created: {{CREATED_AT}}
Use this log for async updates, agent hand-offs, and to publish what was learned during each workstream. Keep entries concise and focused on signals other collaborators need.
## Logging guidance
- Start each entry with a timestamp and author/agent/workstream name.
- Capture what changed, how it was validated, links to diffs/tests, and any open questions.
- Highlight blockers, decisions needed, or knowledge that other agents should adopt.
- Update the plan (`plan.md`) when scope changes; use this journal for progress + lessons.
| Timestamp | Agent / Workstream | Update / Learnings | Blockers & Risks | Next action / owner |
| --------- | ------------------ | ------------------ | ---------------- | ------------------- |
| | | | | |

View File

@@ -0,0 +1,35 @@
# Migration plan {{MIGRATION_SUMMARY}}
> Workspace: `{{WORKSPACE_NAME}}`
> Generated: {{CREATED_AT}}
Use this document as the single source of truth for the migration effort. Keep it updated so any engineer (or agent) can jump in mid-flight.
## 1. Context & stakes
- Current state snapshot
- Target end state and deadline/launch windows
- Guardrails, SLAs, compliance/regulatory constraints
## 2. Incremental plan (numbered)
1. `[Step name]` — Purpose, scope, primary owner/skillset, upstream/downstream dependencies, validation & rollback signals.
2. `…`
Each step must explain:
- Preconditions & artifacts required before starting
- Specific code/data/infrastructure changes
- Telemetry, tests, or dry-runs that prove success
## 3. Parallel workstreams
| Workstream | Objective | Inputs & dependencies | Ownership / skills | Progress & telemetry hooks |
| ---------- | --------- | --------------------- | ------------------ | ------------------------- |
| _Fill in during planning_ | | | | |
## 4. Data + rollout considerations
- Data migration / backfill plan
- Environment readiness, feature flags, or config toggles
- Rollout plan (phases, smoke tests, canaries) and explicit rollback/kill-switch criteria
## 5. Risks, decisions, and follow-ups
- Top risks with mitigation owners
- Open questions / decisions with DRI and due date
- Handoff expectations (reference `journal.md` for ongoing updates)

View File

@@ -0,0 +1,13 @@
You are resuming the active migration. Stay in the repository root and coordinate through the CLI tool so every agent shares the same state.
1. Run `migrate-cli execute`.
- It selects the next runnable task, marks it `running`, prints a detailed brief, and drops a runbook under `runs/`.
- Note the workspace path plus the plan/journal locations from the CLI output.
2. Follow the brief:
- Read any referenced files, services, or dashboards.
- Update `plan.md` when scope changes and log progress plus artifacts in `journal.md`.
- Keep the checkpoints in the runbook so other agents can audit what happened.
3. When you finish, record the result with `migrate-cli execute --task-id <TASK_ID> --status done --note "short summary"`, then run `migrate-cli execute` again to fetch the next task.
4. If you discover blockers, use `--status blocked --note "context"` so the index reflects reality.
Always make the artifacts inside the migration workspace the source of truth: `plan.md` for decisions and sequencing, `journal.md` for hand-offs, `tasks.json`/`runs/` for orchestration metadata.

View File

@@ -0,0 +1,21 @@
You are the migration showrunner for "{{MIGRATION_SUMMARY}}". Spin up the shared tooling and produce a plan that other agents can execute safely.
1. From the repo root run `migrate-cli plan "{{MIGRATION_SUMMARY}}"`.
- It creates `migrations/migration_<slug>/` with `plan.md`, `journal.md`, `tasks.json`, and a `runs/` folder.
- Inspect the CLI output to learn the workspace path.
2. Study the codebase, dependencies, deployment gates, and data contracts. Pull in any diagrams or docs already in the repo.
3. Populate `plan.md` with:
- An executive overview describing the current vs. target state, risks, and unknowns.
- A numbered incremental plan (1., 2., 3., …) that lists owners/skillsets, dependencies, validation steps, and rollback/kill-switch guidance.
- A section detailing how multiple agents can work in parallel, where they should publish progress, and how learnings flow between streams.
- Guardrails for telemetry, backfills, dry runs, and approvals.
4. Keep `journal.md` as the live log for progress, blockers, data snapshots, and hand-offs.
5. When the plan is solid, remind collaborators to run `/continue-migration` (which triggers `migrate-cli execute`) whenever they are ready for the next task brief.
General guidance:
- Call out missing information and request the files/owners you need.
- Prefer automation, reproducible scripts, and links to existing tooling over prose.
- Explicitly document how agents publish updates (journal.md) versus canonical decisions (plan.md).
- Organize tasks so multiple agents can operate concurrently while sharing artifacts.
After sharing the plan in chat, mirror the structure into `plan.md` using `apply_patch` or an editor, and seed `journal.md` with the first entry that summarizes current status and next checkpoints.

View File

@@ -450,6 +450,12 @@ impl App {
AppEvent::OpenReviewCustomPrompt => {
self.chat_widget.show_review_custom_prompt();
}
AppEvent::StartMigration { summary } => {
self.chat_widget.start_migration(summary);
}
AppEvent::ContinueMigration => {
self.chat_widget.continue_migration();
}
AppEvent::FullScreenApprovalRequest(request) => match request {
ApprovalRequest::ApplyPatch { cwd, changes, .. } => {
let _ = tui.enter_alt_screen();

View File

@@ -102,6 +102,14 @@ pub(crate) enum AppEvent {
/// Open the custom prompt option from the review popup.
OpenReviewCustomPrompt,
/// Kick off the `/migrate` workflow after the user names the migration.
StartMigration {
summary: String,
},
/// Prompt Codex to resume a migration via migrate-cli execute.
ContinueMigration,
/// Open the approval popup.
FullScreenApprovalRequest(ApprovalRequest),

View File

@@ -86,6 +86,8 @@ use crate::history_cell::AgentMessageCell;
use crate::history_cell::HistoryCell;
use crate::history_cell::McpToolCallCell;
use crate::markdown::append_markdown;
use crate::migration::build_continue_migration_prompt;
use crate::migration::build_migration_prompt;
#[cfg(target_os = "windows")]
use crate::onboarding::WSL_INSTRUCTIONS;
use crate::render::Insets;
@@ -132,7 +134,7 @@ struct RunningCommand {
}
const RATE_LIMIT_WARNING_THRESHOLDS: [f64; 3] = [75.0, 90.0, 95.0];
const NUDGE_MODEL_SLUG: &str = "gpt-5-codex-mini";
const NUDGE_MODEL_SLUG: &str = "gpt-5-codex";
const RATE_LIMIT_SWITCH_PROMPT_THRESHOLD: f64 = 90.0;
#[derive(Default)]
@@ -1234,6 +1236,12 @@ impl ChatWidget {
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
self.submit_user_message(INIT_PROMPT.to_string().into());
}
SlashCommand::Migrate => {
self.open_migrate_prompt();
}
SlashCommand::ContinueMigration => {
self.app_event_tx.send(AppEvent::ContinueMigration);
}
SlashCommand::Compact => {
self.clear_token_usage();
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
@@ -1773,7 +1781,9 @@ impl ChatWidget {
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Approaching rate limits".to_string()),
subtitle: Some(format!("Switch to {display_name} for lower credit usage?")),
subtitle: Some(format!(
"You've used over 90% of your limit. Switch to {display_name} for lower credit usage?"
)),
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
@@ -1796,7 +1806,6 @@ impl ChatWidget {
};
let is_current = preset.model == current_model;
let preset_for_action = preset;
let single_supported_effort = preset_for_action.supported_reasoning_efforts.len() == 1;
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::OpenReasoningPopup {
model: preset_for_action,
@@ -1807,7 +1816,7 @@ impl ChatWidget {
description,
is_current,
actions,
dismiss_on_select: single_supported_effort,
dismiss_on_select: false,
..Default::default()
});
}
@@ -1846,15 +1855,6 @@ impl ChatWidget {
});
}
if choices.len() == 1 {
if let Some(effort) = choices.first().and_then(|c| c.stored) {
self.apply_model_and_effort(preset.model.to_string(), Some(effort));
} else {
self.apply_model_and_effort(preset.model.to_string(), None);
}
return;
}
let default_choice: Option<ReasoningEffortConfig> = choices
.iter()
.any(|choice| choice.stored == Some(default_effort))
@@ -1893,7 +1893,7 @@ impl ChatWidget {
let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
let show_warning =
preset.model.starts_with("gpt-5-codex") && effort == ReasoningEffortConfig::High;
preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
let selected_description = show_warning.then(|| {
description
.as_ref()
@@ -1950,32 +1950,6 @@ impl ChatWidget {
});
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some(model.clone()),
effort: Some(effort),
summary: None,
}));
self.app_event_tx.send(AppEvent::UpdateModel(model.clone()));
self.app_event_tx
.send(AppEvent::UpdateReasoningEffort(effort));
self.app_event_tx.send(AppEvent::PersistModelSelection {
model: model.clone(),
effort,
});
tracing::info!(
"Selected model: {}, Selected effort: {}",
model,
effort
.map(|e| e.to_string())
.unwrap_or_else(|| "default".to_string())
);
}
/// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy).
pub(crate) fn open_approvals_popup(&mut self) {
let current_approval = self.config.approval_policy;
@@ -2456,6 +2430,50 @@ impl ChatWidget {
self.bottom_pane.show_view(Box::new(view));
}
pub(crate) fn start_migration(&mut self, summary: String) {
let summary = summary.trim();
if summary.is_empty() {
return;
}
let prompt = build_migration_prompt(summary);
self.add_info_message(
format!("Prompting Codex to run `migrate-cli plan \"{summary}\"`."),
Some(
"The tool scaffolds migrations/migration_<slug> with plan.md, journal.md, tasks.json, and runs/."
.to_string(),
),
);
self.submit_user_message(prompt.into());
}
pub(crate) fn continue_migration(&mut self) {
self.add_info_message(
"Prompting Codex to resume the active migration via `migrate-cli execute`.".to_string(),
Some(
"It will print the next task brief, mark it running, and remind you to update plan.md + journal.md."
.to_string(),
),
);
self.submit_user_message(build_continue_migration_prompt().into());
}
fn open_migrate_prompt(&mut self) {
let tx = self.app_event_tx.clone();
let view = CustomPromptView::new(
"Describe the migration".to_string(),
"Example: Phase 2 move billing to Postgres".to_string(),
Some("We'll ask Codex to run `migrate-cli plan` once you press Enter.".to_string()),
Box::new(move |prompt: String| {
let trimmed = prompt.trim().to_string();
if trimmed.is_empty() {
return;
}
tx.send(AppEvent::StartMigration { summary: trimmed });
}),
);
self.bottom_pane.show_view(Box::new(view));
}
pub(crate) fn token_usage(&self) -> TokenUsage {
self.token_info
.as_ref()

View File

@@ -5,7 +5,7 @@ expression: popup
Select Model and Effort
Switch the model for this and future Codex CLI sessions
1. gpt-5-codex (current) Optimized for codex.
1. gpt-5-codex (current) Optimized for coding tasks with many tools.
2. gpt-5 Broad world knowledge with strong general
reasoning.

View File

@@ -1,12 +1,12 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 474
expression: popup
---
Approaching rate limits
Switch to gpt-5-codex-mini for lower credit usage?
You've used over 90% of your limit. Switch to gpt-5-codex for lower credit u
1. Switch to gpt-5-codex-mini Optimized for codex. Cheaper, faster, but
less capable.
1. Switch to gpt-5-codex Optimized for coding tasks with many tools.
2. Keep current model
Press enter to confirm or esc to go back

View File

@@ -5,8 +5,6 @@ use crate::test_backend::VT100Backend;
use crate::tui::FrameRequester;
use assert_matches::assert_matches;
use codex_common::approval_presets::builtin_approval_presets;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::config::Config;
@@ -411,8 +409,6 @@ fn test_rate_limit_warnings_monthly() {
#[test]
fn rate_limit_switch_prompt_skips_when_on_lower_cost_model() {
let (mut chat, _, _) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.config.model = NUDGE_MODEL_SLUG.to_string();
chat.on_rate_limit_snapshot(Some(snapshot(95.0)));
@@ -425,10 +421,8 @@ fn rate_limit_switch_prompt_skips_when_on_lower_cost_model() {
#[test]
fn rate_limit_switch_prompt_shows_once_per_session() {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let (mut chat, _, _) = make_chatwidget_manual();
chat.config.model = "gpt-5".to_string();
chat.auth_manager = AuthManager::from_auth_for_testing(auth);
chat.on_rate_limit_snapshot(Some(snapshot(90.0)));
assert!(
@@ -450,10 +444,8 @@ fn rate_limit_switch_prompt_shows_once_per_session() {
#[test]
fn rate_limit_switch_prompt_defers_until_task_complete() {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let (mut chat, _, _) = make_chatwidget_manual();
chat.config.model = "gpt-5".to_string();
chat.auth_manager = AuthManager::from_auth_for_testing(auth);
chat.bottom_pane.set_task_running(true);
chat.on_rate_limit_snapshot(Some(snapshot(90.0)));
@@ -473,8 +465,6 @@ fn rate_limit_switch_prompt_defers_until_task_complete() {
#[test]
fn rate_limit_switch_prompt_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.config.model = "gpt-5".to_string();
chat.on_rate_limit_snapshot(Some(snapshot(92.0)));
@@ -1501,44 +1491,6 @@ fn model_reasoning_selection_popup_snapshot() {
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn single_reasoning_option_skips_selection() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
static SINGLE_EFFORT: [ReasoningEffortPreset; 1] = [ReasoningEffortPreset {
effort: ReasoningEffortConfig::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
}];
let preset = ModelPreset {
id: "model-with-single-reasoning",
model: "model-with-single-reasoning",
display_name: "model-with-single-reasoning",
description: "",
default_reasoning_effort: ReasoningEffortConfig::High,
supported_reasoning_efforts: &SINGLE_EFFORT,
is_default: false,
};
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 80);
assert!(
!popup.contains("Select Reasoning Level"),
"expected reasoning selection popup to be skipped"
);
let mut events = Vec::new();
while let Ok(ev) = rx.try_recv() {
events.push(ev);
}
assert!(
events
.iter()
.any(|ev| matches!(ev, AppEvent::UpdateReasoningEffort(Some(effort)) if *effort == ReasoningEffortConfig::High)),
"expected reasoning effort to be applied automatically; events: {events:?}"
);
}
#[test]
fn feedback_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();

View File

@@ -54,6 +54,7 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
pub mod migration;
pub mod onboarding;
mod pager_overlay;
pub mod public_widgets;

View File

@@ -0,0 +1,123 @@
use chrono::Local;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
/// Prompt sent when the user runs `/migrate`; `{{MIGRATION_SUMMARY}}` is
/// substituted in by `build_migration_prompt`.
pub const MIGRATION_PROMPT_TEMPLATE: &str = include_str!("../prompt_for_migrate_command.md");
/// Prompt sent verbatim when the user runs `/continue-migration`.
pub const CONTINUE_MIGRATION_PROMPT_TEMPLATE: &str =
    include_str!("../prompt_for_continue_migration_command.md");
/// Skeleton for a new workspace's `plan.md`; `{{...}}` placeholders are
/// filled when the workspace is created.
const MIGRATION_PLAN_TEMPLATE: &str = include_str!("../migration_plan_template.md");
/// Skeleton for a new workspace's `journal.md`; placeholders filled likewise.
const MIGRATION_JOURNAL_TEMPLATE: &str = include_str!("../migration_journal_template.md");
/// Filesystem layout of one migration workspace, as produced by
/// `create_migration_workspace` (a `migration_<slug>` directory holding the
/// plan and journal documents).
#[derive(Debug, Clone)]
pub struct MigrationWorkspace {
    // Full path to the workspace directory.
    pub dir_path: PathBuf,
    // Final directory name, e.g. `migration_<slug>` (possibly `_NN`-suffixed
    // when the unsuffixed name already existed).
    pub dir_name: String,
    // Path to `plan.md` inside the workspace.
    pub plan_path: PathBuf,
    // Path to `journal.md` inside the workspace.
    pub journal_path: PathBuf,
}
/// Creates a fresh migration workspace under `base_dir`.
///
/// The directory is named `migration_<slug>` (with a numeric suffix when that
/// name is taken) and is seeded with `plan.md` and `journal.md` rendered from
/// the bundled templates, substituting the summary, workspace name, and
/// creation timestamp.
///
/// # Errors
/// Returns any I/O error from creating the directories or writing the files.
pub fn create_migration_workspace(
    base_dir: &Path,
    summary: &str,
) -> Result<MigrationWorkspace, std::io::Error> {
    fs::create_dir_all(base_dir)?;

    let base_name = format!("migration_{}", sanitize_migration_slug(summary));
    let (dir_path, dir_name) = next_available_migration_dir(base_dir, &base_name);
    fs::create_dir_all(&dir_path)?;

    let created_at = Local::now().format("%Y-%m-%d %H:%M %Z").to_string();
    let replacements = [
        ("{{MIGRATION_SUMMARY}}", summary),
        ("{{WORKSPACE_NAME}}", dir_name.as_str()),
        ("{{CREATED_AT}}", created_at.as_str()),
    ];

    let plan_path = dir_path.join("plan.md");
    fs::write(&plan_path, fill_template(MIGRATION_PLAN_TEMPLATE, &replacements))?;
    let journal_path = dir_path.join("journal.md");
    fs::write(
        &journal_path,
        fill_template(MIGRATION_JOURNAL_TEMPLATE, &replacements),
    )?;

    Ok(MigrationWorkspace {
        dir_path,
        dir_name,
        plan_path,
        journal_path,
    })
}
/// Renders the `/migrate` kickoff prompt with `summary` substituted for the
/// `{{MIGRATION_SUMMARY}}` placeholder.
pub fn build_migration_prompt(summary: &str) -> String {
    let replacements = [("{{MIGRATION_SUMMARY}}", summary)];
    fill_template(MIGRATION_PROMPT_TEMPLATE, &replacements)
}
/// Returns the static `/continue-migration` prompt, unmodified.
pub fn build_continue_migration_prompt() -> String {
    String::from(CONTINUE_MIGRATION_PROMPT_TEMPLATE)
}
/// Converts a free-form summary into a filesystem-friendly slug: lowercase
/// ASCII alphanumerics with runs of other characters collapsed into single
/// dashes, trimmed of leading/trailing dashes, and capped at 48 characters.
/// When nothing survives sanitization, falls back to a timestamped
/// `plan-YYYYMMDD-HHMMSS` slug so the caller always gets a usable name.
pub fn sanitize_migration_slug(summary: &str) -> String {
    let mut slug = String::new();
    for ch in summary.trim().to_lowercase().chars() {
        if ch.is_ascii_alphanumeric() {
            slug.push(ch);
        } else if !slug.is_empty() && !slug.ends_with('-') {
            // Collapse any run of separators into one dash; never lead with one.
            slug.push('-');
        }
    }
    let mut slug = slug.trim_matches('-').to_string();
    if slug.len() > 48 {
        // Truncation may expose a trailing dash; trim again.
        let head: String = slug.chars().take(48).collect();
        slug = head.trim_matches('-').to_string();
    }
    if slug.is_empty() {
        Local::now().format("plan-%Y%m%d-%H%M%S").to_string()
    } else {
        slug
    }
}
/// Picks the first non-existent directory name under `base_dir`, starting
/// with `base_name` itself and then trying `base_name_02`, `base_name_03`, …
/// Returns the chosen path together with its final name component.
fn next_available_migration_dir(base_dir: &Path, base_name: &str) -> (PathBuf, String) {
    let mut name = base_name.to_string();
    let mut suffix = 2;
    loop {
        let candidate = base_dir.join(&name);
        if !candidate.exists() {
            return (candidate, name);
        }
        name = format!("{base_name}_{suffix:02}");
        suffix += 1;
    }
}
/// Applies each `(needle, value)` substitution to `template` in order and
/// returns the resulting string. Replacements are plain substring matches.
fn fill_template(template: &str, replacements: &[(&str, &str)]) -> String {
    replacements
        .iter()
        .fold(template.to_string(), |rendered, (needle, value)| {
            rendered.replace(needle, value)
        })
}
#[cfg(test)]
mod tests {
    use super::sanitize_migration_slug;

    // Emoji, punctuation, and repeated separators collapse into single
    // dashes, and surrounding whitespace is dropped.
    #[test]
    fn slug_sanitizes_whitespace_and_length() {
        let slug = sanitize_migration_slug(" Launch 🚀 Phase #2 migration :: Big Refactor ");
        assert_eq!(slug, "launch-phase-2-migration-big-refactor");
    }

    // A summary with no alphanumeric content yields the timestamped
    // `plan-...` fallback slug.
    #[test]
    fn slug_falls_back_to_timestamp() {
        let slug = sanitize_migration_slug(" ");
        assert!(slug.starts_with("plan-"));
        assert!(slug.len() > 10);
    }
}

View File

@@ -17,6 +17,8 @@ pub enum SlashCommand {
Review,
New,
Init,
Migrate,
ContinueMigration,
Compact,
Undo,
Diff,
@@ -39,6 +41,8 @@ impl SlashCommand {
SlashCommand::New => "start a new chat during a conversation",
SlashCommand::Init => "create an AGENTS.md file with instructions for Codex",
SlashCommand::Compact => "summarize conversation to prevent hitting the context limit",
SlashCommand::Migrate => "ask Codex to run migrate-cli plan and build the workspace",
SlashCommand::ContinueMigration => "resume the migration with migrate-cli execute",
SlashCommand::Review => "review my current changes and find issues",
SlashCommand::Undo => "ask Codex to undo a turn",
SlashCommand::Quit | SlashCommand::Exit => "exit Codex",
@@ -65,6 +69,8 @@ impl SlashCommand {
match self {
SlashCommand::New
| SlashCommand::Init
| SlashCommand::Migrate
| SlashCommand::ContinueMigration
| SlashCommand::Compact
| SlashCommand::Undo
| SlashCommand::Model

View File

@@ -12,7 +12,7 @@ Because Codex is written in Rust, it honors the `RUST_LOG` environment variable
The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info,codex_rmcp_client=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written:
```bash
```
tail -F ~/.codex/log/codex-tui.log
```
@@ -67,7 +67,7 @@ Use the MCP inspector and `codex mcp-server` to build a simple tic-tac-toe game
**approval-policy:** never
**prompt:** Implement a simple tic-tac-toe game with HTML, JavaScript, and CSS. Write the game in a single file called index.html.
**prompt:** Implement a simple tic-tac-toe game with HTML, Javascript, and CSS. Write the game in a single file called index.html.
**sandbox:** workspace-write

44
docs/migrations.md Normal file
View File

@@ -0,0 +1,44 @@
# Codex migrations
Codex ships a purpose-built `migrate-cli` binary plus slash commands so every migration follows the same playbook. The CLI manages workspaces under `migrations/migration_<slug>/`, keeps `.codex/migrate/index.json` updated, and prints detailed task briefs that Codex can execute.
## CLI quickstart
> Run the binary directly (`migrate-cli plan ...`) or via Cargo while developing (`cargo run -p codex-cli --bin migrate-cli -- plan ...`).
### `migrate-cli plan "<description>"`
* Creates `migrations/migration_<slug>/` with:
* `plan.md` — the canonical blueprint.
* `journal.md` — a running log of progress, hand-offs, and blockers.
* `tasks.json` — orchestration metadata and dependencies.
* `runs/` — runbooks generated per task when execution starts.
* Seeds a dependency-aware task graph so you can parallelize safely.
* Updates `.codex/migrate/index.json` so dashboards (or other agents) discover the workspace.
Use this command whenever you kick off a new initiative. After it runs, open the repo in Codex and use `/migrate` so the agent runs the same command and fills out `plan.md`/`journal.md` automatically.
### `migrate-cli execute [TASK_ID] [--status <state>] [--note "..."]`
* With no arguments it picks the next runnable task, marks it `running`, prints a task brief (workspace path, plan/journal locations, checkpoints), and drops a runbook under `runs/`.
* Use `--task-id <id> --status done --note "summary"` when you finish so the CLI records the journal entry and advances the graph.
* Use `--status blocked` to flag issues, or pass `--workspace <path>` if you are not working on the most recent migration.
Every invocation refreshes `.codex/migrate/index.json`, so team members and tools always see accurate status.
## Slash commands inside Codex
| Command | Purpose |
| --- | --- |
| `/migrate` | Ask Codex to run `migrate-cli plan` with your description, gather context, and populate `plan.md`/`journal.md`. |
| `/continue-migration` | Ask Codex to run `migrate-cli execute`, accept the next task brief, and push that task forward. |
Because the CLI writes the real artifacts, the slash commands simply queue up the right instructions so the agent runs the tool for you.
## Recommended workflow to share with your team
1. **Plan** — Open the repo in Codex and run `/migrate`. Codex will run `migrate-cli plan "<description>"`, scaffold the workspace, and fill in the executive overview plus incremental plan inside `plan.md` and `journal.md`.
2. **Execute** — Whenever you want the next piece of work, run `/continue-migration`. Codex runs `migrate-cli execute`, receives the task brief, and uses it (plus repo context) to do the work. When done it should mark the task complete with `migrate-cli execute --task-id <ID> --status done --note "summary"`.
3. **Repeat** — Continue using `/continue-migration` to keep the task graph flowing. `tasks.json` and `.codex/migrate/index.json` stay up to date automatically, and `runs/` accumulates runbooks for auditability.
Since everything lives in the repo, you can commit `plan.md`, `journal.md`, `tasks.json`, and `runs/` so asynchronous contributors (human or agent) always have the latest state.

View File

@@ -17,6 +17,8 @@ Control Codexs behavior during an interactive session with slash commands.
| `/review` | review my current changes and find issues |
| `/new` | start a new chat during a conversation |
| `/init` | create an AGENTS.md file with instructions for Codex |
| `/migrate` | ask Codex to run `migrate-cli plan` and populate the migration workspace |
| `/continue-migration` | ask Codex to run `migrate-cli execute` and work the next task |
| `/compact` | summarize conversation to prevent hitting the context limit |
| `/undo` | ask Codex to undo a turn |
| `/diff` | show git diff (including untracked files) |