diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs
index bce2e48c97..4011328062 100644
--- a/codex-rs/app-server-protocol/src/protocol/common.rs
+++ b/codex-rs/app-server-protocol/src/protocol/common.rs
@@ -154,6 +154,11 @@ client_request_definitions! {
         params: v2::ModelListParams,
         response: v2::ModelListResponse,
     },
+    /// EXPERIMENTAL - list collaboration mode presets.
+    CollaborationModeList => "collaborationMode/list" {
+        params: v2::CollaborationModeListParams,
+        response: v2::CollaborationModeListResponse,
+    },
 
     McpServerOauthLogin => "mcpServer/oauth/login" {
         params: v2::McpServerOauthLoginParams,
@@ -878,4 +883,21 @@ mod tests {
         );
         Ok(())
     }
+
+    #[test]
+    fn serialize_list_collaboration_modes() -> Result<()> {
+        let request = ClientRequest::CollaborationModeList {
+            request_id: RequestId::Integer(7),
+            params: v2::CollaborationModeListParams::default(),
+        };
+        assert_eq!(
+            json!({
+                "method": "collaborationMode/list",
+                "id": 7,
+                "params": {}
+            }),
+            serde_json::to_value(&request)?,
+        );
+        Ok(())
+    }
 }
diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs
index c0caa7ea32..db51c278f0 100644
--- a/codex-rs/app-server-protocol/src/protocol/v2.rs
+++ b/codex-rs/app-server-protocol/src/protocol/v2.rs
@@ -917,6 +917,20 @@ pub struct ModelListResponse {
     pub next_cursor: Option<String>,
 }
 
+/// EXPERIMENTAL - list collaboration mode presets.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct CollaborationModeListParams {}
+
+/// EXPERIMENTAL - collaboration mode presets response.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct CollaborationModeListResponse {
+    pub data: Vec<CollaborationMode>,
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md
index 891632796b..4c376ce6a4 100644
--- a/codex-rs/app-server/README.md
+++ b/codex-rs/app-server/README.md
@@ -86,6 +86,7 @@ Example (from OpenAI's official VSCode extension):
 - `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
 - `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
 - `model/list` — list available models (with reasoning effort options).
+- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
 - `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
 - `skills/config/write` — write user-level skill config by path.
 - `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
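For orientation, the serialization test above pins down the request's wire shape. Below is a minimal client-side sketch that produces the same envelope; the helper name is hypothetical, while the method string and the empty `params` object come straight from the test.

```rust
use serde_json::{Value, json};

// Hypothetical helper: build the JSON-RPC envelope for `collaborationMode/list`.
// The method name and the empty `params` object mirror the serialization test
// above; the request id is whatever the caller chooses.
fn collaboration_mode_list_request(id: i64) -> Value {
    json!({
        "method": "collaborationMode/list",
        "id": id,
        "params": {}
    })
}

fn main() {
    // A client would write this envelope as a single line to the app server's stdin.
    println!("{}", collaboration_mode_list_request(7));
}
```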
diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs
index 551015f94a..1aeb623987 100644
--- a/codex-rs/app-server/src/codex_message_processor.rs
+++ b/codex-rs/app-server/src/codex_message_processor.rs
@@ -23,6 +23,8 @@ use codex_app_server_protocol::CancelLoginAccountResponse;
 use codex_app_server_protocol::CancelLoginAccountStatus;
 use codex_app_server_protocol::CancelLoginChatGptResponse;
 use codex_app_server_protocol::ClientRequest;
+use codex_app_server_protocol::CollaborationModeListParams;
+use codex_app_server_protocol::CollaborationModeListResponse;
 use codex_app_server_protocol::CommandExecParams;
 use codex_app_server_protocol::ConversationGitInfo;
 use codex_app_server_protocol::ConversationSummary;
@@ -436,6 +438,15 @@ impl CodexMessageProcessor {
                     Self::list_models(outgoing, thread_manager, config, request_id, params).await;
                 });
             }
+            ClientRequest::CollaborationModeList { request_id, params } => {
+                let outgoing = self.outgoing.clone();
+                let thread_manager = self.thread_manager.clone();
+
+                tokio::spawn(async move {
+                    Self::list_collaboration_modes(outgoing, thread_manager, request_id, params)
+                        .await;
+                });
+            }
             ClientRequest::McpServerOauthLogin { request_id, params } => {
                 self.mcp_server_oauth_login(request_id, params).await;
             }
@@ -2371,6 +2382,18 @@ impl CodexMessageProcessor {
         outgoing.send_response(request_id, response).await;
     }
 
+    async fn list_collaboration_modes(
+        outgoing: Arc<OutgoingMessageSender>,
+        thread_manager: Arc<ThreadManager>,
+        request_id: RequestId,
+        params: CollaborationModeListParams,
+    ) {
+        let CollaborationModeListParams {} = params;
+        let items = thread_manager.list_collaboration_modes();
+        let response = CollaborationModeListResponse { data: items };
+        outgoing.send_response(request_id, response).await;
+    }
+
     async fn mcp_server_refresh(&self, request_id: RequestId, _params: Option<()>) {
         let config = match self.load_latest_config().await {
             Ok(config) => config,
diff --git a/codex-rs/app-server/tests/common/mcp_process.rs b/codex-rs/app-server/tests/common/mcp_process.rs
index cde6ff3913..fe236a52b2 100644
--- a/codex-rs/app-server/tests/common/mcp_process.rs
+++ b/codex-rs/app-server/tests/common/mcp_process.rs
@@ -17,6 +17,7 @@ use codex_app_server_protocol::CancelLoginAccountParams;
 use codex_app_server_protocol::CancelLoginChatGptParams;
 use codex_app_server_protocol::ClientInfo;
 use codex_app_server_protocol::ClientNotification;
+use codex_app_server_protocol::CollaborationModeListParams;
 use codex_app_server_protocol::ConfigBatchWriteParams;
 use codex_app_server_protocol::ConfigReadParams;
 use codex_app_server_protocol::ConfigValueWriteParams;
@@ -396,6 +397,15 @@
         self.send_request("model/list", params).await
     }
 
+    /// Send a `collaborationMode/list` JSON-RPC request.
+    pub async fn send_list_collaboration_modes_request(
+        &mut self,
+        params: CollaborationModeListParams,
+    ) -> anyhow::Result<i64> {
+        let params = Some(serde_json::to_value(params)?);
+        self.send_request("collaborationMode/list", params).await
+    }
+
     /// Send a `resumeConversation` JSON-RPC request.
     pub async fn send_resume_conversation_request(
         &mut self,
diff --git a/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs b/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs
new file mode 100644
index 0000000000..b6ff3f888e
--- /dev/null
+++ b/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs
@@ -0,0 +1,85 @@
+//! Validates that the collaboration mode list endpoint returns the expected default presets.
+//!
+//! The test drives the app server through the MCP harness and asserts that the list response
+//! includes the plan, collaborate, and execute modes with their default model and reasoning
+//! effort settings, which keeps the API contract visible in one place.
+
+use std::time::Duration;
+
+use anyhow::Result;
+use app_test_support::McpProcess;
+use app_test_support::to_response;
+use codex_app_server_protocol::CollaborationModeListParams;
+use codex_app_server_protocol::CollaborationModeListResponse;
+use codex_app_server_protocol::JSONRPCResponse;
+use codex_app_server_protocol::RequestId;
+use codex_protocol::config_types::CollaborationMode;
+use codex_protocol::config_types::Settings;
+use codex_protocol::openai_models::ReasoningEffort;
+use pretty_assertions::assert_eq;
+use tempfile::TempDir;
+use tokio::time::timeout;
+
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
+
+/// Confirms the server returns the default collaboration mode presets in a stable order.
+#[tokio::test]
+async fn list_collaboration_modes_returns_presets() -> Result<()> {
+    let codex_home = TempDir::new()?;
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+
+    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
+
+    let request_id = mcp
+        .send_list_collaboration_modes_request(CollaborationModeListParams {})
+        .await?;
+
+    let response: JSONRPCResponse = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
+    )
+    .await??;
+
+    let CollaborationModeListResponse { data: items } =
+        to_response::<CollaborationModeListResponse>(response)?;
+
+    let expected = vec![plan_preset(), collaborate_preset(), execute_preset()];
+    assert_eq!(expected, items);
+    Ok(())
+}
+
+/// Builds the plan preset that the list response is expected to return.
+///
+/// If the defaults change in the app server, this helper must be updated alongside the
+/// contract; otherwise the test fails in a way that looks like an API regression.
+fn plan_preset() -> CollaborationMode {
+    CollaborationMode::Plan(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::Medium),
+        developer_instructions: None,
+    })
+}
+
+/// Builds the collaborate preset that the list response is expected to return.
+///
+/// The helper keeps the expected model and reasoning defaults co-located with the test
+/// so that mismatches point directly at the API contract being exercised.
+fn collaborate_preset() -> CollaborationMode {
+    CollaborationMode::Collaborate(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::Medium),
+        developer_instructions: None,
+    })
+}
+
+/// Builds the execute preset that the list response is expected to return.
+///
+/// The execute preset uses a higher reasoning effort (XHigh) to match the
+/// execution contract the server currently exposes.
+fn execute_preset() -> CollaborationMode {
+    CollaborationMode::Execute(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::XHigh),
+        developer_instructions: None,
+    })
+}
diff --git a/codex-rs/app-server/tests/suite/v2/mod.rs b/codex-rs/app-server/tests/suite/v2/mod.rs
index b2159ab9ce..82939908ee 100644
--- a/codex-rs/app-server/tests/suite/v2/mod.rs
+++ b/codex-rs/app-server/tests/suite/v2/mod.rs
@@ -1,5 +1,6 @@
 mod account;
 mod analytics;
+mod collaboration_mode_list;
 mod config_rpc;
 mod initialize;
 mod model_list;
diff --git a/codex-rs/core/src/models_manager/collaboration_mode_presets.rs b/codex-rs/core/src/models_manager/collaboration_mode_presets.rs
new file mode 100644
index 0000000000..3d6eb9ca3c
--- /dev/null
+++ b/codex-rs/core/src/models_manager/collaboration_mode_presets.rs
@@ -0,0 +1,31 @@
+use codex_protocol::config_types::CollaborationMode;
+use codex_protocol::config_types::Settings;
+use codex_protocol::openai_models::ReasoningEffort;
+
+pub(super) fn builtin_collaboration_mode_presets() -> Vec<CollaborationMode> {
+    vec![plan_preset(), collaborate_preset(), execute_preset()]
+}
+
+fn plan_preset() -> CollaborationMode {
+    CollaborationMode::Plan(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::Medium),
+        developer_instructions: None,
+    })
+}
+
+fn collaborate_preset() -> CollaborationMode {
+    CollaborationMode::Collaborate(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::Medium),
+        developer_instructions: None,
+    })
+}
+
+fn execute_preset() -> CollaborationMode {
+    CollaborationMode::Execute(Settings {
+        model: "gpt-5.2-codex".to_string(),
+        reasoning_effort: Some(ReasoningEffort::XHigh),
+        developer_instructions: None,
+    })
+}
diff --git a/codex-rs/core/src/models_manager/manager.rs b/codex-rs/core/src/models_manager/manager.rs
index 206b4cab27..82fea6e71f 100644
--- a/codex-rs/core/src/models_manager/manager.rs
+++ b/codex-rs/core/src/models_manager/manager.rs
@@ -1,6 +1,7 @@
 use codex_api::ModelsClient;
 use codex_api::ReqwestTransport;
 use codex_app_server_protocol::AuthMode;
+use codex_protocol::config_types::CollaborationMode;
 use codex_protocol::openai_models::ModelInfo;
 use codex_protocol::openai_models::ModelPreset;
 use codex_protocol::openai_models::ModelsResponse;
@@ -23,6 +24,7 @@ use crate::error::CodexErr;
 use crate::error::Result as CoreResult;
 use crate::features::Feature;
 use crate::model_provider_info::ModelProviderInfo;
+use crate::models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
 use crate::models_manager::model_info;
 use crate::models_manager::model_presets::builtin_model_presets;
 
@@ -90,6 +92,13 @@ impl ModelsManager {
         self.build_available_models(remote_models)
     }
 
+    /// List collaboration mode presets.
+    ///
+    /// Returns the static built-in presets; each preset currently hard-codes its model and reasoning effort.
+    pub fn list_collaboration_modes(&self) -> Vec<CollaborationMode> {
+        builtin_collaboration_mode_presets()
+    }
+
     /// Attempt to list models without blocking, using the current cached state.
     ///
     /// Returns an error if the internal lock cannot be acquired.
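Since `list_collaboration_modes` returns plain `CollaborationMode` values, a caller recovers the per-mode `Settings` by matching on the variant. Here is a sketch of such a consumer, assuming the enum has exactly the three variants used in this diff and that the `Settings` fields referenced below are public.

```rust
use codex_protocol::config_types::{CollaborationMode, Settings};

// Illustrative only: summarize one preset for display. Assumes `CollaborationMode`
// has exactly the Plan/Collaborate/Execute variants seen in this diff.
fn describe(mode: &CollaborationMode) -> String {
    let (label, settings): (&str, &Settings) = match mode {
        CollaborationMode::Plan(s) => ("plan", s),
        CollaborationMode::Collaborate(s) => ("collaborate", s),
        CollaborationMode::Execute(s) => ("execute", s),
    };
    format!(
        "{label}: model={}, effort={:?}",
        settings.model, settings.reasoning_effort
    )
}
```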
diff --git a/codex-rs/core/src/models_manager/mod.rs b/codex-rs/core/src/models_manager/mod.rs
index d0e3c8214a..1b3d95dc40 100644
--- a/codex-rs/core/src/models_manager/mod.rs
+++ b/codex-rs/core/src/models_manager/mod.rs
@@ -1,4 +1,5 @@
 pub mod cache;
+pub mod collaboration_mode_presets;
 pub mod manager;
 pub mod model_info;
 pub mod model_presets;
diff --git a/codex-rs/core/src/thread_manager.rs b/codex-rs/core/src/thread_manager.rs
index 6f2187f527..8d02828d59 100644
--- a/codex-rs/core/src/thread_manager.rs
+++ b/codex-rs/core/src/thread_manager.rs
@@ -19,6 +19,7 @@ use crate::rollout::RolloutRecorder;
 use crate::rollout::truncation;
 use crate::skills::SkillsManager;
 use codex_protocol::ThreadId;
+use codex_protocol::config_types::CollaborationMode;
 use codex_protocol::openai_models::ModelPreset;
 use codex_protocol::protocol::InitialHistory;
 use codex_protocol::protocol::McpServerRefreshConfig;
@@ -157,6 +158,10 @@ impl ThreadManager {
         .await
     }
 
+    pub fn list_collaboration_modes(&self) -> Vec<CollaborationMode> {
+        self.state.models_manager.list_collaboration_modes()
+    }
+
     pub async fn list_thread_ids(&self) -> Vec<ThreadId> {
         self.state.threads.read().await.keys().copied().collect()
     }
diff --git a/codex-rs/docs/codex_mcp_interface.md b/codex-rs/docs/codex_mcp_interface.md
index 10f81e59b9..883c7321bc 100644
--- a/codex-rs/docs/codex_mcp_interface.md
+++ b/codex-rs/docs/codex_mcp_interface.md
@@ -20,6 +20,7 @@ At a glance:
 - Configuration and info
   - `getUserSavedConfig`, `setDefaultModel`, `getUserAgent`, `userInfo`
   - `model/list` → enumerate available models and reasoning options
+  - `collaborationMode/list` → enumerate collaboration mode presets (experimental)
 - Auth
   - `account/read`, `account/login/start`, `account/login/cancel`, `account/logout`, `account/rateLimits/read`
   - notifications: `account/login/completed`, `account/updated`, `account/rateLimits/updated`
@@ -96,6 +97,12 @@ Each response yields:
 - `isDefault` – whether the model is recommended for most users
 - `nextCursor` – pass into the next request to continue paging (optional)
 
+## Collaboration modes (experimental)
+
+Fetch the built-in collaboration mode presets with `collaborationMode/list`. This endpoint does not support pagination and returns the full list in one response:
+
+- `data` – ordered list of collaboration mode presets
+
 ## Event stream
 
 While a conversation runs, the server sends notifications:
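To close the loop on the documented response shape, here is a hedged sketch of consuming the reply. The envelope (`result` wrapping the payload) follows standard JSON-RPC; the `data` array is left empty because this diff does not pin down the JSON encoding of an individual `CollaborationMode`.

```rust
use serde_json::{Value, json};

fn main() {
    // Hypothetical response envelope for `collaborationMode/list`, shaped per
    // `CollaborationModeListResponse`: a lone `data` array and no cursor field.
    let response: Value = json!({
        "id": 7,
        "result": { "data": [] }
    });

    // The endpoint is unpaginated, so one read of `data` sees every preset.
    let presets = response["result"]["data"]
        .as_array()
        .expect("`data` should be an array");
    println!("received {} collaboration mode presets", presets.len());
}
```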