Compare commits

..

9 Commits

Author SHA1 Message Date
Owen Lin
a70260a0ff working update 2025-10-24 15:10:37 -07:00
Owen Lin
b995e93f3b fix compilation 2025-10-24 15:10:37 -07:00
Owen Lin
8cd7998dab codex impl 2025-10-24 15:10:36 -07:00
Owen Lin
ad958ed1aa make config.rs tests more concise 2025-10-24 15:10:36 -07:00
Owen Lin
0ceb38383e update 2025-10-24 15:10:36 -07:00
Owen Lin
8950c882eb update read test 2025-10-24 15:10:36 -07:00
Owen Lin
73a22df910 basic wiring 2025-10-24 15:10:36 -07:00
Owen Lin
6379bcadf6 codex impl 2025-10-24 15:10:36 -07:00
Owen Lin
19146ee74e [app-server] basic config RPC API 2025-10-24 15:10:35 -07:00
17 changed files with 1081 additions and 367 deletions

View File

@@ -78,6 +78,9 @@ macro_rules! for_each_schema_type {
$macro!(crate::LoginChatGptResponse);
$macro!(crate::LogoutChatGptParams);
$macro!(crate::LogoutChatGptResponse);
$macro!(crate::McpOAuthCredentialsStoreMode);
$macro!(crate::McpServerConfig);
$macro!(crate::McpServerTransportConfig);
$macro!(crate::NewConversationParams);
$macro!(crate::NewConversationResponse);
$macro!(crate::Profile);

View File

@@ -130,6 +130,22 @@ client_request_definitions! {
response: GetAccountResponse,
},
#[serde(rename = "config/read")]
#[ts(rename = "config/read")]
GetConfig {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: UserSavedConfig,
},
#[serde(rename = "config/update")]
#[ts(rename = "config/update")]
UpdateConfig {
params: UpdateConfigParams,
// TODO(owen): or should we return UserSavedConfig directly?
// First, figure out how we want to represent errors.
response: UpdateConfigResponse,
},
/// DEPRECATED APIs below
Initialize {
params: InitializeParams,
@@ -409,6 +425,18 @@ pub struct LoginAccountResponse {
#[serde(rename_all = "camelCase")]
pub struct LogoutAccountResponse {}
/// Parameters for the `config/update` RPC.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UpdateConfigParams {
    /// Complete desired saved configuration. Fields left unset here are
    /// removed from `config.toml` on persist (see the
    /// `update_config_clears_missing_fields` test), so callers must send
    /// the full config, not a partial patch.
    pub config: UserSavedConfig,
}
/// Response for the `config/update` RPC.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UpdateConfigResponse {
    /// The configuration as re-read from disk after persisting, i.e. the
    /// result of a full round-trip through `config.toml`.
    pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
@@ -626,6 +654,12 @@ pub struct UserSavedConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub tools: Option<Tools>,
/// MCP servers
#[serde(default)]
pub mcp_servers: HashMap<String, McpServerConfig>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mcp_oauth_credentials_store: Option<McpOAuthCredentialsStoreMode>,
/// Profiles
#[serde(skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
@@ -647,6 +681,58 @@ pub struct Profile {
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
}
/// Protocol-level representation of one MCP server entry in the user's
/// saved configuration.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct McpServerConfig {
    /// How to reach the server: stdio subprocess or streamable HTTP.
    pub transport: McpServerTransportConfig,
    /// Whether the server is active.
    /// NOTE(review): `#[serde(default)]` makes this `false` when the field
    /// is omitted from incoming JSON, while a server omitted-in-TOML shows
    /// up as `enabled: true` in the read path (see the `issues` fixture in
    /// the integration tests) — confirm this asymmetry is intended.
    #[serde(default)]
    pub enabled: bool,
    /// Startup timeout override, in seconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub startup_timeout_sec: Option<f64>,
    /// Per-tool-call timeout override, in seconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_timeout_sec: Option<f64>,
    // Presumably an allow-list of tool names (None = no restriction) —
    // TODO confirm against the core config semantics.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled_tools: Option<Vec<String>>,
    // Presumably a deny-list of tool names — TODO confirm against core.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub disabled_tools: Option<Vec<String>>,
}
/// Transport used to communicate with an MCP server.
///
/// Serialized with an internal `type` tag in kebab-case, i.e.
/// `{"type": "stdio", ...}` or `{"type": "streamableHttp"}`'s kebab form
/// `{"type": "streamable-http", ...}`; field names are camelCase.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "kebab-case")]
pub enum McpServerTransportConfig {
    /// Launch the server as a local subprocess.
    #[serde(rename_all = "camelCase")]
    Stdio {
        /// Executable to launch.
        command: String,
        /// Command-line arguments; omitted from JSON when empty.
        #[serde(default, skip_serializing_if = "Vec::is_empty")]
        args: Vec<String>,
        /// Extra environment variables to set for the subprocess.
        #[serde(skip_serializing_if = "Option::is_none")]
        env: Option<HashMap<String, String>>,
        // Presumably names of env vars to forward from the parent
        // environment — TODO confirm against core config docs.
        #[serde(default, skip_serializing_if = "Vec::is_empty")]
        env_vars: Vec<String>,
        /// Working directory for the subprocess.
        #[serde(skip_serializing_if = "Option::is_none")]
        cwd: Option<PathBuf>,
    },
    /// Connect to a remote server over streamable HTTP.
    #[serde(rename_all = "camelCase")]
    StreamableHttp {
        /// Server endpoint URL.
        url: String,
        /// Env var holding the bearer token to send, if any.
        #[serde(skip_serializing_if = "Option::is_none")]
        bearer_token_env_var: Option<String>,
        /// Literal HTTP headers to send with each request.
        #[serde(skip_serializing_if = "Option::is_none")]
        http_headers: Option<HashMap<String, String>>,
        // Presumably header-name → env-var-name pairs resolved at request
        // time — TODO confirm against core config semantics.
        #[serde(skip_serializing_if = "Option::is_none")]
        env_http_headers: Option<HashMap<String, String>>,
    },
}
/// Where OAuth credentials for MCP servers are stored.
///
/// Serialized as `"auto"`, `"file"`, or `"keyring"`; mirrors the TOML key
/// `mcp_oauth_credentials_store` (see the integration-test fixture).
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS, Default)]
#[serde(rename_all = "lowercase")]
pub enum McpOAuthCredentialsStoreMode {
    /// Let the implementation pick a backend (the default).
    #[default]
    Auto,
    /// Store credentials on disk.
    File,
    /// Store credentials in the OS keyring.
    Keyring,
}
/// MCP representation of a [`codex_core::config::ToolsToml`].
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]

View File

@@ -52,6 +52,8 @@ use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::SetDefaultModelResponse;
use codex_app_server_protocol::UpdateConfigParams;
use codex_app_server_protocol::UpdateConfigResponse;
use codex_app_server_protocol::UserInfoResponse;
use codex_app_server_protocol::UserSavedConfig;
use codex_backend_client::Client as BackendClient;
@@ -74,6 +76,7 @@ use codex_core::config::load_config_as_toml;
use codex_core::config_edit::CONFIG_KEY_EFFORT;
use codex_core::config_edit::CONFIG_KEY_MODEL;
use codex_core::config_edit::persist_overrides_and_clear_if_none;
use codex_core::config_edit::persist_user_saved_config;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;
@@ -197,6 +200,21 @@ impl CodexMessageProcessor {
self.send_unimplemented_error(request_id, "account/read")
.await;
}
ClientRequest::GetAccountRateLimits {
request_id,
params: _,
} => {
self.get_account_rate_limits(request_id).await;
}
ClientRequest::GetConfig {
request_id,
params: _,
} => {
self.get_user_saved_config(request_id, false).await;
}
ClientRequest::UpdateConfig { request_id, params } => {
self.update_user_saved_config(request_id, params).await;
}
ClientRequest::ResumeConversation { request_id, params } => {
self.handle_resume_conversation(request_id, params).await;
}
@@ -246,7 +264,7 @@ impl CodexMessageProcessor {
request_id,
params: _,
} => {
self.get_user_saved_config(request_id).await;
self.get_user_saved_config(request_id, true).await;
}
ClientRequest::SetDefaultModel { request_id, params } => {
self.set_default_model(request_id, params).await;
@@ -269,12 +287,6 @@ impl CodexMessageProcessor {
ClientRequest::ExecOneOffCommand { request_id, params } => {
self.exec_one_off_command(request_id, params).await;
}
ClientRequest::GetAccountRateLimits {
request_id,
params: _,
} => {
self.get_account_rate_limits(request_id).await;
}
}
}
@@ -618,7 +630,7 @@ impl CodexMessageProcessor {
})
}
async fn get_user_saved_config(&self, request_id: RequestId) {
async fn get_user_saved_config(&self, request_id: RequestId, wrap: bool) {
let toml_value = match load_config_as_toml(&self.config.codex_home).await {
Ok(val) => val,
Err(err) => {
@@ -647,9 +659,57 @@ impl CodexMessageProcessor {
let user_saved_config: UserSavedConfig = cfg.into();
let response = GetUserSavedConfigResponse {
config: user_saved_config,
if wrap {
let response = GetUserSavedConfigResponse {
config: user_saved_config,
};
self.outgoing.send_response(request_id, response).await;
} else {
self.outgoing
.send_response(request_id, user_saved_config)
.await;
}
}
/// Handles the `config/update` RPC: persists the supplied config to
/// `config.toml`, then re-reads and re-parses the file so the response
/// reflects exactly what was written (a full round-trip through
/// [`ConfigToml`]). Every failure is reported to the client as an
/// `INTERNAL_ERROR_CODE` JSON-RPC error.
async fn update_user_saved_config(&self, request_id: RequestId, params: UpdateConfigParams) {
    let UpdateConfigParams { config } = params;
    // Persist first; if the write fails, nothing else is worth doing.
    if let Err(err) = persist_user_saved_config(&self.config.codex_home, &config).await {
        let error = JSONRPCErrorError {
            code: INTERNAL_ERROR_CODE,
            message: format!("failed to persist config.toml: {err}"),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    }
    // Read back from disk rather than echoing `config`, so the client sees
    // what actually survived the TOML round-trip.
    let toml_value = match load_config_as_toml(&self.config.codex_home).await {
        Ok(val) => val,
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to load config.toml: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }
    };
    let cfg: ConfigToml = match toml_value.try_into() {
        Ok(cfg) => cfg,
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to parse config.toml: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }
    };
    let response = UpdateConfigResponse { config: cfg.into() };
    self.outgoing.send_response(request_id, response).await;
}

View File

@@ -30,6 +30,7 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::UpdateConfigParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
@@ -227,9 +228,18 @@ impl McpProcess {
self.send_request("getAuthStatus", params).await
}
/// Send a `getUserSavedConfig` JSON-RPC request.
pub async fn send_get_user_saved_config_request(&mut self) -> anyhow::Result<i64> {
self.send_request("getUserSavedConfig", None).await
/// Send a `config/read` JSON-RPC request.
pub async fn send_get_config_request(&mut self) -> anyhow::Result<i64> {
self.send_request("config/read", None).await
}
/// Send a `config/update` JSON-RPC request.
///
/// `params` carries the full desired user configuration; returns the id of
/// the outgoing request so callers can await the matching response.
pub async fn send_update_config_request(
    &mut self,
    params: UpdateConfigParams,
) -> anyhow::Result<i64> {
    // Serialize to a generic JSON value so the shared send_request helper
    // can attach it as the request's `params`.
    let params = Some(serde_json::to_value(params)?);
    self.send_request("config/update", params).await
}
/// Send a `getUserAgent` JSON-RPC request.

View File

@@ -1,15 +1,21 @@
use std::collections::HashMap;
use std::path::Path;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserSavedConfigResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::McpOAuthCredentialsStoreMode;
use codex_app_server_protocol::McpServerConfig as ProtocolMcpServerConfig;
use codex_app_server_protocol::McpServerTransportConfig as ProtocolMcpServerTransportConfig;
use codex_app_server_protocol::Profile;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxSettings;
use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UpdateConfigParams;
use codex_app_server_protocol::UpdateConfigResponse;
use codex_app_server_protocol::UserSavedConfig;
use codex_core::config::ConfigToml;
use codex_core::protocol::AskForApproval;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
@@ -36,6 +42,7 @@ model_verbosity = "medium"
profile = "test"
forced_chatgpt_workspace_id = "12345678-0000-0000-0000-000000000000"
forced_login_method = "chatgpt"
mcp_oauth_credentials_store = "keyring"
[sandbox_workspace_write]
writable_roots = ["/tmp"]
@@ -55,116 +62,330 @@ model_reasoning_summary = "detailed"
model_verbosity = "medium"
model_provider = "openai"
chatgpt_base_url = "https://api.chatgpt.com"
[mcp_servers.docs]
command = "codex-docs"
args = ["serve"]
env_vars = ["DOCS_TOKEN"]
cwd = "/tmp/docs"
startup_timeout_sec = 12.5
tool_timeout_sec = 42.0
enabled = false
enabled_tools = ["read_docs"]
disabled_tools = ["delete_docs"]
[mcp_servers.docs.env]
PLAN = "gold"
[mcp_servers.issues]
url = "https://example.com/mcp"
bearer_token_env_var = "MCP_TOKEN"
startup_timeout_sec = 30.0
tool_timeout_sec = 15.0
[mcp_servers.issues.http_headers]
"X-Test" = "42"
[mcp_servers.issues.env_http_headers]
"X-Token" = "TOKEN_ENV"
"#,
)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn get_config_toml_parses_all_fields() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).expect("write config.toml");
async fn get_config_toml_parses_all_fields() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_get_user_saved_config_request()
.await
.expect("send getUserSavedConfig");
let request_id = mcp.send_get_config_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
.await??;
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: Some(AskForApproval::OnRequest),
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
sandbox_settings: Some(SandboxSettings {
writable_roots: vec!["/tmp".into()],
network_access: Some(true),
exclude_tmpdir_env_var: Some(true),
exclude_slash_tmp: Some(true),
}),
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
model: Some("gpt-5-codex".into()),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
tools: Some(Tools {
web_search: Some(false),
view_image: Some(true),
}),
profile: Some("test".to_string()),
profiles: HashMap::from([(
"test".into(),
Profile {
model: Some("gpt-4o".into()),
approval_policy: Some(AskForApproval::OnRequest),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
model_provider: Some("openai".into()),
chatgpt_base_url: Some("https://api.chatgpt.com".into()),
let config: UserSavedConfig = to_response(resp)?;
let expected = UserSavedConfig {
approval_policy: Some(AskForApproval::OnRequest),
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
sandbox_settings: Some(SandboxSettings {
writable_roots: vec!["/tmp".into()],
network_access: Some(true),
exclude_tmpdir_env_var: Some(true),
exclude_slash_tmp: Some(true),
}),
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
model: Some("gpt-5-codex".into()),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
tools: Some(Tools {
web_search: Some(false),
view_image: Some(true),
}),
mcp_servers: HashMap::from([
(
"docs".into(),
ProtocolMcpServerConfig {
transport: ProtocolMcpServerTransportConfig::Stdio {
command: "codex-docs".into(),
args: vec!["serve".into()],
env: Some(HashMap::from([("PLAN".into(), "gold".into())])),
env_vars: vec!["DOCS_TOKEN".into()],
cwd: Some("/tmp/docs".into()),
},
enabled: false,
startup_timeout_sec: Some(12.5),
tool_timeout_sec: Some(42.0),
enabled_tools: Some(vec!["read_docs".into()]),
disabled_tools: Some(vec!["delete_docs".into()]),
},
)]),
},
),
(
"issues".into(),
ProtocolMcpServerConfig {
transport: ProtocolMcpServerTransportConfig::StreamableHttp {
url: "https://example.com/mcp".into(),
bearer_token_env_var: Some("MCP_TOKEN".into()),
http_headers: Some(HashMap::from([("X-Test".into(), "42".into())])),
env_http_headers: Some(HashMap::from([(
"X-Token".into(),
"TOKEN_ENV".into(),
)])),
},
enabled: true,
startup_timeout_sec: Some(30.0),
tool_timeout_sec: Some(15.0),
enabled_tools: None,
disabled_tools: None,
},
),
]),
mcp_oauth_credentials_store: Some(McpOAuthCredentialsStoreMode::Keyring),
profile: Some("test".to_string()),
profiles: HashMap::from([(
"test".into(),
Profile {
model: Some("gpt-4o".into()),
approval_policy: Some(AskForApproval::OnRequest),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
model_provider: Some("openai".into()),
chatgpt_base_url: Some("https://api.chatgpt.com".into()),
},
)]),
};
assert_eq!(config, expected);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_config_toml_empty() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
async fn get_config_toml_empty() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_get_user_saved_config_request()
.await
.expect("send getUserSavedConfig");
let request_id = mcp.send_get_config_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
.await??;
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: None,
sandbox_mode: None,
sandbox_settings: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
model: None,
model_reasoning_effort: None,
model_reasoning_summary: None,
model_verbosity: None,
tools: None,
profile: None,
profiles: HashMap::new(),
},
let config: UserSavedConfig = to_response(resp)?;
let expected = UserSavedConfig {
approval_policy: None,
sandbox_mode: None,
sandbox_settings: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
model: None,
model_reasoning_effort: None,
model_reasoning_summary: None,
model_verbosity: None,
tools: None,
mcp_servers: HashMap::new(),
mcp_oauth_credentials_store: None,
profile: None,
profiles: HashMap::new(),
};
assert_eq!(config, expected);
Ok(())
}
/// End-to-end check of `config/update` on an empty CODEX_HOME: the RPC
/// response echoes the round-tripped config, the on-disk `config.toml`
/// parses back to the same value, and a follow-up `config/read` agrees.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn update_config_persists_all_fields() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let desired = sample_user_saved_config();
    let request_id = mcp
        .send_update_config_request(UpdateConfigParams {
            config: desired.clone(),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let response: UpdateConfigResponse = to_response(resp)?;
    assert_eq!(response.config, desired);
    // Independently verify what actually landed on disk.
    let config_contents = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
    let config_toml: ConfigToml = toml::from_str(&config_contents)?;
    let persisted: UserSavedConfig = config_toml.into();
    assert_eq!(persisted, desired);
    // Finally, confirm config/read observes the same state via the server.
    let read_request_id = mcp.send_get_config_request().await?;
    let read_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(read_request_id)),
    )
    .await??;
    let read_config: UserSavedConfig = to_response(read_resp)?;
    assert_eq!(read_config, desired);
    Ok(())
}
/// Starting from a fully populated `config.toml`, sending an all-empty
/// config via `config/update` must clear every managed field — verified in
/// the RPC response, on disk, and through a follow-up `config/read`.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn update_config_clears_missing_fields() -> Result<()> {
    let codex_home = TempDir::new()?;
    // Seed a config with every field set so the update has something to clear.
    create_config_toml(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let desired = empty_user_saved_config();
    let request_id = mcp
        .send_update_config_request(UpdateConfigParams {
            config: desired.clone(),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let response: UpdateConfigResponse = to_response(resp)?;
    assert_eq!(response.config, desired);
    // The file should round-trip to the same (empty) config.
    let config_contents = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
    let config_toml: ConfigToml = toml::from_str(&config_contents)?;
    let persisted: UserSavedConfig = config_toml.into();
    assert_eq!(persisted, desired);
    // And config/read must agree.
    let read_request_id = mcp.send_get_config_request().await?;
    let read_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(read_request_id)),
    )
    .await??;
    let read_config: UserSavedConfig = to_response(read_resp)?;
    assert_eq!(read_config, desired);
    Ok(())
}
/// Fixture: a `UserSavedConfig` with every field unset/empty, used to
/// exercise the "update clears missing fields" path.
fn empty_user_saved_config() -> UserSavedConfig {
    UserSavedConfig {
        approval_policy: None,
        sandbox_mode: None,
        sandbox_settings: None,
        forced_chatgpt_workspace_id: None,
        forced_login_method: None,
        model: None,
        model_reasoning_effort: None,
        model_reasoning_summary: None,
        model_verbosity: None,
        tools: None,
        mcp_servers: HashMap::new(),
        mcp_oauth_credentials_store: None,
        profile: None,
        profiles: HashMap::new(),
    }
}
/// Fixture: a fully populated `UserSavedConfig` mirroring the TOML written
/// by `create_config_toml`, covering both MCP transports, OAuth store mode,
/// sandbox settings, tools, and a named profile.
fn sample_user_saved_config() -> UserSavedConfig {
    UserSavedConfig {
        approval_policy: Some(AskForApproval::OnRequest),
        sandbox_mode: Some(SandboxMode::WorkspaceWrite),
        sandbox_settings: Some(SandboxSettings {
            writable_roots: vec!["/tmp".into()],
            network_access: Some(true),
            exclude_tmpdir_env_var: Some(true),
            exclude_slash_tmp: Some(true),
        }),
        forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
        forced_login_method: Some(ForcedLoginMethod::Chatgpt),
        model: Some("gpt-5-codex".into()),
        model_reasoning_effort: Some(ReasoningEffort::High),
        model_reasoning_summary: Some(ReasoningSummary::Detailed),
        model_verbosity: Some(Verbosity::Medium),
        tools: Some(Tools {
            web_search: Some(false),
            view_image: Some(true),
        }),
        mcp_servers: HashMap::from([
            (
                // Stdio transport with every optional field populated.
                "docs".into(),
                ProtocolMcpServerConfig {
                    transport: ProtocolMcpServerTransportConfig::Stdio {
                        command: "codex-docs".into(),
                        args: vec!["serve".into()],
                        env: Some(HashMap::from([("PLAN".into(), "gold".into())])),
                        env_vars: vec!["DOCS_TOKEN".into()],
                        cwd: Some("/tmp/docs".into()),
                    },
                    enabled: false,
                    startup_timeout_sec: Some(12.5),
                    tool_timeout_sec: Some(42.0),
                    enabled_tools: Some(vec!["read_docs".into()]),
                    disabled_tools: Some(vec!["delete_docs".into()]),
                },
            ),
            (
                // Streamable-HTTP transport with header maps.
                "issues".into(),
                ProtocolMcpServerConfig {
                    transport: ProtocolMcpServerTransportConfig::StreamableHttp {
                        url: "https://example.com/mcp".into(),
                        bearer_token_env_var: Some("MCP_TOKEN".into()),
                        http_headers: Some(HashMap::from([("X-Test".into(), "42".into())])),
                        env_http_headers: Some(HashMap::from([(
                            "X-Token".into(),
                            "TOKEN_ENV".into(),
                        )])),
                    },
                    enabled: true,
                    startup_timeout_sec: Some(30.0),
                    tool_timeout_sec: Some(15.0),
                    enabled_tools: None,
                    disabled_tools: None,
                },
            ),
        ]),
        mcp_oauth_credentials_store: Some(McpOAuthCredentialsStoreMode::Keyring),
        profile: Some("test".into()),
        profiles: HashMap::from([(
            "test".into(),
            Profile {
                model: Some("gpt-4o".into()),
                approval_policy: Some(AskForApproval::OnRequest),
                model_reasoning_effort: Some(ReasoningEffort::High),
                model_reasoning_summary: Some(ReasoningSummary::Detailed),
                model_verbosity: Some(Verbosity::Medium),
                model_provider: Some("openai".into()),
                chatgpt_base_url: Some("https://api.chatgpt.com".into()),
            },
        )]),
    }
}

View File

@@ -59,7 +59,7 @@ use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::config_types::McpServerTransportConfig;
use crate::config_types::ShellEnvironmentPolicy;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::environment_context::EnvironmentContext;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
@@ -867,7 +867,7 @@ impl Session {
turn_context: &TurnContext,
rollout_items: &[RolloutItem],
) -> Vec<ResponseItem> {
let mut history = ContextManager::new();
let mut history = ConversationHistory::new();
for item in rollout_items {
match item {
RolloutItem::ResponseItem(response_item) => {
@@ -941,7 +941,7 @@ impl Session {
state.history_snapshot()
}
pub(crate) async fn clone_history(&self) -> ContextManager {
pub(crate) async fn clone_history(&self) -> ConversationHistory {
let state = self.state.lock().await;
state.clone_history()
}
@@ -1524,7 +1524,7 @@ pub(crate) async fn run_task(
// For normal turns, continue recording to the session history as before.
let is_review_mode = turn_context.is_review_mode;
let mut review_thread_history: ContextManager = ContextManager::new();
let mut review_thread_history: ConversationHistory = ConversationHistory::new();
if is_review_mode {
// Seed review threads with environment context so the model knows the working directory.
review_thread_history
@@ -2843,7 +2843,7 @@ mod tests {
turn_context: &TurnContext,
) -> (Vec<RolloutItem>, Vec<ResponseItem>) {
let mut rollout_items = Vec::new();
let mut live_history = ContextManager::new();
let mut live_history = ConversationHistory::new();
let initial_context = session.build_initial_context(turn_context);
for item in &initial_context {

View File

@@ -34,6 +34,7 @@ use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use anyhow::Context;
use codex_app_server_protocol::McpOAuthCredentialsStoreMode;
use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_protocol::config_types::ForcedLoginMethod;
@@ -967,6 +968,11 @@ impl From<ConfigToml> for UserSavedConfig {
.into_iter()
.map(|(k, v)| (k, v.into()))
.collect();
let mcp_servers = config_toml
.mcp_servers
.into_iter()
.map(|(k, v)| (k, v.into()))
.collect();
Self {
approval_policy: config_toml.approval_policy,
@@ -979,6 +985,10 @@ impl From<ConfigToml> for UserSavedConfig {
model_reasoning_summary: config_toml.model_reasoning_summary,
model_verbosity: config_toml.model_verbosity,
tools: config_toml.tools.map(From::from),
mcp_servers,
mcp_oauth_credentials_store: config_toml
.mcp_oauth_credentials_store
.map(map_oauth_credentials_store_mode),
profile: config_toml.profile,
profiles,
}
@@ -1488,6 +1498,16 @@ fn default_review_model() -> String {
OPENAI_DEFAULT_REVIEW_MODEL.to_string()
}
/// Converts the core-layer OAuth credentials store mode into its
/// app-server-protocol counterpart (a 1:1 variant mapping).
fn map_oauth_credentials_store_mode(
    mode: OAuthCredentialsStoreMode,
) -> McpOAuthCredentialsStoreMode {
    match mode {
        OAuthCredentialsStoreMode::Auto => McpOAuthCredentialsStoreMode::Auto,
        OAuthCredentialsStoreMode::File => McpOAuthCredentialsStoreMode::File,
        OAuthCredentialsStoreMode::Keyring => McpOAuthCredentialsStoreMode::Keyring,
    }
}
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.

View File

@@ -1,8 +1,26 @@
use crate::config::CONFIG_TOML_FILE;
use crate::config_types::McpServerConfig;
use crate::config_types::McpServerTransportConfig;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_app_server_protocol::McpOAuthCredentialsStoreMode;
use codex_app_server_protocol::McpServerConfig as ProtocolMcpServerConfig;
use codex_app_server_protocol::McpServerTransportConfig as ProtocolMcpServerTransportConfig;
use codex_app_server_protocol::Profile;
use codex_app_server_protocol::SandboxSettings;
use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::Path;
use std::time::Duration;
use tempfile::NamedTempFile;
use toml_edit::Array as TomlArray;
use toml_edit::DocumentMut;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;
pub const CONFIG_KEY_MODEL: &str = "model";
pub const CONFIG_KEY_EFFORT: &str = "model_reasoning_effort";
@@ -49,6 +67,87 @@ pub async fn persist_overrides_and_clear_if_none(
persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Remove).await
}
/// Persists a complete [`UserSavedConfig`] to `CODEX_HOME/config.toml`.
///
/// Semantics are "replace": each key managed here is either written with
/// the supplied value or removed from the document when the value is
/// `None`/empty, so callers must send the full desired config. Keys not
/// managed here, plus existing formatting and comments, are preserved by
/// editing the document via `toml_edit` rather than re-serializing.
///
/// The write goes through a temp file in `codex_home` which is then
/// persisted (renamed) over `config.toml`, so readers never observe a
/// partially written file.
///
/// # Errors
/// Fails if the existing file cannot be read or parsed, an MCP server
/// entry cannot be converted, or any filesystem step fails.
pub async fn persist_user_saved_config(codex_home: &Path, config: &UserSavedConfig) -> Result<()> {
    // Convert MCP server entries up front so a bad entry fails before we
    // touch the file at all.
    let servers = convert_mcp_servers(&config.mcp_servers)?;
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    let existing = tokio::fs::read_to_string(&config_path).await;
    // A missing file is fine (fresh install → empty document); any other
    // I/O error is fatal.
    let mut doc = match existing {
        Ok(contents) => contents.parse::<DocumentMut>()?,
        Err(err) if err.kind() == ErrorKind::NotFound => DocumentMut::new(),
        Err(err) => return Err(err.into()),
    };
    {
        let root = doc.as_table_mut();
        // Scalar keys: written when Some, removed when None.
        set_string_option(
            root,
            "approval_policy",
            config.approval_policy.map(|value| value.to_string()),
        );
        set_string_option(
            root,
            "sandbox_mode",
            config.sandbox_mode.map(|mode| mode.to_string()),
        );
        set_sandbox_workspace_write(root, config.sandbox_settings.as_ref())?;
        set_string_option(
            root,
            "forced_chatgpt_workspace_id",
            config.forced_chatgpt_workspace_id.clone(),
        );
        set_string_option(
            root,
            "forced_login_method",
            config.forced_login_method.map(|mode| mode.to_string()),
        );
        set_string_option(root, "model", config.model.clone());
        set_string_option(
            root,
            "model_reasoning_effort",
            config
                .model_reasoning_effort
                .map(|effort| effort.to_string()),
        );
        set_string_option(
            root,
            "model_reasoning_summary",
            config
                .model_reasoning_summary
                .map(|summary| summary.to_string()),
        );
        set_string_option(
            root,
            "model_verbosity",
            config
                .model_verbosity
                .map(|verbosity| verbosity.to_string()),
        );
        set_tools(root, config.tools.as_ref())?;
        set_string_option(
            root,
            "mcp_oauth_credentials_store",
            config
                .mcp_oauth_credentials_store
                .map(protocol_oauth_mode_to_str)
                .map(String::from),
        );
        set_string_option(root, "profile", config.profile.clone());
        // Nested tables: [profiles.*] and [mcp_servers.*].
        set_profiles(root, &config.profiles)?;
        set_mcp_servers(root, &servers)?;
    }
    tokio::fs::create_dir_all(codex_home)
        .await
        .with_context(|| format!("failed to create Codex home at {}", codex_home.display()))?;
    // Render to a sibling temp file, then rename into place for atomicity.
    let tmp_file = NamedTempFile::new_in(codex_home)?;
    tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
    tmp_file.persist(config_path)?;
    Ok(())
}
/// Apply a single override onto a `toml_edit` document while preserving
/// existing formatting/comments.
/// The key is expressed as explicit segments to correctly handle keys that
@@ -212,6 +311,340 @@ fn remove_toml_edit_segments(doc: &mut DocumentMut, segments: &[&str]) -> bool {
current.remove(segments[segments.len() - 1]).is_some()
}
/// Writes `value` at `key` in `table`, or deletes the key entirely when
/// `value` is `None`, so an unset field never leaves a stale entry behind.
fn set_string_option(table: &mut TomlTable, key: &str, value: Option<String>) {
    if let Some(value) = value {
        table[key] = toml_edit::value(value);
    } else {
        table.remove(key);
    }
}
/// Replaces the `[sandbox_workspace_write]` table with the given settings.
///
/// The existing table is always dropped first; it is re-created only when
/// at least one setting is present, so fully-empty settings leave no table
/// behind.
fn set_sandbox_workspace_write(
    table: &mut TomlTable,
    settings: Option<&SandboxSettings>,
) -> Result<()> {
    table.remove("sandbox_workspace_write");
    if let Some(settings) = settings {
        let mut sandbox = TomlTable::new();
        sandbox.set_implicit(false);
        if !settings.writable_roots.is_empty() {
            let mut roots = TomlArray::new();
            for root_path in &settings.writable_roots {
                roots.push(root_path.to_string_lossy().to_string());
            }
            sandbox["writable_roots"] = TomlItem::Value(roots.into());
        }
        // Boolean flags are emitted in a fixed order, skipping unset ones.
        for (key, flag) in [
            ("network_access", settings.network_access),
            ("exclude_tmpdir_env_var", settings.exclude_tmpdir_env_var),
            ("exclude_slash_tmp", settings.exclude_slash_tmp),
        ] {
            if let Some(flag) = flag {
                sandbox[key] = toml_edit::value(flag);
            }
        }
        if !sandbox.is_empty() {
            table.insert("sandbox_workspace_write", TomlItem::Table(sandbox));
        }
    }
    Ok(())
}
/// Replaces the `[tools]` table; it is omitted entirely when no tool flag
/// is set, so an empty `Tools` leaves no table behind.
fn set_tools(table: &mut TomlTable, tools: Option<&Tools>) -> Result<()> {
    table.remove("tools");
    if let Some(tools) = tools {
        let mut tools_table = TomlTable::new();
        tools_table.set_implicit(false);
        // Emit flags in a fixed order, skipping unset ones.
        for (key, flag) in [
            ("web_search", tools.web_search),
            ("view_image", tools.view_image),
        ] {
            if let Some(flag) = flag {
                tools_table[key] = toml_edit::value(flag);
            }
        }
        if !tools_table.is_empty() {
            table.insert("tools", TomlItem::Table(tools_table));
        }
    }
    Ok(())
}
/// Rewrites the `profiles` table from `profiles`.
///
/// Profiles are emitted in sorted key order so the generated TOML is
/// deterministic regardless of `HashMap` iteration order. Only fields that
/// are `Some` are written.
fn set_profiles(table: &mut TomlTable, profiles: &HashMap<String, Profile>) -> Result<()> {
    table.remove("profiles");
    if profiles.is_empty() {
        return Ok(());
    }
    let mut profiles_table = TomlTable::new();
    profiles_table.set_implicit(true);
    // Sort borrowed entries directly instead of cloning every key and then
    // re-looking each one up (the old approach needed an `expect`).
    let mut entries: Vec<_> = profiles.iter().collect();
    entries.sort_by(|(a, _), (b, _)| a.cmp(b));
    for (key, profile) in entries {
        let mut profile_table = TomlTable::new();
        profile_table.set_implicit(false);
        // Optional string-valued fields, written in a stable order.
        let string_fields = [
            ("model", profile.model.clone()),
            ("model_provider", profile.model_provider.clone()),
            (
                "approval_policy",
                profile.approval_policy.map(|v| v.to_string()),
            ),
            (
                "model_reasoning_effort",
                profile.model_reasoning_effort.map(|v| v.to_string()),
            ),
            (
                "model_reasoning_summary",
                profile.model_reasoning_summary.map(|v| v.to_string()),
            ),
            (
                "model_verbosity",
                profile.model_verbosity.map(|v| v.to_string()),
            ),
            ("chatgpt_base_url", profile.chatgpt_base_url.clone()),
        ];
        for (field, value) in string_fields {
            if let Some(value) = value {
                profile_table[field] = toml_edit::value(value);
            }
        }
        profiles_table.insert(key, TomlItem::Table(profile_table));
    }
    table.insert("profiles", TomlItem::Table(profiles_table));
    Ok(())
}
fn set_mcp_servers(
table: &mut TomlTable,
servers: &BTreeMap<String, McpServerConfig>,
) -> Result<()> {
table.remove("mcp_servers");
if servers.is_empty() {
return Ok(());
}
let mut servers_table = TomlTable::new();
servers_table.set_implicit(true);
for (name, config) in servers {
let mut entry = TomlTable::new();
entry.set_implicit(false);
match &config.transport {
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => {
entry["command"] = toml_edit::value(command.clone());
if !args.is_empty() {
let mut args_array = TomlArray::new();
for arg in args {
args_array.push(arg.clone());
}
entry["args"] = TomlItem::Value(args_array.into());
}
if let Some(env) = env
&& !env.is_empty()
{
let mut env_table = TomlTable::new();
env_table.set_implicit(false);
let mut pairs: Vec<_> = env.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
for (key, value) in pairs {
env_table.insert(key, toml_edit::value(value.clone()));
}
entry["env"] = TomlItem::Table(env_table);
}
if !env_vars.is_empty() {
let mut vars = TomlArray::new();
for var in env_vars {
vars.push(var.clone());
}
entry["env_vars"] = TomlItem::Value(vars.into());
}
if let Some(cwd) = cwd {
entry["cwd"] = toml_edit::value(cwd.to_string_lossy().to_string());
}
}
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
entry["url"] = toml_edit::value(url.clone());
if let Some(env_var) = bearer_token_env_var {
entry["bearer_token_env_var"] = toml_edit::value(env_var.clone());
}
if let Some(headers) = http_headers
&& !headers.is_empty()
{
let mut headers_table = TomlTable::new();
headers_table.set_implicit(false);
let mut pairs: Vec<_> = headers.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
for (key, value) in pairs {
headers_table.insert(key, toml_edit::value(value.clone()));
}
entry["http_headers"] = TomlItem::Table(headers_table);
}
if let Some(headers) = env_http_headers
&& !headers.is_empty()
{
let mut headers_table = TomlTable::new();
headers_table.set_implicit(false);
let mut pairs: Vec<_> = headers.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
for (key, value) in pairs {
headers_table.insert(key, toml_edit::value(value.clone()));
}
entry["env_http_headers"] = TomlItem::Table(headers_table);
}
}
}
entry["enabled"] = toml_edit::value(config.enabled);
if let Some(startup) = config.startup_timeout_sec {
entry["startup_timeout_sec"] = toml_edit::value(startup.as_secs_f64());
}
if let Some(tool_timeout) = config.tool_timeout_sec {
entry["tool_timeout_sec"] = toml_edit::value(tool_timeout.as_secs_f64());
}
if let Some(enabled_tools) = config.enabled_tools.as_ref()
&& !enabled_tools.is_empty()
{
let mut tools = TomlArray::new();
for tool in enabled_tools {
tools.push(tool.clone());
}
entry["enabled_tools"] = TomlItem::Value(tools.into());
}
if let Some(disabled_tools) = config.disabled_tools.as_ref()
&& !disabled_tools.is_empty()
{
let mut tools = TomlArray::new();
for tool in disabled_tools {
tools.push(tool.clone());
}
entry["disabled_tools"] = TomlItem::Value(tools.into());
}
servers_table.insert(name, TomlItem::Table(entry));
}
table.insert("mcp_servers", TomlItem::Table(servers_table));
Ok(())
}
/// Converts every protocol-level MCP server config into the core config type,
/// collecting the results into a name-keyed `BTreeMap`; fails on the first
/// config that cannot be converted.
fn convert_mcp_servers(
    servers: &HashMap<String, ProtocolMcpServerConfig>,
) -> Result<BTreeMap<String, McpServerConfig>> {
    servers
        .iter()
        .map(|(name, config)| Ok((name.clone(), convert_mcp_server_config(config)?)))
        .collect()
}
/// Maps a protocol-level MCP server config onto the core `McpServerConfig`.
///
/// Fails when a timeout value cannot be represented as a `Duration`
/// (see `duration_from_secs`).
fn convert_mcp_server_config(config: &ProtocolMcpServerConfig) -> Result<McpServerConfig> {
    // Both timeouts share the same fallible seconds-to-Duration conversion.
    let to_duration = |secs: Option<f64>| secs.map(duration_from_secs).transpose();
    let startup_timeout_sec = to_duration(config.startup_timeout_sec)?;
    let tool_timeout_sec = to_duration(config.tool_timeout_sec)?;
    let transport = match &config.transport {
        ProtocolMcpServerTransportConfig::Stdio {
            command,
            args,
            env,
            env_vars,
            cwd,
        } => McpServerTransportConfig::Stdio {
            command: command.clone(),
            args: args.clone(),
            env: env.clone(),
            env_vars: env_vars.clone(),
            cwd: cwd.clone(),
        },
        ProtocolMcpServerTransportConfig::StreamableHttp {
            url,
            bearer_token_env_var,
            http_headers,
            env_http_headers,
        } => McpServerTransportConfig::StreamableHttp {
            url: url.clone(),
            bearer_token_env_var: bearer_token_env_var.clone(),
            http_headers: http_headers.clone(),
            env_http_headers: env_http_headers.clone(),
        },
    };
    Ok(McpServerConfig {
        transport,
        enabled: config.enabled,
        startup_timeout_sec,
        tool_timeout_sec,
        enabled_tools: config.enabled_tools.clone(),
        disabled_tools: config.disabled_tools.clone(),
    })
}
/// Converts a floating-point seconds value into a `Duration`, surfacing the
/// standard library's rejection (negative, NaN, or out-of-range input) as an
/// `anyhow` error.
fn duration_from_secs(value: f64) -> Result<Duration> {
    match Duration::try_from_secs_f64(value) {
        Ok(duration) => Ok(duration),
        Err(err) => Err(anyhow!(err)),
    }
}
fn protocol_oauth_mode_to_str(mode: McpOAuthCredentialsStoreMode) -> &'static str {
match mode {
McpOAuthCredentialsStoreMode::Auto => "auto",
McpOAuthCredentialsStoreMode::File => "file",
McpOAuthCredentialsStoreMode::Keyring => "keyring",
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -388,6 +388,52 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
}
}
/// Converts the core MCP server config into its app-server protocol mirror,
/// flattening `Duration` timeouts into fractional seconds.
impl From<McpServerConfig> for codex_app_server_protocol::McpServerConfig {
    fn from(cfg: McpServerConfig) -> Self {
        let startup_timeout_sec = cfg.startup_timeout_sec.map(|d| d.as_secs_f64());
        let tool_timeout_sec = cfg.tool_timeout_sec.map(|d| d.as_secs_f64());
        Self {
            transport: cfg.transport.into(),
            enabled: cfg.enabled,
            startup_timeout_sec,
            tool_timeout_sec,
            enabled_tools: cfg.enabled_tools,
            disabled_tools: cfg.disabled_tools,
        }
    }
}
/// Converts the core transport config into its app-server protocol mirror.
impl From<McpServerTransportConfig> for codex_app_server_protocol::McpServerTransportConfig {
    fn from(cfg: McpServerTransportConfig) -> Self {
        // The two enums have identical shapes, so each variant maps
        // field-for-field by moving its fields across unchanged.
        match cfg {
            McpServerTransportConfig::Stdio {
                command,
                args,
                env,
                env_vars,
                cwd,
            } => Self::Stdio {
                command,
                args,
                env,
                env_vars,
                cwd,
            },
            McpServerTransportConfig::StreamableHttp {
                url,
                bearer_token_env_var,
                http_headers,
                env_http_headers,
            } => Self::StreamableHttp {
                url,
                bearer_token_env_var,
                http_headers,
                env_http_headers,
            },
        }
    }
}
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {

View File

@@ -1,3 +0,0 @@
// Private implementation module for the context manager.
mod manager;
// Crate-internal re-export so callers name `ContextManager` directly.
pub(crate) use manager::ContextManager;
// Truncation helpers (byte/line capping with a truncation notice).
pub mod truncation;

View File

@@ -1,159 +0,0 @@
use codex_utils_string::take_bytes_at_char_boundary;
/// Limits and notice text applied when truncating content for previews.
#[derive(Clone, Copy)]
pub(crate) struct TruncationConfig {
    /// Maximum number of bytes to keep (cut at a char boundary).
    pub max_bytes: usize,
    /// Maximum number of lines to keep.
    pub max_lines: usize,
    /// Appended to the output whenever truncation occurred.
    pub truncation_notice: &'static str,
}
// Telemetry preview limits: keep log events smaller than model budgets.
pub(crate) const TELEMETRY_PREVIEW_MAX_BYTES: usize = 2 * 1024; // 2 KiB
pub(crate) const TELEMETRY_PREVIEW_MAX_LINES: usize = 64; // lines
pub(crate) const TELEMETRY_PREVIEW_TRUNCATION_NOTICE: &str =
    "[... telemetry preview truncated ...]";
// Default policy for truncating context/tool output, built from the
// telemetry preview limits above.
pub(crate) const CONTEXT_OUTPUT_TRUNCATION: TruncationConfig = TruncationConfig {
    max_bytes: TELEMETRY_PREVIEW_MAX_BYTES,
    max_lines: TELEMETRY_PREVIEW_MAX_LINES,
    truncation_notice: TELEMETRY_PREVIEW_TRUNCATION_NOTICE,
};
/// Truncates `content` to the byte and line budgets in `config`, appending the
/// configured notice when anything was dropped. Content that already fits both
/// budgets is returned unchanged.
pub(crate) fn truncate_with_config(content: &str, config: TruncationConfig) -> String {
    let TruncationConfig {
        max_bytes,
        max_lines,
        truncation_notice,
    } = config;
    let byte_capped = take_bytes_at_char_boundary(content, max_bytes);
    let truncated_by_bytes = byte_capped.len() < content.len();
    let kept_lines: Vec<&str> = byte_capped.lines().take(max_lines).collect();
    let truncated_by_lines = byte_capped.lines().nth(kept_lines.len()).is_some();
    if !truncated_by_bytes && !truncated_by_lines {
        return content.to_string();
    }
    let mut preview = kept_lines.join("\n");
    // Restore a trailing newline the line iterator swallowed, so the notice
    // lands where the capped slice actually ended.
    if preview.len() < byte_capped.len()
        && byte_capped
            .as_bytes()
            .get(preview.len())
            .is_some_and(|byte| *byte == b'\n')
    {
        preview.push('\n');
    }
    if !preview.is_empty() && !preview.ends_with('\n') {
        preview.push('\n');
    }
    preview.push_str(truncation_notice);
    preview
}
/// Truncates context/tool output using the default telemetry preview limits.
pub(crate) fn truncate_context_output(content: &str) -> String {
    truncate_with_config(content, CONTEXT_OUTPUT_TRUNCATION)
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    // Content within both budgets is returned unchanged.
    #[test]
    fn truncate_with_config_returns_original_within_limits() {
        let content = "short output";
        let config = TruncationConfig {
            max_bytes: 64,
            max_lines: 5,
            truncation_notice: "[notice]",
        };
        assert_eq!(truncate_with_config(content, config), content);
    }

    // A single long line is cut at the byte budget and the notice appended.
    #[test]
    fn truncate_with_config_truncates_by_bytes() {
        let config = TruncationConfig {
            max_bytes: 16,
            max_lines: 10,
            truncation_notice: "[notice]",
        };
        let content = "abcdefghijklmnopqrstuvwxyz";
        let truncated = truncate_with_config(content, config);
        assert!(truncated.contains("[notice]"));
    }

    // Extra lines beyond the line budget are dropped; the notice takes at
    // most one additional line.
    #[test]
    fn truncate_with_config_truncates_by_lines() {
        let config = TruncationConfig {
            max_bytes: 1024,
            max_lines: 2,
            truncation_notice: "[notice]",
        };
        let content = "l1\nl2\nl3\nl4";
        let truncated = truncate_with_config(content, config);
        assert!(truncated.lines().count() <= 3);
        assert!(truncated.contains("[notice]"));
    }

    // The telemetry limits also pass short content through untouched.
    #[test]
    fn telemetry_preview_returns_original_within_limits() {
        let content = "short output";
        let config = TruncationConfig {
            max_bytes: TELEMETRY_PREVIEW_MAX_BYTES,
            max_lines: TELEMETRY_PREVIEW_MAX_LINES,
            truncation_notice: TELEMETRY_PREVIEW_TRUNCATION_NOTICE,
        };
        assert_eq!(truncate_with_config(content, config), content);
    }

    // Byte-capped output never exceeds the budget plus the notice (and the
    // one newline that may precede it).
    #[test]
    fn telemetry_preview_truncates_by_bytes() {
        let config = TruncationConfig {
            max_bytes: TELEMETRY_PREVIEW_MAX_BYTES,
            max_lines: TELEMETRY_PREVIEW_MAX_LINES,
            truncation_notice: TELEMETRY_PREVIEW_TRUNCATION_NOTICE,
        };
        let content = "x".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 8);
        let preview = truncate_with_config(&content, config);
        assert!(preview.contains(TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
        assert!(
            preview.len()
                <= TELEMETRY_PREVIEW_MAX_BYTES + TELEMETRY_PREVIEW_TRUNCATION_NOTICE.len() + 1
        );
    }

    // Line-capped output ends with the notice as its final line.
    #[test]
    fn telemetry_preview_truncates_by_lines() {
        let config = TruncationConfig {
            max_bytes: TELEMETRY_PREVIEW_MAX_BYTES,
            max_lines: TELEMETRY_PREVIEW_MAX_LINES,
            truncation_notice: TELEMETRY_PREVIEW_TRUNCATION_NOTICE,
        };
        let content = (0..(TELEMETRY_PREVIEW_MAX_LINES + 5))
            .map(|idx| format!("line {idx}"))
            .collect::<Vec<_>>()
            .join("\n");
        let preview = truncate_with_config(&content, config);
        let lines: Vec<&str> = preview.lines().collect();
        assert!(lines.len() <= TELEMETRY_PREVIEW_MAX_LINES + 1);
        assert_eq!(lines.last(), Some(&TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
    }
}

View File

@@ -1,4 +1,3 @@
use crate::context_manager::truncation::truncate_context_output;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::TokenUsage;
@@ -7,13 +6,13 @@ use tracing::error;
/// Transcript of conversation history
#[derive(Debug, Clone, Default)]
pub(crate) struct ContextManager {
pub(crate) struct ConversationHistory {
/// The oldest items are at the beginning of the vector.
items: Vec<ResponseItem>,
token_info: Option<TokenUsageInfo>,
}
impl ContextManager {
impl ConversationHistory {
pub(crate) fn new() -> Self {
Self {
items: Vec::new(),
@@ -45,8 +44,7 @@ impl ContextManager {
continue;
}
let processed = Self::process_item(&item);
self.items.push(processed);
self.items.push(item.clone());
}
}
@@ -78,29 +76,6 @@ impl ContextManager {
self.remove_orphan_outputs();
}
fn process_item(item: &ResponseItem) -> ResponseItem {
match item {
ResponseItem::FunctionCallOutput { call_id, output } => {
let truncated_content = truncate_context_output(output.content.as_str());
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: truncated_content,
success: output.success,
},
}
}
ResponseItem::CustomToolCallOutput { call_id, output } => {
let truncated = truncate_context_output(output);
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: truncated,
}
}
_ => item.clone(),
}
}
/// Returns a clone of the contents in the transcript.
fn contents(&self) -> Vec<ResponseItem> {
self.items.clone()
@@ -131,7 +106,7 @@ impl ContextManager {
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: truncate_context_output("aborted"),
content: "aborted".to_string(),
success: None,
},
},
@@ -154,7 +129,7 @@ impl ContextManager {
idx,
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: truncate_context_output("aborted"),
output: "aborted".to_string(),
},
));
}
@@ -178,7 +153,7 @@ impl ContextManager {
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: truncate_context_output("aborted"),
content: "aborted".to_string(),
success: None,
},
},
@@ -274,10 +249,7 @@ impl ContextManager {
}
pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) {
self.items = items
.into_iter()
.map(|item| Self::process_item(&item))
.collect();
self.items = items;
}
/// Removes the corresponding paired item for the provided `item`, if any.
@@ -390,8 +362,6 @@ fn is_api_message(message: &ResponseItem) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use crate::context_manager::truncation::TELEMETRY_PREVIEW_MAX_BYTES;
use crate::context_manager::truncation::TELEMETRY_PREVIEW_TRUNCATION_NOTICE;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::LocalShellAction;
@@ -409,8 +379,8 @@ mod tests {
}
}
fn create_history_with_items(items: Vec<ResponseItem>) -> ContextManager {
let mut h = ContextManager::new();
fn create_history_with_items(items: Vec<ResponseItem>) -> ConversationHistory {
let mut h = ConversationHistory::new();
h.record_items(items.iter());
h
}
@@ -427,7 +397,7 @@ mod tests {
#[test]
fn filters_non_api_messages() {
let mut h = ContextManager::default();
let mut h = ConversationHistory::default();
// System message is not an API message; Other is ignored.
let system = ResponseItem::Message {
id: None,
@@ -465,61 +435,6 @@ mod tests {
);
}
#[test]
fn record_items_truncates_function_call_output() {
let mut h = ContextManager::new();
let long_content = "a".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 32);
let item = ResponseItem::FunctionCallOutput {
call_id: "call-long".to_string(),
output: FunctionCallOutputPayload {
content: long_content.clone(),
success: Some(true),
},
};
h.record_items([&item]);
let stored = h.contents();
let ResponseItem::FunctionCallOutput { output, .. } = &stored[0] else {
panic!("expected FunctionCallOutput variant");
};
assert!(
output
.content
.ends_with(TELEMETRY_PREVIEW_TRUNCATION_NOTICE),
"truncated content should end with notice"
);
assert!(
output.content.len() < long_content.len(),
"content should shrink after truncation"
);
}
#[test]
fn record_items_truncates_custom_tool_output() {
let mut h = ContextManager::new();
let long_content = "b".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 64);
let item = ResponseItem::CustomToolCallOutput {
call_id: "custom-long".to_string(),
output: long_content.clone(),
};
h.record_items([&item]);
let stored = h.contents();
let ResponseItem::CustomToolCallOutput { output, .. } = &stored[0] else {
panic!("expected CustomToolCallOutput variant");
};
assert!(
output.ends_with(TELEMETRY_PREVIEW_TRUNCATION_NOTICE),
"truncated output should end with notice"
);
assert!(
output.len() < long_content.len(),
"output should shrink after truncation"
);
}
#[test]
fn remove_first_item_removes_matching_output_for_function_call() {
let items = vec![

View File

@@ -20,7 +20,7 @@ pub mod config_edit;
pub mod config_loader;
pub mod config_profile;
pub mod config_types;
mod context_manager;
mod conversation_history;
pub mod custom_prompts;
mod environment_context;
pub mod error;

View File

@@ -1,5 +1,5 @@
use crate::codex::Session;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
@@ -11,7 +11,7 @@ use tracing::warn;
pub(crate) async fn process_items(
processed_items: Vec<crate::codex::ProcessedResponseItem>,
is_review_mode: bool,
review_thread_history: &mut ContextManager,
review_thread_history: &mut ConversationHistory,
sess: &Session,
) -> (Vec<ResponseInputItem>, Vec<ResponseItem>) {
let mut items_to_record_in_conversation_history = Vec::<ResponseItem>::new();

View File

@@ -3,7 +3,7 @@
use codex_protocol::models::ResponseItem;
use crate::codex::SessionConfiguration;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;
@@ -11,7 +11,7 @@ use crate::protocol::TokenUsageInfo;
/// Persistent, session-scoped state previously stored directly on `Session`.
pub(crate) struct SessionState {
pub(crate) session_configuration: SessionConfiguration,
pub(crate) history: ContextManager,
pub(crate) history: ConversationHistory,
pub(crate) latest_rate_limits: Option<RateLimitSnapshot>,
}
@@ -20,7 +20,7 @@ impl SessionState {
pub(crate) fn new(session_configuration: SessionConfiguration) -> Self {
Self {
session_configuration,
history: ContextManager::new(),
history: ConversationHistory::new(),
latest_rate_limits: None,
}
}
@@ -38,7 +38,7 @@ impl SessionState {
self.history.get_history()
}
pub(crate) fn clone_history(&self) -> ContextManager {
pub(crate) fn clone_history(&self) -> ConversationHistory {
self.history.clone()
}

View File

@@ -1,11 +1,15 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::tools::TELEMETRY_PREVIEW_MAX_BYTES;
use crate::tools::TELEMETRY_PREVIEW_MAX_LINES;
use crate::tools::TELEMETRY_PREVIEW_TRUNCATION_NOTICE;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ShellToolCallParams;
use codex_protocol::protocol::FileChange;
use codex_utils_string::take_bytes_at_char_boundary;
use mcp_types::CallToolResult;
use std::borrow::Cow;
use std::collections::HashMap;
@@ -72,7 +76,7 @@ pub enum ToolOutput {
impl ToolOutput {
pub fn log_preview(&self) -> String {
match self {
ToolOutput::Function { content, .. } => content.clone(),
ToolOutput::Function { content, .. } => telemetry_preview(content),
ToolOutput::Mcp { result } => format!("{result:?}"),
}
}
@@ -107,6 +111,46 @@ impl ToolOutput {
}
}
/// Produces a log-safe preview of `content`, capped to the telemetry byte and
/// line budgets, appending the truncation notice when anything was dropped.
/// Content within both budgets is returned unchanged.
fn telemetry_preview(content: &str) -> String {
    let byte_capped = take_bytes_at_char_boundary(content, TELEMETRY_PREVIEW_MAX_BYTES);
    let truncated_by_bytes = byte_capped.len() < content.len();
    let kept_lines: Vec<&str> = byte_capped
        .lines()
        .take(TELEMETRY_PREVIEW_MAX_LINES)
        .collect();
    let truncated_by_lines = byte_capped.lines().nth(kept_lines.len()).is_some();
    if !truncated_by_bytes && !truncated_by_lines {
        return content.to_string();
    }
    let mut preview = kept_lines.join("\n");
    // Restore a trailing newline the line iterator swallowed, so the notice
    // lands where the capped slice actually ended.
    if preview.len() < byte_capped.len()
        && byte_capped
            .as_bytes()
            .get(preview.len())
            .is_some_and(|byte| *byte == b'\n')
    {
        preview.push('\n');
    }
    if !preview.is_empty() && !preview.ends_with('\n') {
        preview.push('\n');
    }
    preview.push_str(TELEMETRY_PREVIEW_TRUNCATION_NOTICE);
    preview
}
#[cfg(test)]
mod tests {
use super::*;
@@ -152,6 +196,38 @@ mod tests {
other => panic!("expected FunctionCallOutput, got {other:?}"),
}
}
#[test]
fn telemetry_preview_returns_original_within_limits() {
let content = "short output";
assert_eq!(telemetry_preview(content), content);
}
#[test]
fn telemetry_preview_truncates_by_bytes() {
let content = "x".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 8);
let preview = telemetry_preview(&content);
assert!(preview.contains(TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
assert!(
preview.len()
<= TELEMETRY_PREVIEW_MAX_BYTES + TELEMETRY_PREVIEW_TRUNCATION_NOTICE.len() + 1
);
}
#[test]
fn telemetry_preview_truncates_by_lines() {
let content = (0..(TELEMETRY_PREVIEW_MAX_LINES + 5))
.map(|idx| format!("line {idx}"))
.collect::<Vec<_>>()
.join("\n");
let preview = telemetry_preview(&content);
let lines: Vec<&str> = preview.lines().collect();
assert!(lines.len() <= TELEMETRY_PREVIEW_MAX_LINES + 1);
assert_eq!(lines.last(), Some(&TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
}
}
#[derive(Clone, Debug)]

View File

@@ -22,6 +22,12 @@ pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;
// Telemetry preview limits: keep log events smaller than model budgets.
pub(crate) const TELEMETRY_PREVIEW_MAX_BYTES: usize = 2 * 1024; // 2 KiB
pub(crate) const TELEMETRY_PREVIEW_MAX_LINES: usize = 64; // lines
pub(crate) const TELEMETRY_PREVIEW_TRUNCATION_NOTICE: &str =
"[... telemetry preview truncated ...]";
/// Format the combined exec output for sending back to the model.
/// Includes exit code and duration metadata; truncates large bodies safely.
pub fn format_exec_output_for_model(exec_output: &ExecToolCallOutput) -> String {