Compare commits

...

4 Commits

Author SHA1 Message Date
Michael Bolin
86d00c0236 core: route view_image through a sandbox-backed fs helper 2026-03-20 15:32:26 -07:00
Michael Bolin
f31d388ac0 core: add a sandbox-backed fs helper 2026-03-20 15:19:33 -07:00
Dylan Hurd
ea8b07e680 chore(core) Remove Feature::PowershellUtf8 (#15128)
## Summary
This feature has been enabled for powershell for a while now, let's get
rid of the logic

## Testing
- [x] Unit tests
2026-03-20 22:03:31 +00:00
Matthew Zeng
dd88ed767b [apps] Use ARC for yolo mode. (#15273)
- [x] Use ARC for yolo mode.
2026-03-20 21:13:20 +00:00
27 changed files with 874 additions and 139 deletions

10
codex-rs/Cargo.lock generated
View File

@@ -1572,6 +1572,7 @@ version = "0.0.0"
dependencies = [
"anyhow",
"codex-apply-patch",
"codex-fs-ops",
"codex-linux-sandbox",
"codex-shell-escalation",
"codex-utils-home-dir",
@@ -1863,6 +1864,7 @@ dependencies = [
"codex-execpolicy",
"codex-features",
"codex-file-search",
"codex-fs-ops",
"codex-git",
"codex-hooks",
"codex-login",
@@ -2119,6 +2121,14 @@ dependencies = [
"tokio",
]
[[package]]
name = "codex-fs-ops"
version = "0.0.0"
dependencies = [
"pretty_assertions",
"tempfile",
]
[[package]]
name = "codex-git"
version = "0.0.0"

View File

@@ -10,8 +10,6 @@ members = [
"debug-client",
"apply-patch",
"arg0",
"feedback",
"features",
"codex-backend-openapi-models",
"cloud-requirements",
"cloud-tasks",
@@ -29,6 +27,9 @@ members = [
"exec-server",
"execpolicy",
"execpolicy-legacy",
"feedback",
"features",
"fs-ops",
"keyring-store",
"file-search",
"linux-sandbox",
@@ -111,9 +112,10 @@ codex-exec = { path = "exec" }
codex-exec-server = { path = "exec-server" }
codex-execpolicy = { path = "execpolicy" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-feedback = { path = "feedback" }
codex-features = { path = "features" }
codex-feedback = { path = "feedback" }
codex-file-search = { path = "file-search" }
codex-fs-ops = { path = "fs-ops" }
codex-git = { path = "utils/git" }
codex-hooks = { path = "hooks" }
codex-keyring-store = { path = "keyring-store" }

View File

@@ -14,6 +14,7 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
codex-apply-patch = { workspace = true }
codex-fs-ops = { workspace = true }
codex-linux-sandbox = { workspace = true }
codex-shell-escalation = { workspace = true }
codex-utils-home-dir = { workspace = true }

View File

@@ -4,6 +4,7 @@ use std::path::Path;
use std::path::PathBuf;
use codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1;
use codex_fs_ops::CODEX_CORE_FS_OPS_ARG1;
use codex_utils_home_dir::find_codex_home;
#[cfg(unix)]
use std::os::unix::fs::symlink;
@@ -105,6 +106,12 @@ pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
};
std::process::exit(exit_code);
}
if argv1 == CODEX_CORE_FS_OPS_ARG1 {
let mut stdin = std::io::stdin();
let mut stdout = std::io::stdout();
let mut stderr = std::io::stderr();
codex_fs_ops::run_from_args_and_exit(args, &mut stdin, &mut stdout, &mut stderr);
}
// This modifies the environment, which is not thread-safe, so do this
// before creating any threads/the Tokio runtime.

View File

@@ -39,6 +39,7 @@ codex-login = { workspace = true }
codex-shell-command = { workspace = true }
codex-skills = { workspace = true }
codex-execpolicy = { workspace = true }
codex-fs-ops = { workspace = true }
codex-file-search = { workspace = true }
codex-git = { workspace = true }
codex-hooks = { workspace = true }

View File

@@ -431,9 +431,6 @@
"plugins": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},
"prevent_idle_sleep": {
"type": "boolean"
},
@@ -2040,9 +2037,6 @@
"plugins": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},
"prevent_idle_sleep": {
"type": "boolean"
},

View File

@@ -99,6 +99,7 @@ pub(crate) async fn monitor_action(
sess: &Session,
turn_context: &TurnContext,
action: serde_json::Value,
protection_client_callsite: &'static str,
) -> ArcMonitorOutcome {
let auth = match turn_context.auth_manager.as_ref() {
Some(auth_manager) => match auth_manager.auth().await {
@@ -138,7 +139,8 @@ pub(crate) async fn monitor_action(
return ArcMonitorOutcome::Ok;
}
};
let body = build_arc_monitor_request(sess, turn_context, action).await;
let body =
build_arc_monitor_request(sess, turn_context, action, protection_client_callsite).await;
let client = build_reqwest_client();
let mut request = client
.post(&url)
@@ -236,6 +238,7 @@ async fn build_arc_monitor_request(
sess: &Session,
turn_context: &TurnContext,
action: serde_json::Map<String, serde_json::Value>,
protection_client_callsite: &'static str,
) -> ArcMonitorRequest {
let history = sess.clone_history().await;
let mut messages = build_arc_monitor_messages(history.raw_items());
@@ -254,7 +257,7 @@ async fn build_arc_monitor_request(
codex_thread_id: conversation_id.clone(),
codex_turn_id: turn_context.sub_id.clone(),
conversation_id: Some(conversation_id),
protection_client_callsite: None,
protection_client_callsite: Some(protection_client_callsite.to_string()),
},
messages: Some(messages),
input: None,

View File

@@ -178,6 +178,7 @@ async fn build_arc_monitor_request_includes_relevant_history_and_null_policies()
&turn_context,
serde_json::from_value(serde_json::json!({ "tool": "mcp_tool_call" }))
.expect("action should deserialize"),
"normal",
)
.await;
@@ -188,7 +189,7 @@ async fn build_arc_monitor_request_includes_relevant_history_and_null_policies()
codex_thread_id: session.conversation_id.to_string(),
codex_turn_id: turn_context.sub_id.clone(),
conversation_id: Some(session.conversation_id.to_string()),
protection_client_callsite: None,
protection_client_callsite: Some("normal".to_string()),
},
messages: Some(vec![
ArcMonitorChatMessage {
@@ -285,6 +286,7 @@ async fn monitor_action_posts_expected_arc_request() {
"codex_thread_id": session.conversation_id.to_string(),
"codex_turn_id": turn_context.sub_id.clone(),
"conversation_id": session.conversation_id.to_string(),
"protection_client_callsite": "normal",
},
"messages": [{
"role": "user",
@@ -320,6 +322,7 @@ async fn monitor_action_posts_expected_arc_request() {
&session,
&turn_context,
serde_json::json!({ "tool": "mcp_tool_call" }),
"normal",
)
.await;
@@ -377,6 +380,7 @@ async fn monitor_action_uses_env_url_and_token_overrides() {
&session,
&turn_context,
serde_json::json!({ "tool": "mcp_tool_call" }),
"normal",
)
.await;
@@ -428,6 +432,7 @@ async fn monitor_action_rejects_legacy_response_fields() {
&session,
&turn_context,
serde_json::json!({ "tool": "mcp_tool_call" }),
"normal",
)
.await;

View File

@@ -319,6 +319,62 @@ pub(crate) async fn execute_exec_request(
stdout_stream: Option<StdoutStream>,
after_spawn: Option<Box<dyn FnOnce() + Send>>,
) -> Result<ExecToolCallOutput> {
let PreparedExecRequest {
params,
sandbox,
file_system_sandbox_policy,
network_sandbox_policy,
} = prepare_exec_request(exec_request);
let start = Instant::now();
let raw_output_result = exec(
params,
sandbox,
sandbox_policy,
&file_system_sandbox_policy,
network_sandbox_policy,
stdout_stream,
after_spawn,
)
.await;
let duration = start.elapsed();
Ok(normalize_exec_result(raw_output_result, sandbox, duration)?.to_utf8_lossy_output())
}
/// Execute an [`ExecRequest`] and return the raw (byte-stream) output.
///
/// Mirrors `execute_exec_request`, but skips the lossy UTF-8 conversion so
/// callers can inspect the exact bytes the process produced.
///
/// # Errors
/// Propagates any spawn failure, timeout, or sandbox denial reported by
/// `exec`, as surfaced through `normalize_exec_result`.
pub(crate) async fn execute_exec_request_raw_output(
    exec_request: ExecRequest,
    sandbox_policy: &SandboxPolicy,
    stdout_stream: Option<StdoutStream>,
    after_spawn: Option<Box<dyn FnOnce() + Send>>,
) -> Result<ExecToolCallRawOutput> {
    // Resolve the request into concrete exec params plus sandbox selection.
    let PreparedExecRequest {
        params,
        sandbox,
        file_system_sandbox_policy,
        network_sandbox_policy,
    } = prepare_exec_request(exec_request);
    let started_at = Instant::now();
    let outcome = exec(
        params,
        sandbox,
        sandbox_policy,
        &file_system_sandbox_policy,
        network_sandbox_policy,
        stdout_stream,
        after_spawn,
    )
    .await;
    // Fold timeout/signal handling into the result without converting the
    // captured byte streams to strings.
    normalize_exec_result(outcome, sandbox, started_at.elapsed())
}
/// Result of [`prepare_exec_request`]: the concrete parameters for `exec`
/// together with the sandbox selection for the command.
struct PreparedExecRequest {
// Fully-resolved parameters handed to `exec`.
params: ExecParams,
// Sandbox backend selected for this execution.
sandbox: SandboxType,
// File-system sandbox policy, passed by reference to `exec`.
file_system_sandbox_policy: FileSystemSandboxPolicy,
// Network sandbox policy, passed by value to `exec`.
network_sandbox_policy: NetworkSandboxPolicy,
}
fn prepare_exec_request(exec_request: ExecRequest) -> PreparedExecRequest {
let ExecRequest {
command,
cwd,
@@ -338,33 +394,24 @@ pub(crate) async fn execute_exec_request(
} = exec_request;
let _ = _sandbox_policy_from_env;
let params = ExecParams {
command,
cwd,
expiration,
capture_policy,
env,
network: network.clone(),
sandbox_permissions,
windows_sandbox_level,
windows_sandbox_private_desktop,
justification,
arg0,
};
let start = Instant::now();
let raw_output_result = exec(
params,
PreparedExecRequest {
params: ExecParams {
command,
cwd,
expiration,
capture_policy,
env,
network,
sandbox_permissions,
windows_sandbox_level,
windows_sandbox_private_desktop,
justification,
arg0,
},
sandbox,
sandbox_policy,
&file_system_sandbox_policy,
network_sandbox_policy,
stdout_stream,
after_spawn,
)
.await;
let duration = start.elapsed();
finalize_exec_result(raw_output_result, sandbox, duration)
file_system_sandbox_policy,
}
}
#[cfg(target_os = "windows")]
@@ -558,19 +605,25 @@ async fn exec_windows_sandbox(
})
}
fn finalize_exec_result(
fn normalize_exec_result(
raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr>,
sandbox_type: SandboxType,
duration: Duration,
) -> Result<ExecToolCallOutput> {
) -> Result<ExecToolCallRawOutput> {
match raw_output_result {
Ok(raw_output) => {
#[allow(unused_mut)]
let mut timed_out = raw_output.timed_out;
let RawExecToolCallOutput {
exit_status,
stdout,
stderr,
aggregated_output,
#[cfg_attr(target_os = "windows", allow(unused_mut))]
mut timed_out,
} = raw_output;
#[cfg(target_family = "unix")]
{
if let Some(signal) = raw_output.exit_status.signal() {
if let Some(signal) = exit_status.signal() {
if signal == TIMEOUT_CODE {
timed_out = true;
} else {
@@ -579,15 +632,12 @@ fn finalize_exec_result(
}
}
let mut exit_code = raw_output.exit_status.code().unwrap_or(-1);
let mut exit_code = exit_status.code().unwrap_or(-1);
if timed_out {
exit_code = EXEC_TIMEOUT_EXIT_CODE;
}
let stdout = raw_output.stdout.from_utf8_lossy();
let stderr = raw_output.stderr.from_utf8_lossy();
let aggregated_output = raw_output.aggregated_output.from_utf8_lossy();
let exec_output = ExecToolCallOutput {
let exec_output = ExecToolCallRawOutput {
exit_code,
stdout,
stderr,
@@ -598,13 +648,14 @@ fn finalize_exec_result(
if timed_out {
return Err(CodexErr::Sandbox(SandboxErr::Timeout {
output: Box::new(exec_output),
output: Box::new(exec_output.to_utf8_lossy_output()),
}));
}
if is_likely_sandbox_denied(sandbox_type, &exec_output) {
let string_output = exec_output.to_utf8_lossy_output();
if is_likely_sandbox_denied(sandbox_type, &string_output) {
return Err(CodexErr::Sandbox(SandboxErr::Denied {
output: Box::new(exec_output),
output: Box::new(string_output),
network_policy_decision: None,
}));
}
@@ -796,6 +847,16 @@ pub struct ExecToolCallOutput {
pub timed_out: bool,
}
/// Variant of [`ExecToolCallOutput`] whose output streams are kept as raw
/// bytes; convert via `to_utf8_lossy_output` when a textual view is needed.
#[derive(Clone, Debug)]
pub(crate) struct ExecToolCallRawOutput {
pub exit_code: i32,
pub stdout: StreamOutput<Vec<u8>>,
pub stderr: StreamOutput<Vec<u8>>,
pub aggregated_output: StreamOutput<Vec<u8>>,
pub duration: Duration,
pub timed_out: bool,
}
impl Default for ExecToolCallOutput {
fn default() -> Self {
Self {
@@ -809,6 +870,19 @@ impl Default for ExecToolCallOutput {
}
}
impl ExecToolCallRawOutput {
    /// Convert the raw byte streams into their lossy UTF-8 string
    /// counterparts, yielding an [`ExecToolCallOutput`].
    fn to_utf8_lossy_output(&self) -> ExecToolCallOutput {
        let Self {
            exit_code,
            stdout,
            stderr,
            aggregated_output,
            duration,
            timed_out,
        } = self;
        ExecToolCallOutput {
            exit_code: *exit_code,
            stdout: stdout.from_utf8_lossy(),
            stderr: stderr.from_utf8_lossy(),
            aggregated_output: aggregated_output.from_utf8_lossy(),
            duration: *duration,
            timed_out: *timed_out,
        }
    }
}
#[cfg_attr(not(target_os = "windows"), allow(unused_variables))]
async fn exec(
params: ExecParams,

View File

@@ -121,6 +121,7 @@ pub mod default_client {
pub mod project_doc;
mod rollout;
pub(crate) mod safety;
mod sandboxed_fs;
pub mod seatbelt;
pub mod shell;
pub mod shell_snapshot;

View File

@@ -457,6 +457,9 @@ const MCP_TOOL_APPROVAL_TOOL_TITLE_KEY: &str = "tool_title";
const MCP_TOOL_APPROVAL_TOOL_DESCRIPTION_KEY: &str = "tool_description";
const MCP_TOOL_APPROVAL_TOOL_PARAMS_KEY: &str = "tool_params";
const MCP_TOOL_APPROVAL_TOOL_PARAMS_DISPLAY_KEY: &str = "tool_params_display";
const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_DEFAULT: &str = "mcp_tool_call__default";
const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_ALWAYS_ALLOW: &str = "mcp_tool_call__always_allow";
const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_FULL_ACCESS: &str = "mcp_tool_call__full_access";
pub(crate) fn is_mcp_tool_approval_question_id(question_id: &str) -> bool {
question_id
@@ -494,14 +497,22 @@ async fn maybe_request_mcp_tool_approval(
let annotations = metadata.and_then(|metadata| metadata.annotations.as_ref());
let approval_required = annotations.is_some_and(requires_mcp_tool_approval);
let mut monitor_reason = None;
let auto_approved_by_policy = approval_mode == AppToolApproval::Approve
|| (approval_mode == AppToolApproval::Auto && is_full_access_mode(turn_context));
if approval_mode == AppToolApproval::Approve {
if auto_approved_by_policy {
if !approval_required {
return None;
}
match maybe_monitor_auto_approved_mcp_tool_call(sess, turn_context, invocation, metadata)
.await
match maybe_monitor_auto_approved_mcp_tool_call(
sess,
turn_context,
invocation,
metadata,
approval_mode,
)
.await
{
ArcMonitorOutcome::Ok => return None,
ArcMonitorOutcome::AskUser(reason) => {
@@ -515,13 +526,8 @@ async fn maybe_request_mcp_tool_approval(
}
}
if approval_mode == AppToolApproval::Auto {
if is_full_access_mode(turn_context) {
return None;
}
if !approval_required {
return None;
}
if approval_mode == AppToolApproval::Auto && !approval_required {
return None;
}
let session_approval_key = session_mcp_tool_approval_key(invocation, metadata, approval_mode);
@@ -653,9 +659,16 @@ async fn maybe_monitor_auto_approved_mcp_tool_call(
turn_context: &TurnContext,
invocation: &McpInvocation,
metadata: Option<&McpToolApprovalMetadata>,
approval_mode: AppToolApproval,
) -> ArcMonitorOutcome {
let action = prepare_arc_request_action(invocation, metadata);
monitor_action(sess, turn_context, action).await
monitor_action(
sess,
turn_context,
action,
mcp_tool_approval_callsite_mode(approval_mode, turn_context),
)
.await
}
fn prepare_arc_request_action(
@@ -749,6 +762,22 @@ fn is_full_access_mode(turn_context: &TurnContext) -> bool {
)
}
/// Pick the ARC monitor callsite label describing how the MCP tool call was
/// approved: always-allow, full-access auto-approval, or the default path.
fn mcp_tool_approval_callsite_mode(
    approval_mode: AppToolApproval,
    turn_context: &TurnContext,
) -> &'static str {
    match approval_mode {
        AppToolApproval::Approve => MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_ALWAYS_ALLOW,
        AppToolApproval::Auto if is_full_access_mode(turn_context) => {
            MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_FULL_ACCESS
        }
        AppToolApproval::Auto | AppToolApproval::Prompt => {
            MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_DEFAULT
        }
    }
}
pub(crate) async fn lookup_mcp_tool_metadata(
sess: &Session,
turn_context: &TurnContext,

View File

@@ -776,6 +776,38 @@ fn approval_elicitation_meta_merges_session_and_always_persist_with_connector_so
);
}
// Unit-checks the callsite label picked per approval mode: Auto/Prompt in a
// normal context map to the default callsite, Approve maps to always-allow,
// and Auto in a full-access context maps to full-access.
#[tokio::test]
async fn approval_callsite_mode_distinguishes_default_always_allow_and_full_access() {
let (_session, mut turn_context) = make_session_and_context().await;
assert_eq!(
mcp_tool_approval_callsite_mode(AppToolApproval::Auto, &turn_context),
"mcp_tool_call__default"
);
assert_eq!(
mcp_tool_approval_callsite_mode(AppToolApproval::Prompt, &turn_context),
"mcp_tool_call__default"
);
assert_eq!(
mcp_tool_approval_callsite_mode(AppToolApproval::Approve, &turn_context),
"mcp_tool_call__always_allow"
);
// Flip the context into full-access mode (never ask + full disk access).
turn_context
.approval_policy
.set(AskForApproval::Never)
.expect("test setup should allow updating approval policy");
turn_context
.sandbox_policy
.set(SandboxPolicy::DangerFullAccess)
.expect("test setup should allow updating sandbox policy");
assert_eq!(
mcp_tool_approval_callsite_mode(AppToolApproval::Auto, &turn_context),
"mcp_tool_call__full_access"
);
}
#[test]
fn declined_elicitation_response_stays_decline() {
let response = parse_mcp_tool_approval_elicitation_response(
@@ -1035,6 +1067,83 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_for_model() {
);
}
// End-to-end check that an auto-approved tool call in full-access mode still
// consults the ARC safety monitor, and is blocked when the monitor responds
// with a "steer-model" outcome.
#[tokio::test]
async fn full_access_auto_mode_blocks_when_arc_returns_interrupt_for_model() {
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
let server = MockServer::start().await;
// Mock ARC endpoint returns a steer-model verdict; `.expect(1)` asserts the
// monitor is called exactly once even under full access.
Mock::given(method("POST"))
.and(path("/codex/safety/arc"))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"outcome": "steer-model",
"short_reason": "needs approval",
"rationale": "high-risk action",
"risk_score": 96,
"risk_level": "critical",
"evidence": [{
"message": "dangerous_tool",
"why": "high-risk action",
}],
})))
.expect(1)
.mount(&server)
.await;
let (session, mut turn_context) = make_session_and_context().await;
turn_context.auth_manager = Some(crate::test_support::auth_manager_from_auth(
crate::CodexAuth::create_dummy_chatgpt_auth_for_testing(),
));
// Configure full-access mode: never ask for approval + full disk access.
turn_context
.approval_policy
.set(AskForApproval::Never)
.expect("test setup should allow updating approval policy");
turn_context
.sandbox_policy
.set(SandboxPolicy::DangerFullAccess)
.expect("test setup should allow updating sandbox policy");
// Point ARC requests at the mock server.
let mut config = (*turn_context.config).clone();
config.chatgpt_base_url = server.uri();
turn_context.config = Arc::new(config);
let session = Arc::new(session);
let turn_context = Arc::new(turn_context);
let invocation = McpInvocation {
server: CODEX_APPS_MCP_SERVER_NAME.to_string(),
tool: "dangerous_tool".to_string(),
arguments: Some(serde_json::json!({ "id": 1 })),
};
let metadata = McpToolApprovalMetadata {
annotations: Some(annotations(Some(false), Some(true), Some(true))),
connector_id: Some("calendar".to_string()),
connector_name: Some("Calendar".to_string()),
connector_description: Some("Manage events".to_string()),
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
codex_apps_meta: None,
};
let decision = maybe_request_mcp_tool_approval(
&session,
&turn_context,
"call-2",
&invocation,
Some(&metadata),
AppToolApproval::Auto,
)
.await;
// The steer-model verdict must surface as a safety-monitor block.
assert_eq!(
decision,
Some(McpToolApprovalDecision::BlockedBySafetyMonitor(
"Tool call was cancelled because of safety risks: high-risk action".to_string(),
))
);
}
#[tokio::test]
async fn approve_mode_routes_arc_ask_user_to_guardian_when_guardian_reviewer_is_enabled() {
use wiremock::Mock;

View File

@@ -0,0 +1,160 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::exec::ExecCapturePolicy;
use crate::exec::ExecExpiration;
use crate::exec::ExecToolCallRawOutput;
use crate::exec::execute_exec_request_raw_output;
use crate::sandboxing::CommandSpec;
use crate::sandboxing::SandboxPermissions;
use crate::sandboxing::merge_permission_profiles;
use crate::tools::sandboxing::SandboxAttempt;
use crate::tools::sandboxing::SandboxablePreference;
use codex_fs_ops::CODEX_CORE_FS_OPS_ARG1;
use codex_fs_ops::READ_FILE_OPERATION_ARG2;
use codex_protocol::models::PermissionProfile;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
/// Read the bytes of `path`, honoring the sandbox constraints imposed by the
/// given session and turn context.
///
/// Unlike a plain `cat FILE`, the helper checks that `path` is a regular
/// file before reading, so special files such as `/dev/zero` produce an
/// error rather than blocking forever.
pub(crate) async fn read_file(
    path: &AbsolutePathBuf,
    session: &Arc<Session>,
    turn: &Arc<TurnContext>,
) -> Result<Vec<u8>, SandboxedFsError> {
    let operation = SandboxedFsOperation::Read { path: path.clone() };
    // The helper writes the file contents to its stdout.
    perform_operation(operation, session, turn)
        .await
        .map(|output| output.stdout.text)
}
/// Operations supported by the [CODEX_CORE_FS_OPS_ARG1] sandbox helper.
enum SandboxedFsOperation {
// Read the contents of the regular file at `path`; the bytes come back on
// the helper's stdout.
Read { path: AbsolutePathBuf },
}
/// Execute `operation` by re-invoking the current codex binary as the
/// [CODEX_CORE_FS_OPS_ARG1] helper inside a sandbox mirroring the turn's
/// policies, returning the helper's raw output when it exits 0.
///
/// # Errors
/// - [`SandboxedFsError::ResolveExe`] if the current executable path cannot
///   be determined.
/// - [`SandboxedFsError::ProcessFailed`] if the sandbox environment cannot be
///   constructed, the helper fails to run, or it exits non-zero.
async fn perform_operation(
operation: SandboxedFsOperation,
session: &Arc<Session>,
turn: &Arc<TurnContext>,
) -> Result<ExecToolCallRawOutput, SandboxedFsError> {
// The helper is this very binary, re-executed with a special argv[1].
let exe = std::env::current_exe().map_err(|error| SandboxedFsError::ResolveExe {
message: error.to_string(),
})?;
// Include any extra permissions the user granted for this session or turn.
let additional_permissions = effective_granted_permissions(session).await;
// Build a one-shot sandbox attempt from the turn's sandbox configuration.
let sandbox_manager = crate::sandboxing::SandboxManager::new();
let attempt = SandboxAttempt {
sandbox: sandbox_manager.select_initial(
&turn.file_system_sandbox_policy,
turn.network_sandbox_policy,
SandboxablePreference::Auto,
turn.windows_sandbox_level,
/*has_managed_network_requirements*/ false,
),
policy: &turn.sandbox_policy,
file_system_policy: &turn.file_system_sandbox_policy,
network_policy: turn.network_sandbox_policy,
enforce_managed_network: false,
manager: &sandbox_manager,
sandbox_cwd: &turn.cwd,
codex_linux_sandbox_exe: turn.codex_linux_sandbox_exe.as_ref(),
use_legacy_landlock: turn.features.use_legacy_landlock(),
windows_sandbox_level: turn.windows_sandbox_level,
windows_sandbox_private_desktop: turn.config.permissions.windows_sandbox_private_desktop,
};
// argv: [CODEX_CORE_FS_OPS_ARG1, <operation selector>, <operands...>].
let args = match operation {
SandboxedFsOperation::Read { ref path } => vec![
CODEX_CORE_FS_OPS_ARG1.to_string(),
READ_FILE_OPERATION_ARG2.to_string(),
path.to_string_lossy().to_string(),
],
};
// `FullBuffer` reads ignore exec expiration, but `ExecRequest` still requires
// an `expiration` field, so keep a placeholder timeout here until that API
// changes.
let ignored_expiration = Duration::from_secs(30);
let exec_request = attempt
.env_for(
CommandSpec {
program: exe.to_string_lossy().to_string(),
args,
cwd: turn.cwd.clone(),
env: HashMap::new(),
expiration: ExecExpiration::Timeout(ignored_expiration),
capture_policy: ExecCapturePolicy::FullBuffer,
sandbox_permissions: SandboxPermissions::UseDefault,
additional_permissions,
justification: None,
},
/*network*/ None,
)
// NOTE(review): env_for fails before any process is spawned, hence the
// sentinel -1 exit code.
.map_err(|error| SandboxedFsError::ProcessFailed {
exit_code: -1,
message: error.to_string(),
})?;
// Execute under the sandbox policy the request itself was built with.
let effective_policy = exec_request.sandbox_policy.clone();
let output = execute_exec_request_raw_output(
exec_request,
&effective_policy,
/*stdout_stream*/ None,
/*after_spawn*/ None,
)
.await
.map_err(|error| SandboxedFsError::ProcessFailed {
exit_code: 1,
message: error.to_string(),
})?;
if output.exit_code == 0 {
Ok(output)
} else {
// Non-zero exit: promote the helper's stderr/stdout into the error.
Err(parse_helper_failure(
output.exit_code,
&output.stderr.text,
&output.stdout.text,
))
}
}
/// Combine the permission grants recorded for the whole session with those
/// granted for the current turn into one effective profile.
async fn effective_granted_permissions(session: &Session) -> Option<PermissionProfile> {
    let session_grants = session.granted_session_permissions().await;
    let turn_grants = session.granted_turn_permissions().await;
    merge_permission_profiles(session_grants.as_ref(), turn_grants.as_ref())
}
/// Build a `ProcessFailed` error from a failed helper run, preferring a
/// non-empty trimmed stderr for the message, then stdout, then a placeholder.
fn parse_helper_failure(exit_code: i32, stderr: &[u8], stdout: &[u8]) -> SandboxedFsError {
    let stderr_text = String::from_utf8_lossy(stderr);
    let stdout_text = String::from_utf8_lossy(stdout);
    let message = [stderr_text.trim(), stdout_text.trim()]
        .into_iter()
        .find(|text| !text.is_empty())
        .map_or_else(|| "no error details emitted".to_string(), str::to_string);
    SandboxedFsError::ProcessFailed { exit_code, message }
}
/// Errors surfaced while running the sandboxed fs helper.
#[derive(Debug, thiserror::Error)]
pub(crate) enum SandboxedFsError {
// The path of the current codex executable could not be determined.
#[error("failed to determine codex executable: {message}")]
ResolveExe { message: String },
// The helper could not be launched or exited unsuccessfully; `message`
// carries the most specific detail available (stderr, stdout, or a
// placeholder).
#[error("sandboxed fs helper exited with code {exit_code}: {message}")]
ProcessFailed { exit_code: i32, message: String },
}

View File

@@ -14,6 +14,7 @@ use crate::function_tool::FunctionCallError;
use crate::original_image_detail::can_request_original_image_detail;
use crate::protocol::EventMsg;
use crate::protocol::ViewImageToolCallEvent;
use crate::sandboxed_fs;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
@@ -93,36 +94,6 @@ impl ToolHandler for ViewImageHandler {
AbsolutePathBuf::try_from(turn.resolve_path(Some(args.path))).map_err(|error| {
FunctionCallError::RespondToModel(format!("unable to resolve image path: {error}"))
})?;
let metadata = turn
.environment
.get_filesystem()
.get_metadata(&abs_path)
.await
.map_err(|error| {
FunctionCallError::RespondToModel(format!(
"unable to locate image at `{}`: {error}",
abs_path.display()
))
})?;
if !metadata.is_file {
return Err(FunctionCallError::RespondToModel(format!(
"image path `{}` is not a file",
abs_path.display()
)));
}
let file_bytes = turn
.environment
.get_filesystem()
.read_file(&abs_path)
.await
.map_err(|error| {
FunctionCallError::RespondToModel(format!(
"unable to read image at `{}`: {error}",
abs_path.display()
))
})?;
let event_path = abs_path.to_path_buf();
let can_request_original_detail =
@@ -135,14 +106,24 @@ impl ToolHandler for ViewImageHandler {
PromptImageMode::ResizeToFit
};
let image_detail = use_original_detail.then_some(ImageDetail::Original);
let image_bytes = sandboxed_fs::read_file(&abs_path, &session, &turn)
.await
.map_err(|error| {
let full_error = format!(
"unable to read image file `{path}`: {error:?}",
path = abs_path.display()
);
FunctionCallError::RespondToModel(full_error)
})?;
let image =
load_for_prompt_bytes(abs_path.as_path(), file_bytes, image_mode).map_err(|error| {
let image = load_for_prompt_bytes(abs_path.as_path(), image_bytes, image_mode).map_err(
|error| {
FunctionCallError::RespondToModel(format!(
"unable to process image at `{}`: {error}",
abs_path.display()
))
})?;
},
)?;
let image_url = image.into_data_url();
session

View File

@@ -33,7 +33,6 @@ use crate::tools::sandboxing::ToolError;
use crate::tools::sandboxing::ToolRuntime;
use crate::tools::sandboxing::sandbox_override_for_first_attempt;
use crate::tools::sandboxing::with_cached_approval;
use codex_features::Feature;
use codex_network_proxy::NetworkProxy;
use codex_protocol::models::PermissionProfile;
use codex_protocol::protocol::ReviewDecision;
@@ -227,9 +226,7 @@ impl ToolRuntime<ShellRequest, ExecToolCallOutput> for ShellRuntime {
&req.cwd,
&req.explicit_env_overrides,
);
let command = if matches!(session_shell.shell_type, ShellType::PowerShell)
&& ctx.session.features().enabled(Feature::PowershellUtf8)
{
let command = if matches!(session_shell.shell_type, ShellType::PowerShell) {
prefix_powershell_script_with_utf8(&command)
} else {
command

View File

@@ -36,7 +36,6 @@ use crate::unified_exec::NoopSpawnLifecycle;
use crate::unified_exec::UnifiedExecError;
use crate::unified_exec::UnifiedExecProcess;
use crate::unified_exec::UnifiedExecProcessManager;
use codex_features::Feature;
use codex_network_proxy::NetworkProxy;
use codex_protocol::models::PermissionProfile;
use codex_protocol::protocol::ReviewDecision;
@@ -200,9 +199,7 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
&req.cwd,
&req.explicit_env_overrides,
);
let command = if matches!(session_shell.shell_type, ShellType::PowerShell)
&& ctx.session.features().enabled(Feature::PowershellUtf8)
{
let command = if matches!(session_shell.shell_type, ShellType::PowerShell) {
prefix_powershell_script_with_utf8(&command)
} else {
command

View File

@@ -1,7 +1,6 @@
use std::time::Duration;
use anyhow::Result;
use codex_features::Feature;
use core_test_support::assert_regex_match;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
@@ -251,16 +250,7 @@ async fn shell_command_times_out_with_timeout_ms() -> anyhow::Result<()> {
async fn unicode_output(login: bool) -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
#[allow(clippy::expect_used)]
let harness = shell_command_harness_with(|builder| {
builder.with_model("gpt-5.2").with_config(|config| {
config
.features
.enable(Feature::PowershellUtf8)
.expect("test config should allow feature update");
})
})
.await?;
let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.2")).await?;
// We use a child process on windows instead of a direct builtin like 'echo' to ensure that Powershell
// config is actually being set correctly.
@@ -286,16 +276,7 @@ async fn unicode_output(login: bool) -> anyhow::Result<()> {
async fn unicode_output_with_newlines(login: bool) -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
#[allow(clippy::expect_used)]
let harness = shell_command_harness_with(|builder| {
builder.with_model("gpt-5.2").with_config(|config| {
config
.features
.enable(Feature::PowershellUtf8)
.expect("test config should allow feature update");
})
})
.await?;
let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.2")).await?;
let call_id = "unicode_output";
mount_shell_responses_with_timeout(

View File

@@ -3,6 +3,7 @@
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use codex_core::CodexAuth;
use codex_core::config::Constrained;
use codex_exec_server::CreateDirectoryOptions;
use codex_features::Feature;
use codex_protocol::config_types::ReasoningSummary;
@@ -14,9 +15,15 @@ use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::permissions::FileSystemAccessMode;
use codex_protocol::permissions::FileSystemPath;
use codex_protocol::permissions::FileSystemSandboxEntry;
use codex_protocol::permissions::FileSystemSandboxPolicy;
use codex_protocol::permissions::FileSystemSpecialPath;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::ReadOnlyAccess;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::user_input::UserInput;
use core_test_support::responses;
@@ -1146,7 +1153,10 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
.function_call_output_content_and_success(call_id)
.and_then(|(content, _)| content)
.expect("output text present");
let expected_message = format!("image path `{}` is not a file", abs_path.display());
let expected_message = format!(
r#"unable to read image file `{path}`: ProcessFailed {{ exit_code: 1, message: "error: `{path}` is not a regular file" }}"#,
path = abs_path.display()
);
assert_eq!(output_text, expected_message);
assert!(
@@ -1301,7 +1311,10 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
.function_call_output_content_and_success(call_id)
.and_then(|(content, _)| content)
.expect("output text present");
let expected_prefix = format!("unable to locate image at `{}`:", abs_path.display());
let expected_prefix = format!(
"unable to read image file `{path}`:",
path = abs_path.display()
);
assert!(
output_text.starts_with(&expected_prefix),
"expected error to start with `{expected_prefix}` but got `{output_text}`"
@@ -1315,6 +1328,109 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
Ok(())
}
// The view_image tool must route file reads through the sandbox: with a
// read-only policy whose readable roots do not cover a temp dir outside the
// workspace, the read fails and no input_image reaches the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn view_image_tool_respects_filesystem_sandbox() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let sandbox_policy_for_config = SandboxPolicy::ReadOnly {
access: ReadOnlyAccess::Restricted {
include_platform_defaults: true,
readable_roots: Vec::new(),
},
network_access: false,
};
// Restrict fs access to the platform-minimal paths plus the cwd, read-only.
let mut builder = test_codex().with_config({
let sandbox_policy_for_config = sandbox_policy_for_config.clone();
move |config| {
config.permissions.sandbox_policy = Constrained::allow_any(sandbox_policy_for_config);
config.permissions.file_system_sandbox_policy =
FileSystemSandboxPolicy::restricted(vec![
FileSystemSandboxEntry {
path: FileSystemPath::Special {
value: FileSystemSpecialPath::Minimal,
},
access: FileSystemAccessMode::Read,
},
FileSystemSandboxEntry {
path: FileSystemPath::Special {
value: FileSystemSpecialPath::CurrentWorkingDirectory,
},
access: FileSystemAccessMode::Read,
},
]);
}
});
let TestCodex {
codex,
config,
cwd,
session_configured,
..
} = builder.build(&server).await?;
// Place a real image outside every readable root configured above.
let outside_dir = tempfile::tempdir()?;
let abs_path = outside_dir.path().join("blocked.png");
let image = ImageBuffer::from_pixel(256, 128, Rgba([10u8, 20, 30, 255]));
image.save(&abs_path)?;
let call_id = "view-image-sandbox-denied";
let arguments = serde_json::json!({ "path": abs_path }).to_string();
// First model turn requests view_image on the blocked path; the second turn
// receives the tool output.
let first_response = sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "view_image", &arguments),
ev_completed("resp-1"),
]);
responses::mount_sse_once(&server, first_response).await;
let second_response = sse(vec![
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
]);
let mock = responses::mount_sse_once(&server, second_response).await;
let session_model = session_configured.model.clone();
codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "please attach the outside image".into(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: config.permissions.sandbox_policy.get().clone(),
model: session_model,
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
.await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
let request = mock.single_request();
// The denied read must not attach an image and must report a read error.
assert!(
request.inputs_of_type("input_image").is_empty(),
"sandbox-denied image should not produce an input_image message"
);
let output_text = request
.function_call_output_content_and_success(call_id)
.and_then(|(content, _)| content)
.expect("output text present");
let expected_prefix = format!("unable to read image file `{}`:", abs_path.display());
assert!(
output_text.starts_with(&expected_prefix),
"expected sandbox denial prefix `{expected_prefix}` but got `{output_text}`"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

View File

@@ -132,8 +132,6 @@ pub enum Feature {
ChildAgentsMd,
/// Allow the model to request `detail: "original"` image outputs on supported models.
ImageDetailOriginal,
/// Enforce UTF8 output in Powershell.
PowershellUtf8,
/// Compress request bodies (zstd) when sending streaming requests to codex-backend.
EnableRequestCompression,
/// Enable collab tools.
@@ -689,18 +687,6 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Removed,
default_enabled: false,
},
FeatureSpec {
id: Feature::PowershellUtf8,
key: "powershell_utf8",
#[cfg(windows)]
stage: Stage::Stable,
#[cfg(windows)]
default_enabled: true,
#[cfg(not(windows))]
stage: Stage::UnderDevelopment,
#[cfg(not(windows))]
default_enabled: false,
},
FeatureSpec {
id: Feature::EnableRequestCompression,
key: "enable_request_compression",

View File

@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

# Buck target for the sandbox-backed filesystem helper crate. The target name
# matches the directory (`fs-ops`) while the crate is importable as
# `codex_fs_ops`, mirroring the Cargo `[lib]` name.
codex_rust_crate(
    name = "fs-ops",
    crate_name = "codex_fs_ops",
)

View File

@@ -0,0 +1,16 @@
# Manifest for `codex-fs-ops`, the sandbox-backed filesystem helper crate.
[package]
name = "codex-fs-ops"
edition.workspace = true
license.workspace = true
version.workspace = true

[lib]
name = "codex_fs_ops"
path = "src/lib.rs"

[lints]
workspace = true

# Test-only dependencies; the library itself declares no runtime dependencies
# (see the workspace lock file), so embedding it stays cheap.
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,37 @@
use crate::constants::READ_FILE_OPERATION_ARG2;
use std::ffi::OsString;
use std::path::PathBuf;
/// A parsed filesystem operation for the fs-ops helper to execute.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FsCommand {
    /// Read the regular file at `path` and stream its bytes to stdout.
    ReadFile { path: PathBuf },
}
/// Parses an fs-ops command from the helper's remaining CLI arguments
/// (everything after the `--codex-run-as-fs-ops` marker).
///
/// Expects exactly two arguments: a UTF-8 operation name followed by a path.
/// Returns a human-readable error message when the argv is malformed or the
/// operation is unknown.
pub fn parse_command_from_args(
    mut args: impl Iterator<Item = OsString>,
) -> Result<FsCommand, String> {
    let operation_arg = args.next().ok_or_else(|| "missing operation".to_string())?;
    // The operation name must be UTF-8 so it can be matched against the known
    // operation constants; the path, by contrast, may be any OS string.
    let operation = operation_arg
        .to_str()
        .ok_or_else(|| "operation must be valid UTF-8".to_string())?;
    let path = args
        .next()
        .ok_or_else(|| format!("missing path for operation `{operation}`"))?;
    if args.next().is_some() {
        return Err(format!(
            "unexpected extra arguments for operation `{operation}`"
        ));
    }
    match operation {
        READ_FILE_OPERATION_ARG2 => Ok(FsCommand::ReadFile {
            path: PathBuf::from(path),
        }),
        _ => Err(format!("unsupported filesystem operation `{operation}`")),
    }
}
#[cfg(test)]
#[path = "command_tests.rs"]
mod tests;

View File

@@ -0,0 +1,21 @@
use super::FsCommand;
use super::READ_FILE_OPERATION_ARG2;
use super::parse_command_from_args;
use pretty_assertions::assert_eq;
/// A well-formed `read <path>` argv parses into `FsCommand::ReadFile`.
#[test]
fn parse_read_command() {
    let argv: Vec<std::ffi::OsString> = vec![
        READ_FILE_OPERATION_ARG2.into(),
        "/tmp/example.png".into(),
    ];
    let parsed = parse_command_from_args(argv.into_iter()).expect("command should parse");
    assert_eq!(
        parsed,
        FsCommand::ReadFile {
            path: "/tmp/example.png".into(),
        }
    );
}

View File

@@ -0,0 +1,8 @@
/// Special argv[1] flag used when the Codex executable self-invokes to run the
/// internal sandbox-backed filesystem helper path.
pub const CODEX_CORE_FS_OPS_ARG1: &str = "--codex-run-as-fs-ops";

/// When passed as argv[2] to the Codex filesystem helper, it should be followed
/// by a single path argument, and the helper will read the contents of the file
/// at that path and write it to stdout. Exactly one path is accepted; any
/// extra trailing arguments are rejected by the parser.
pub const READ_FILE_OPERATION_ARG2: &str = "read";

View File

@@ -0,0 +1,13 @@
//! The codex-fs-ops crate provides a helper binary for performing various
//! filesystem operations when `codex` is invoked with `--codex-run-as-fs-ops`
//! as the first argument. By exposing this functionality via a CLI, this makes
//! it possible to execute the CLI within a sandboxed context in order to ensure
//! the filesystem restrictions of the sandbox are honored.

// Argument parsing for the helper's operation/path argv.
mod command;
// Shared argv marker strings.
mod constants;
// Execution of a parsed command against the filesystem.
mod runner;

pub use constants::CODEX_CORE_FS_OPS_ARG1;
pub use constants::READ_FILE_OPERATION_ARG2;
pub use runner::run_from_args_and_exit;

View File

@@ -0,0 +1,66 @@
use crate::command::FsCommand;
use crate::command::parse_command_from_args;
use std::ffi::OsString;
use std::io::Read;
use std::io::Write;
/// Runs the fs-ops helper with the given arguments and I/O streams, then
/// terminates the process with exit code 0 on success or 1 on failure.
///
/// Any error is already serialized to `stderr` by `run_from_args`, so the
/// specific error value is discarded here.
pub fn run_from_args_and_exit(
    args: impl Iterator<Item = OsString>,
    stdin: &mut impl Read,
    stdout: &mut impl Write,
    stderr: &mut impl Write,
) -> ! {
    let exit_code = match run_from_args(args, stdin, stdout, stderr) {
        Ok(()) => 0,
        Err(_) => {
            // Discard the specific error, since we already wrote it to stderr.
            1
        }
    };
    // `process::exit` does not run destructors, so any buffering layered on
    // top of these streams would silently drop unwritten data. Flush both
    // before exiting; ignore flush failures since we are exiting anyway.
    stdout.flush().ok();
    stderr.flush().ok();
    std::process::exit(exit_code);
}
/// Testable version of `run_from_args_and_exit` that returns a Result instead
/// of exiting the process. On failure, the error is also serialized to
/// `stderr` as a single `error: ...` line.
fn run_from_args(
    args: impl Iterator<Item = OsString>,
    stdin: &mut impl Read,
    stdout: &mut impl Write,
    stderr: &mut impl Write,
) -> std::io::Result<()> {
    let result = try_run_from_args(args, stdin, stdout);
    if let Err(error) = &result {
        // Best-effort: a failure to write the diagnostic must not mask the
        // underlying error being propagated.
        writeln!(stderr, "error: {error}").ok();
    }
    result
}
/// Parses the argv into an `FsCommand` and executes it, writing any file
/// contents to `stdout`. `stdin` is currently unused by every operation.
fn try_run_from_args(
    args: impl Iterator<Item = OsString>,
    _stdin: &mut impl Read,
    stdout: &mut impl Write,
) -> std::io::Result<()> {
    let command = parse_command_from_args(args)
        .map_err(|message| std::io::Error::new(std::io::ErrorKind::InvalidInput, message))?;
    match command {
        FsCommand::ReadFile { path } => {
            let mut file = std::fs::File::open(&path)?;
            // Inspect the already-open handle's metadata (not the path) so the
            // verdict applies to the exact file we are about to read. Refusing
            // non-regular files avoids hanging on things like /dev/zero.
            let metadata = file.metadata()?;
            if !metadata.is_file() {
                let error_message = format!(
                    "`{path}` is not a regular file",
                    path = path.to_string_lossy()
                );
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    error_message,
                ));
            }
            std::io::copy(&mut file, stdout)?;
            Ok(())
        }
    }
}
#[cfg(test)]
#[path = "runner_tests.rs"]
mod tests;

View File

@@ -0,0 +1,114 @@
use super::run_from_args;
use crate::READ_FILE_OPERATION_ARG2;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
/// Reading a regular file copies its exact bytes (including NULs) to stdout
/// and writes nothing to stderr.
#[test]
fn run_from_args_streams_file_bytes_to_stdout() {
    let dir = tempdir().expect("tempdir");
    let file_path = dir.path().join("image.bin");
    let contents = b"hello\x00world".to_vec();
    std::fs::write(&file_path, &contents).expect("write test file");

    let mut stdin = std::io::empty();
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    let argv: Vec<std::ffi::OsString> = vec![
        READ_FILE_OPERATION_ARG2.into(),
        file_path.to_str().expect("utf-8 test path").into(),
    ];
    run_from_args(argv.into_iter(), &mut stdin, &mut stdout, &mut stderr)
        .expect("read should succeed");

    assert_eq!(stdout, contents);
    assert_eq!(stderr, Vec::<u8>::new());
}
// Unix-only: relies on /dev/zero existing. Verifies the helper's explicit
// regular-file check — it must refuse non-regular files and report the
// refusal on stderr, with no bytes written to stdout.
#[test]
#[cfg(unix)]
fn rejects_path_that_is_not_a_regular_file() {
    let path = std::path::PathBuf::from("/dev/zero");
    let mut stdin = std::io::empty();
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    let error = run_from_args(
        [
            READ_FILE_OPERATION_ARG2,
            path.to_str().expect("utf-8 test path"),
        ]
        .into_iter()
        .map(Into::into),
        &mut stdin,
        &mut stdout,
        &mut stderr,
    )
    .expect_err(
        r#"reading a non-regular file should fail or else
the user risks hanging the process by trying
to read from something like /dev/zero"#,
    );
    // The rejection comes from the helper's own check, hence InvalidInput.
    assert_eq!(error.kind(), std::io::ErrorKind::InvalidInput);
    assert_eq!(stdout, Vec::<u8>::new());
    assert_eq!(
        "error: `/dev/zero` is not a regular file\n",
        String::from_utf8_lossy(&stderr),
    );
}
/// Asking the helper to `read` a directory must fail rather than emit bytes.
#[test]
fn read_reports_directory_error() {
    let tempdir = tempdir().expect("tempdir");
    let mut stdin = std::io::empty();
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    let argv: Vec<std::ffi::OsString> = vec![
        READ_FILE_OPERATION_ARG2.into(),
        tempdir.path().to_str().expect("utf-8 test path").into(),
    ];
    let error = run_from_args(argv.into_iter(), &mut stdin, &mut stdout, &mut stderr)
        .expect_err("reading a directory should fail");
    // The error kind is platform-dependent: on non-Windows the helper's own
    // regular-file check reports InvalidInput, while Windows refuses the
    // open with PermissionDenied.
    #[cfg(not(windows))]
    assert_eq!(error.kind(), std::io::ErrorKind::InvalidInput);
    #[cfg(windows)]
    assert_eq!(error.kind(), std::io::ErrorKind::PermissionDenied);
}
/// A failing operation must serialize its error to stderr (and leave stdout
/// untouched) so the parent process can surface the failure.
///
/// The original version of this test never inspected `stderr`, despite its
/// name; the final assertion closes that gap.
#[test]
fn run_from_args_serializes_errors_to_stderr() {
    let tempdir = tempdir().expect("tempdir");
    let missing = tempdir.path().join("missing.txt");
    let mut stdin = std::io::empty();
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    let result = run_from_args(
        [
            READ_FILE_OPERATION_ARG2,
            missing.to_str().expect("utf-8 test path"),
        ]
        .into_iter()
        .map(Into::into),
        &mut stdin,
        &mut stdout,
        &mut stderr,
    );
    assert!(result.is_err(), "missing file should fail");
    assert_eq!(stdout, Vec::<u8>::new());
    // The point of this test: the error must actually reach stderr in the
    // `error: ...` format that run_from_args writes.
    let stderr_text = String::from_utf8_lossy(&stderr);
    assert!(
        stderr_text.starts_with("error: "),
        "expected stderr to start with `error: ` but got `{stderr_text}`"
    );
}