fix(core): disable command_might_be_dangerous when unsandboxed (#15036)
## Summary

If we are in a mode that is already explicitly un-sandboxed, then `AskForApproval::Never` should not block dangerous commands.

## Testing

- [x] Existing unit test covers old behavior
- [x] Added a unit test for this new case
```diff
@@ -549,7 +549,7 @@ pub fn render_decision_for_unmatched_command(
     // On Windows, ReadOnly sandbox is not a real sandbox, so special-case it
     // here.
-    let runtime_sandbox_provides_safety =
+    let environment_lacks_sandbox_protections =
         cfg!(windows) && matches!(sandbox_policy, SandboxPolicy::ReadOnly { .. });

     // If the command is flagged as dangerous or we have no sandbox protection,
@@ -558,9 +558,20 @@ pub fn render_decision_for_unmatched_command(
     // We prefer to prompt the user rather than outright forbid the command,
     // but if the user has explicitly disabled prompts, we must
     // forbid the command.
-    if command_might_be_dangerous(command) || runtime_sandbox_provides_safety {
+    if command_might_be_dangerous(command) || environment_lacks_sandbox_protections {
         return match approval_policy {
-            AskForApproval::Never => Decision::Forbidden,
+            AskForApproval::Never => {
+                let sandbox_is_explicitly_disabled = matches!(
+                    sandbox_policy,
+                    SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
+                );
+                if sandbox_is_explicitly_disabled {
+                    // If the sandbox is explicitly disabled, we should allow the command to run
+                    Decision::Allow
+                } else {
+                    Decision::Forbidden
+                }
+            }
             AskForApproval::OnFailure
             | AskForApproval::OnRequest
             | AskForApproval::UnlessTrusted
```
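The Testing checklist above mentions a new unit test for this case. As a minimal sketch of what such a test could look like, assuming a call shape of `render_decision_for_unmatched_command(command, sandbox_policy, approval_policy)` (the real argument list is cut off in the hunks above) and that a command like `rm -rf` is flagged by `command_might_be_dangerous`:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    // Sketch only: the function's full signature is not visible in the diff,
    // so the arguments here are assumptions made for illustration.
    #[test]
    fn never_policy_allows_dangerous_command_when_sandbox_is_explicitly_disabled() {
        let command = vec!["rm".to_string(), "-rf".to_string(), "/tmp/scratch".to_string()];
        let decision = render_decision_for_unmatched_command(
            &command,
            &SandboxPolicy::DangerFullAccess,
            AskForApproval::Never,
        );
        // Before this change, `AskForApproval::Never` mapped straight to
        // Decision::Forbidden; with the sandbox explicitly disabled, the
        // command is now allowed.
        assert_eq!(decision, Decision::Allow);
    }
}
```

The inverse case, a real sandbox combined with `AskForApproval::Never`, should still yield `Decision::Forbidden`, which the pre-existing unit test mentioned in the checklist presumably covers.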