Compare commits

..

2 Commits

Author SHA1 Message Date
Michael Bolin
dedd1c386a fix: suppress status card expect_used warnings after #16351 (#16378)
## Why

Follow-up to #16351.

That PR synchronized Bazel clippy lint levels with Cargo, but two
intentional `expect()` calls in `codex-rs/tui/src/status/card.rs` still
tripped `clippy::expect_used` (I believe #16201 raced with #16351, which
is why it was missed).
2026-03-31 17:38:26 -07:00
Michael Bolin
2e942ce830 ci: sync Bazel clippy lints and fix uncovered violations (#16351)
## Why

Follow-up to #16345, the Bazel clippy rollout in #15955, and the cleanup
pass in #16353.

`cargo clippy` was enforcing the workspace deny-list from
`codex-rs/Cargo.toml` because the member crates opt into `[lints]
workspace = true`, but Bazel clippy was only using `rules_rust` plus
`clippy.toml`. That left the Bazel lane vulnerable to drift:
`clippy.toml` can tune lint behavior, but it cannot set
allow/warn/deny/forbid levels.

This PR now closes both sides of the follow-up. It keeps `.bazelrc` in
sync with `[workspace.lints.clippy]`, and it fixes the real clippy
violations that the newly-synced Windows Bazel lane surfaced once that
deny-list started matching Cargo.

## What Changed

- added `.github/scripts/verify_bazel_clippy_lints.py`, a Python check
that parses `codex-rs/Cargo.toml` with `tomllib`, reads the Bazel
`build:clippy` `clippy_flag` entries from `.bazelrc`, and reports
missing, extra, or mismatched lint levels
- ran that verifier from the lightweight `ci.yml` workflow so the sync
check does not depend on a Rust toolchain being installed first
- expanded the `.bazelrc` comment to explain the Cargo `workspace =
true` linkage and why Bazel needs the deny-list duplicated explicitly
- fixed the Windows-only `codex-windows-sandbox` violations that Bazel
clippy reported after the sync, using the same style as #16353: inline
`format!` args, method references instead of trivial closures, removed
redundant clones, and replaced SID conversion `unwrap` and `expect`
calls with proper errors
- cleaned up the remaining cross-platform violations the Bazel lane
exposed in `codex-backend-client` and `core_test_support`

## Testing

Key new test introduced by this PR:

`python3 .github/scripts/verify_bazel_clippy_lints.py`
2026-03-31 17:09:48 -07:00
29 changed files with 566 additions and 805 deletions

View File

@@ -76,6 +76,44 @@ common:ci-bazel --build_metadata=TAG_workflow=bazel
build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect
build:clippy --output_groups=+clippy_checks
build:clippy --@rules_rust//rust/settings:clippy.toml=//codex-rs:clippy.toml
# Keep this deny-list in sync with `codex-rs/Cargo.toml` `[workspace.lints.clippy]`.
# Cargo applies those lint levels to member crates that opt into `[lints] workspace = true`
# in their own `Cargo.toml`, but `rules_rust` Bazel clippy does not read Cargo lint levels.
# `clippy.toml` can configure lint behavior, but it cannot set allow/warn/deny/forbid levels.
build:clippy --@rules_rust//rust/settings:clippy_flag=-Dwarnings
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::expect_used
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::identity_op
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_clamp
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_filter
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_find
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_flatten
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_map
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_memcpy
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_non_exhaustive
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_ok_or
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_range_contains
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_retain
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_strip
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_try_fold
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_unwrap_or
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_borrow
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_borrowed_reference
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_collect
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_late_init
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_option_as_deref
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_question_mark
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::needless_update
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_clone
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_closure
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_closure_for_method_calls
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::redundant_static_lifetimes
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::trivially_copy_pass_by_ref
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::uninlined_format_args
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_filter_map
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_lazy_evaluations
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_sort_by
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unnecessary_to_owned
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::unwrap_used
# Shared config for Bazel-backed argument-comment-lint.
build:argument-comment-lint --aspects=//tools/argument-comment-lint:lint_aspect.bzl%rust_argument_comment_lint_aspect

View File

@@ -0,0 +1,234 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import re
import sys
import tomllib
from pathlib import Path
# Repository root: this script lives at .github/scripts/, two levels below the root.
ROOT = Path(__file__).resolve().parents[2]
DEFAULT_CARGO_TOML = ROOT / "codex-rs" / "Cargo.toml"
DEFAULT_BAZELRC = ROOT / ".bazelrc"

# Every Bazel clippy lint flag is written on a line with this exact prefix.
BAZEL_CLIPPY_FLAG_PREFIX = "build:clippy --@rules_rust//rust/settings:clippy_flag="
# Flags that are intentionally Bazel-only and have no Cargo lint counterpart.
BAZEL_SPECIAL_FLAGS = {"-Dwarnings"}
# Lint levels Cargo accepts in [workspace.lints.clippy].
VALID_LEVELS = {"allow", "warn", "deny", "forbid"}

# Long spelling, e.g. `--deny=clippy::unwrap_used`.
LONG_FLAG_RE = re.compile(
    r"^--(?P<level>allow|warn|deny|forbid)=clippy::(?P<lint>[a-z0-9_]+)$"
)
# Short rustc spelling, e.g. `-Dclippy::unwrap_used`.
SHORT_FLAG_RE = re.compile(r"^-(?P<level>[AWDF])clippy::(?P<lint>[a-z0-9_]+)$")
# Maps rustc's one-letter lint-level switches to Cargo's level names.
SHORT_LEVEL_NAMES = {"A": "allow", "W": "warn", "D": "deny", "F": "forbid"}
def main() -> int:
    """Check that .bazelrc clippy flags mirror Cargo's workspace clippy lints.

    Returns 0 when the two sources agree; otherwise prints a detailed
    report to stderr and returns 1.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Verify that Bazel clippy flags in .bazelrc stay in sync with "
            "codex-rs/Cargo.toml [workspace.lints.clippy]."
        )
    )
    parser.add_argument(
        "--cargo-toml",
        type=Path,
        default=DEFAULT_CARGO_TOML,
        help="Path to the workspace Cargo.toml to inspect.",
    )
    parser.add_argument(
        "--bazelrc",
        type=Path,
        default=DEFAULT_BAZELRC,
        help="Path to the .bazelrc file to inspect.",
    )
    args = parser.parse_args()

    cargo_toml = args.cargo_toml.resolve()
    bazelrc = args.bazelrc.resolve()
    cargo_lints = load_workspace_clippy_lints(cargo_toml)
    bazel_lints = load_bazel_clippy_lints(bazelrc)

    # Compare the two lint maps: key presence first, then per-lint levels.
    shared = cargo_lints.keys() & bazel_lints.keys()
    missing = sorted(cargo_lints.keys() - bazel_lints.keys())
    extra = sorted(bazel_lints.keys() - cargo_lints.keys())
    mismatched = sorted(
        lint for lint in shared if cargo_lints[lint] != bazel_lints[lint]
    )

    if not (missing or extra or mismatched):
        print(
            "Bazel clippy flags in "
            f"{display_path(bazelrc)} match "
            f"{display_path(cargo_toml)} [workspace.lints.clippy]."
        )
        return 0

    print_sync_error(
        cargo_toml=cargo_toml,
        bazelrc=bazelrc,
        cargo_lints=cargo_lints,
        bazel_lints=bazel_lints,
        missing=missing,
        extra=extra,
        mismatched=mismatched,
    )
    return 1
def load_workspace_clippy_lints(cargo_toml: Path) -> dict[str, str]:
workspace = tomllib.loads(cargo_toml.read_text())["workspace"]
clippy_lints = workspace["lints"]["clippy"]
parsed: dict[str, str] = {}
for lint, level in clippy_lints.items():
if not isinstance(level, str):
raise SystemExit(
f"expected string lint level for clippy::{lint} in {cargo_toml}, got {level!r}"
)
normalized = level.strip().lower()
if normalized not in VALID_LEVELS:
raise SystemExit(
f"unsupported lint level {level!r} for clippy::{lint} in {cargo_toml}"
)
parsed[lint] = normalized
return parsed
def load_bazel_clippy_lints(bazelrc: Path) -> dict[str, str]:
    """Collect {lint: level} from `build:clippy` clippy_flag lines in `bazelrc`.

    Lines without the clippy_flag prefix, special flags such as `-Dwarnings`,
    and unrecognized flag shapes are skipped. A lint declared twice is a
    configuration error and aborts with SystemExit naming both line numbers.
    """
    levels: dict[str, str] = {}
    first_seen: dict[str, int] = {}
    for number, raw_line in enumerate(bazelrc.read_text().splitlines(), start=1):
        if not raw_line.startswith(BAZEL_CLIPPY_FLAG_PREFIX):
            continue
        flag = raw_line.removeprefix(BAZEL_CLIPPY_FLAG_PREFIX).strip()
        if flag in BAZEL_SPECIAL_FLAGS:
            # Intentional Bazel-only flags have no Cargo counterpart to record.
            continue
        entry = parse_bazel_lint_flag(flag)
        if entry is None:
            continue
        lint, level = entry
        if lint in levels:
            raise SystemExit(
                f"duplicate Bazel clippy entry for clippy::{lint} at "
                f"{bazelrc}:{first_seen[lint]} and {bazelrc}:{number}"
            )
        levels[lint] = level
        first_seen[lint] = number
    return levels
def parse_bazel_lint_flag(flag: str) -> tuple[str, str] | None:
    """Decode a clippy lint flag into (lint, level), or None if unrecognized.

    Understands both the long spelling (`--deny=clippy::foo`) and the short
    rustc spelling (`-Dclippy::foo`).
    """
    if match := LONG_FLAG_RE.match(flag):
        return match["lint"], match["level"]
    if match := SHORT_FLAG_RE.match(flag):
        return match["lint"], SHORT_LEVEL_NAMES[match["level"]]
    return None
def print_sync_error(
    *,
    cargo_toml: Path,
    bazelrc: Path,
    cargo_lints: dict[str, str],
    bazel_lints: dict[str, str],
    missing: list[str],
    extra: list[str],
    mismatched: list[str],
) -> None:
    """Write a human-readable out-of-sync report to stderr.

    Explains where the source of truth lives, why Bazel needs the explicit
    duplication, then lists missing, mismatched, and extra lint entries with
    copy-pasteable .bazelrc lines.
    """

    def emit(message: str = "") -> None:
        # All report output goes to stderr so stdout stays clean for tooling.
        print(message, file=sys.stderr)

    cargo_toml_display = display_path(cargo_toml)
    bazelrc_display = display_path(bazelrc)
    example_manifest = find_workspace_lints_example_manifest()

    emit("ERROR: Bazel clippy flags are out of sync with Cargo workspace clippy lints.")
    emit()
    emit(
        f"Cargo defines the source of truth in {cargo_toml_display} "
        "[workspace.lints.clippy]."
    )
    if example_manifest is not None:
        emit(
            "Cargo applies those lint levels to member crates that opt into "
            f"`[lints] workspace = true`, for example {example_manifest}."
        )
    emit(
        "Bazel clippy does not ingest Cargo lint levels automatically, and "
        "`clippy.toml` can configure lint behavior but cannot set allow/warn/deny/forbid."
    )
    emit(
        f"Update {bazelrc_display} so its `build:clippy` "
        "`clippy_flag` entries match Cargo."
    )

    if missing:
        emit()
        emit("Missing Bazel entries:")
        for lint in missing:
            emit(f"  {render_bazelrc_line(lint, cargo_lints[lint])}")

    if mismatched:
        emit()
        emit("Mismatched lint levels:")
        for lint in mismatched:
            cargo_level = cargo_lints[lint]
            emit(
                f"  clippy::{lint}: Cargo has {cargo_level}, Bazel has {bazel_lints[lint]}"
            )
            emit(f"    expected: {render_bazelrc_line(lint, cargo_level)}")

    if extra:
        emit()
        emit("Extra Bazel entries with no Cargo counterpart:")
        for lint in extra:
            emit(f"  {render_bazelrc_line(lint, bazel_lints[lint])}")
def render_bazelrc_line(lint: str, level: str) -> str:
    """Format the exact .bazelrc line that declares `lint` at `level`."""
    return BAZEL_CLIPPY_FLAG_PREFIX + f"--{level}=clippy::{lint}"
def display_path(path: Path) -> str:
    """Render `path` relative to the repo root when possible, else as-is."""
    if path.is_relative_to(ROOT):
        return str(path.relative_to(ROOT))
    return str(path)
def find_workspace_lints_example_manifest() -> str | None:
    """Find one member-crate manifest that opts into `[lints] workspace = true`.

    Scans codex-rs/**/Cargo.toml in sorted order, skipping the workspace
    manifest itself, and returns the first match repo-root relative, or None.
    """
    for manifest in sorted((ROOT / "codex-rs").glob("**/Cargo.toml")):
        if manifest == DEFAULT_CARGO_TOML:
            continue
        manifest_data = tomllib.loads(manifest.read_text())
        if manifest_data.get("lints", {}).get("workspace") is True:
            return str(manifest.relative_to(ROOT))
    return None
if __name__ == "__main__":
    # Equivalent to sys.exit(main()): propagate the exit code to the shell.
    raise SystemExit(main())

View File

@@ -17,6 +17,9 @@ jobs:
- name: Verify codex-rs Cargo manifests inherit workspace settings
run: python3 .github/scripts/verify_cargo_workspace_manifests.py
- name: Verify Bazel clippy flags match Cargo workspace lints
run: python3 .github/scripts/verify_bazel_clippy_lints.py
- name: Setup pnpm
uses: pnpm/action-setup@a8198c4bff370c8506180b035930dea56dbd5288 # v5
with:

View File

@@ -309,7 +309,7 @@ where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
Option::<Vec<T>>::deserialize(deserializer).map(|opt| opt.unwrap_or_default())
Option::<Vec<T>>::deserialize(deserializer).map(Option::unwrap_or_default)
}
#[derive(Clone, Debug, Deserialize)]

View File

@@ -54,105 +54,56 @@ impl ToolHandler for Handler {
call_id: call_id.clone(),
sender_thread_id: session.conversation_id,
prompt: prompt.clone(),
model: args
.model_fallback_list
.as_ref()
.and_then(|list| list.first())
.map(|candidate| candidate.model.clone())
.unwrap_or_else(|| args.model.clone().unwrap_or_default()),
reasoning_effort: args
.model_fallback_list
.as_ref()
.and_then(|list| list.first())
.and_then(|candidate| candidate.reasoning_effort)
.unwrap_or_else(|| args.reasoning_effort.unwrap_or_default()),
model: args.model.clone().unwrap_or_default(),
reasoning_effort: args.reasoning_effort.unwrap_or_default(),
}
.into(),
)
.await;
let config =
let mut config =
build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?;
let mut candidates_to_try = collect_spawn_agent_model_candidates(
args.model_fallback_list.as_ref(),
apply_requested_spawn_agent_model_overrides(
&session,
turn.as_ref(),
&mut config,
args.model.as_deref(),
args.reasoning_effort,
);
if candidates_to_try.is_empty() {
candidates_to_try.push(SpawnAgentModelCandidate {
model: None,
reasoning_effort: None,
});
}
)
.await?;
apply_role_to_config(&mut config, role_name)
.await
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
let mut spawn_result = None;
for (idx, candidate) in candidates_to_try.iter().enumerate() {
let mut candidate_config = config.clone();
apply_requested_spawn_agent_model_overrides(
&session,
turn.as_ref(),
&mut candidate_config,
candidate.model.as_deref(),
candidate.reasoning_effort,
let result = session
.services
.agent_control
.spawn_agent_with_metadata(
config,
input_items,
Some(thread_spawn_source(
session.conversation_id,
&turn.session_source,
child_depth,
role_name,
/*task_name*/ None,
)?),
SpawnAgentOptions {
fork_parent_spawn_call_id: args.fork_context.then(|| call_id.clone()),
fork_mode: args.fork_context.then_some(SpawnAgentForkMode::FullHistory),
},
)
.await?;
apply_role_to_config(&mut candidate_config, role_name)
.await
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut candidate_config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut candidate_config, child_depth);
let attempt_result = session
.services
.agent_control
.spawn_agent_with_metadata(
candidate_config,
input_items.clone(),
Some(thread_spawn_source(
session.conversation_id,
&turn.session_source,
child_depth,
role_name,
/*task_name*/ None,
)?),
SpawnAgentOptions {
fork_parent_spawn_call_id: args.fork_context.then(|| call_id.clone()),
fork_mode: args.fork_context.then_some(SpawnAgentForkMode::FullHistory),
},
)
.await;
match attempt_result {
Ok(spawned_agent) => {
if spawn_should_retry_on_async_quota_exhaustion(
spawned_agent.status.clone(),
spawned_agent.thread_id,
&session.services.agent_control,
)
.await
&& idx + 1 < candidates_to_try.len()
{
continue;
}
spawn_result = Some(spawned_agent);
break;
}
Err(err) => {
if spawn_should_retry_on_quota_exhaustion(&err)
&& idx + 1 < candidates_to_try.len()
{
continue;
}
return Err(collab_spawn_error(err));
}
}
}
let Some(spawned_agent) = spawn_result else {
return Err(FunctionCallError::RespondToModel(
"No spawn attempts were executed".to_string(),
));
.await
.map_err(collab_spawn_error);
let (new_thread_id, new_agent_metadata, status) = match &result {
Ok(spawned_agent) => (
Some(spawned_agent.thread_id),
Some(spawned_agent.metadata.clone()),
spawned_agent.status.clone(),
),
Err(_) => (None, None, AgentStatus::NotFound),
};
let new_thread_id = Some(spawned_agent.thread_id);
let new_agent_metadata = Some(spawned_agent.metadata.clone());
let status = spawned_agent.status.clone();
let agent_snapshot = match new_thread_id {
Some(thread_id) => {
session
@@ -203,7 +154,7 @@ impl ToolHandler for Handler {
.into(),
)
.await;
let new_thread_id = spawned_agent.thread_id;
let new_thread_id = result?.thread_id;
let role_tag = role_name.unwrap_or(DEFAULT_ROLE_NAME);
turn.session_telemetry.counter(
"codex.multi_agent.spawn",
@@ -224,7 +175,6 @@ struct SpawnAgentArgs {
items: Option<Vec<UserInput>>,
agent_type: Option<String>,
model: Option<String>,
model_fallback_list: Option<Vec<SpawnAgentModelFallbackCandidate>>,
reasoning_effort: Option<ReasoningEffort>,
#[serde(default)]
fork_context: bool,

View File

@@ -1,5 +1,4 @@
use crate::agent::AgentStatus;
use crate::agent::status::is_final;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::config::Config;
@@ -22,12 +21,9 @@ use codex_protocol::protocol::Op;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_protocol::user_input::UserInput;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use tokio::time::Duration;
use tokio::time::timeout;
/// Minimum wait timeout to prevent tight polling loops from burning CPU.
pub(crate) const MIN_WAIT_TIMEOUT_MS: i64 = 10_000;
@@ -75,96 +71,6 @@ where
})
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SpawnAgentModelCandidate {
pub(crate) model: Option<String>,
pub(crate) reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub(crate) struct SpawnAgentModelFallbackCandidate {
pub(crate) model: String,
#[serde(default)]
pub(crate) reasoning_effort: Option<ReasoningEffort>,
}
pub(crate) fn collect_spawn_agent_model_candidates(
model_fallback_list: Option<&Vec<SpawnAgentModelFallbackCandidate>>,
requested_model: Option<&str>,
requested_reasoning_effort: Option<ReasoningEffort>,
) -> Vec<SpawnAgentModelCandidate> {
if let Some(model_fallback_list) = model_fallback_list {
return model_fallback_list
.iter()
.map(|candidate| SpawnAgentModelCandidate {
model: Some(candidate.model.clone()),
reasoning_effort: candidate.reasoning_effort,
})
.collect();
}
let mut candidates = Vec::new();
if requested_model.is_some() || requested_reasoning_effort.is_some() {
candidates.push(SpawnAgentModelCandidate {
model: requested_model.map(ToString::to_string),
reasoning_effort: requested_reasoning_effort,
});
}
candidates
}
pub(crate) fn spawn_should_retry_on_quota_exhaustion(error: &CodexErr) -> bool {
matches!(
error,
CodexErr::QuotaExceeded | CodexErr::UsageLimitReached(_)
)
}
pub(crate) async fn spawn_should_retry_on_async_quota_exhaustion(
thread_status: AgentStatus,
thread_id: ThreadId,
agent_control: &crate::agent::control::AgentControl,
) -> bool {
if is_final(&thread_status) && spawn_should_retry_on_quota_exhaustion_status(&thread_status) {
return true;
}
let Ok(mut status_rx) = agent_control.subscribe_status(thread_id).await else {
return false;
};
let mut status = status_rx.borrow_and_update().clone();
if is_final(&status) && spawn_should_retry_on_quota_exhaustion_status(&status) {
return true;
}
loop {
if timeout(Duration::from_millis(250), status_rx.changed())
.await
.is_err()
{
break;
}
status = status_rx.borrow().clone();
if is_final(&status) {
return spawn_should_retry_on_quota_exhaustion_status(&status);
}
}
false
}
fn spawn_should_retry_on_quota_exhaustion_status(status: &AgentStatus) -> bool {
match status {
AgentStatus::Errored(message) => {
let message = message.to_lowercase();
message.contains("insufficient_quota")
|| message.contains("usage limit")
|| message.contains("quota")
}
AgentStatus::NotFound => false,
_ => false,
}
}
pub(crate) fn build_wait_agent_statuses(
statuses: &HashMap<ThreadId, AgentStatus>,
receiver_agents: &[CollabAgentRef],
@@ -457,111 +363,3 @@ fn validate_spawn_agent_reasoning_effort(
"Reasoning effort `{requested_reasoning_effort}` is not supported for model `{model}`. Supported reasoning efforts: {supported}"
)))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::UsageLimitReachedError;
use crate::protocol::AgentStatus;
#[test]
fn collect_spawn_agent_model_candidates_prefers_fallback_list() {
let candidates = collect_spawn_agent_model_candidates(
Some(&vec![
SpawnAgentModelFallbackCandidate {
model: "fallback-a".to_string(),
reasoning_effort: Some(ReasoningEffort::High),
},
SpawnAgentModelFallbackCandidate {
model: "fallback-b".to_string(),
reasoning_effort: Some(ReasoningEffort::Minimal),
},
]),
Some("legacy-model"),
Some(ReasoningEffort::Low),
);
assert_eq!(
candidates,
vec![
SpawnAgentModelCandidate {
model: Some("fallback-a".to_string()),
reasoning_effort: Some(ReasoningEffort::High),
},
SpawnAgentModelCandidate {
model: Some("fallback-b".to_string()),
reasoning_effort: Some(ReasoningEffort::Minimal),
},
]
);
}
#[test]
fn collect_spawn_agent_model_candidates_falls_back_to_legacy_args() {
let candidates = collect_spawn_agent_model_candidates(
/*model_fallback_list*/ None,
Some("legacy-model"),
Some(ReasoningEffort::Minimal),
);
assert_eq!(
candidates,
vec![SpawnAgentModelCandidate {
model: Some("legacy-model".to_string()),
reasoning_effort: Some(ReasoningEffort::Minimal),
}]
);
}
#[test]
fn collect_spawn_agent_model_candidates_empty_when_no_model_is_set() {
let candidates = collect_spawn_agent_model_candidates(
/*model_fallback_list*/ None, /*requested_model*/ None,
/*requested_reasoning_effort*/ None,
);
assert_eq!(candidates, Vec::new());
}
#[test]
fn spawn_should_retry_on_quota_exhaustion_checks_expected_error_variants() {
assert!(spawn_should_retry_on_quota_exhaustion(
&CodexErr::QuotaExceeded
));
assert!(spawn_should_retry_on_quota_exhaustion(
&CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type: None,
resets_at: None,
rate_limits: None,
promo_message: None,
})
));
assert!(!spawn_should_retry_on_quota_exhaustion(
&CodexErr::UnsupportedOperation("thread manager dropped".to_string())
));
}
#[test]
fn collab_spawn_error_handles_thread_manager_drop() {
assert_eq!(
collab_spawn_error(CodexErr::UnsupportedOperation(
"thread manager dropped".to_string()
)),
FunctionCallError::RespondToModel("collab manager unavailable".to_string())
);
}
#[test]
fn build_wait_agent_statuses_includes_extras_in_sorted_order() {
let receiver_agents = vec![];
let mut statuses = HashMap::new();
let thread_a = ThreadId::new();
let thread_b = ThreadId::new();
statuses.insert(thread_b, AgentStatus::Completed(Some("done".to_string())));
statuses.insert(thread_a, AgentStatus::Completed(Some("done".to_string())));
let entries = build_wait_agent_statuses(&statuses, &receiver_agents);
assert_eq!(entries.len(), 2);
assert_eq!(entries[0].thread_id, thread_a);
assert_eq!(entries[1].thread_id, thread_b);
}
}

View File

@@ -33,13 +33,14 @@ impl ToolHandler for Handler {
} = invocation;
let arguments = function_arguments(payload)?;
let args: SpawnAgentArgs = parse_arguments(&arguments)?;
let fork_mode = args.fork_mode()?;
let role_name = args
.agent_type
.as_deref()
.map(str::trim)
.filter(|role| !role.is_empty());
let initial_operation = parse_collab_input(args.message, args.items)?;
let initial_operation = parse_collab_input(/*message*/ None, Some(args.items))?;
let prompt = render_input_preview(&initial_operation);
let session_source = turn.session_source.clone();
@@ -57,24 +58,27 @@ impl ToolHandler for Handler {
call_id: call_id.clone(),
sender_thread_id: session.conversation_id,
prompt: prompt.clone(),
model: args
.model_fallback_list
.as_ref()
.and_then(|list| list.first())
.map(|candidate| candidate.model.clone())
.unwrap_or_else(|| args.model.clone().unwrap_or_default()),
reasoning_effort: args
.model_fallback_list
.as_ref()
.and_then(|list| list.first())
.and_then(|candidate| candidate.reasoning_effort)
.unwrap_or_else(|| args.reasoning_effort.unwrap_or_default()),
model: args.model.clone().unwrap_or_default(),
reasoning_effort: args.reasoning_effort.unwrap_or_default(),
}
.into(),
)
.await;
let config =
let mut config =
build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?;
apply_requested_spawn_agent_model_overrides(
&session,
turn.as_ref(),
&mut config,
args.model.as_deref(),
args.reasoning_effort,
)
.await?;
apply_role_to_config(&mut config, role_name)
.await
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
let spawn_source = thread_spawn_source(
session.conversation_id,
@@ -83,100 +87,47 @@ impl ToolHandler for Handler {
role_name,
Some(args.task_name.clone()),
)?;
let initial_agent_op = match (spawn_source.get_agent_path(), initial_operation) {
(Some(recipient), Op::UserInput { items, .. })
if items
.iter()
.all(|item| matches!(item, UserInput::Text { .. })) =>
{
Op::InterAgentCommunication {
communication: InterAgentCommunication::new(
turn.session_source
.get_agent_path()
.unwrap_or_else(AgentPath::root),
recipient,
Vec::new(),
prompt.clone(),
/*trigger_turn*/ true,
),
}
}
(_, initial_operation) => initial_operation,
};
let mut candidates_to_try = collect_spawn_agent_model_candidates(
args.model_fallback_list.as_ref(),
args.model.as_deref(),
args.reasoning_effort,
);
if candidates_to_try.is_empty() {
candidates_to_try.push(SpawnAgentModelCandidate {
model: None,
reasoning_effort: None,
});
}
let mut spawn_result = None;
for (idx, candidate) in candidates_to_try.iter().enumerate() {
let mut candidate_config = config.clone();
apply_requested_spawn_agent_model_overrides(
&session,
turn.as_ref(),
&mut candidate_config,
candidate.model.as_deref(),
candidate.reasoning_effort,
let result = session
.services
.agent_control
.spawn_agent_with_metadata(
config,
match (spawn_source.get_agent_path(), initial_operation) {
(Some(recipient), Op::UserInput { items, .. })
if items
.iter()
.all(|item| matches!(item, UserInput::Text { .. })) =>
{
Op::InterAgentCommunication {
communication: InterAgentCommunication::new(
turn.session_source
.get_agent_path()
.unwrap_or_else(AgentPath::root),
recipient,
Vec::new(),
prompt.clone(),
/*trigger_turn*/ true,
),
}
}
(_, initial_operation) => initial_operation,
},
Some(spawn_source),
SpawnAgentOptions {
fork_parent_spawn_call_id: fork_mode.as_ref().map(|_| call_id.clone()),
fork_mode,
},
)
.await?;
apply_role_to_config(&mut candidate_config, role_name)
.await
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut candidate_config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut candidate_config, child_depth);
let attempt_result = session
.services
.agent_control
.spawn_agent_with_metadata(
candidate_config,
initial_agent_op.clone(),
Some(spawn_source.clone()),
SpawnAgentOptions {
fork_parent_spawn_call_id: args.fork_context.then(|| call_id.clone()),
fork_mode: args.fork_context.then_some(SpawnAgentForkMode::FullHistory),
},
)
.await;
match attempt_result {
Ok(spawned_agent) => {
if spawn_should_retry_on_async_quota_exhaustion(
spawned_agent.status.clone(),
spawned_agent.thread_id,
&session.services.agent_control,
)
.await
&& idx + 1 < candidates_to_try.len()
{
continue;
}
spawn_result = Some(spawned_agent);
break;
}
Err(err) => {
if spawn_should_retry_on_quota_exhaustion(&err)
&& idx + 1 < candidates_to_try.len()
{
continue;
}
return Err(collab_spawn_error(err));
}
}
}
let Some(spawned_agent) = spawn_result else {
return Err(FunctionCallError::RespondToModel(
"No spawn attempts were executed".to_string(),
));
.await
.map_err(collab_spawn_error);
let (new_thread_id, new_agent_metadata, status) = match &result {
Ok(spawned_agent) => (
Some(spawned_agent.thread_id),
Some(spawned_agent.metadata.clone()),
spawned_agent.status.clone(),
),
Err(_) => (None, None, AgentStatus::NotFound),
};
let new_thread_id = Some(spawned_agent.thread_id);
let new_agent_metadata = Some(spawned_agent.metadata.clone());
let status = spawned_agent.status.clone();
let agent_snapshot = match new_thread_id {
Some(thread_id) => {
session
@@ -227,6 +178,7 @@ impl ToolHandler for Handler {
.into(),
)
.await;
let _ = result?;
let role_tag = role_name.unwrap_or(DEFAULT_ROLE_NAME);
turn.session_telemetry.counter(
"codex.multi_agent.spawn",
@@ -248,16 +200,54 @@ impl ToolHandler for Handler {
}
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
struct SpawnAgentArgs {
message: Option<String>,
items: Option<Vec<UserInput>>,
items: Vec<UserInput>,
task_name: String,
agent_type: Option<String>,
model: Option<String>,
model_fallback_list: Option<Vec<SpawnAgentModelFallbackCandidate>>,
reasoning_effort: Option<ReasoningEffort>,
#[serde(default)]
fork_context: bool,
fork_turns: Option<String>,
fork_context: Option<bool>,
}
impl SpawnAgentArgs {
fn fork_mode(&self) -> Result<Option<SpawnAgentForkMode>, FunctionCallError> {
if self.fork_context.is_some() {
return Err(FunctionCallError::RespondToModel(
"fork_context is not supported in MultiAgentV2; use fork_turns instead".to_string(),
));
}
let Some(fork_turns) = self
.fork_turns
.as_deref()
.map(str::trim)
.filter(|fork_turns| !fork_turns.is_empty())
else {
return Ok(None);
};
if fork_turns.eq_ignore_ascii_case("none") {
return Ok(None);
}
if fork_turns.eq_ignore_ascii_case("all") {
return Ok(Some(SpawnAgentForkMode::FullHistory));
}
let last_n_turns = fork_turns.parse::<usize>().map_err(|_| {
FunctionCallError::RespondToModel(
"fork_turns must be `none`, `all`, or a positive integer string".to_string(),
)
})?;
if last_n_turns == 0 {
return Err(FunctionCallError::RespondToModel(
"fork_turns must be `none`, `all`, or a positive integer string".to_string(),
));
}
Ok(Some(SpawnAgentForkMode::LastNTurns(last_n_turns)))
}
}
#[derive(Debug, Serialize)]

View File

@@ -287,7 +287,7 @@ where
F: Fn(&codex_protocol::protocol::EventMsg) -> Option<T>,
{
let ev = wait_for_event(codex, |ev| matcher(ev).is_some()).await;
matcher(&ev).unwrap()
matcher(&ev).expect("EventMsg should match matcher predicate")
}
pub async fn wait_for_event_with_timeout<F>(
@@ -417,7 +417,7 @@ pub mod fs_wait {
let deadline = Instant::now() + timeout;
loop {
if path.exists() {
return Ok(path.clone());
return Ok(path);
}
let now = Instant::now();
if now >= deadline {
@@ -427,7 +427,7 @@ pub mod fs_wait {
match rx.recv_timeout(remaining) {
Ok(Ok(_event)) => {
if path.exists() {
return Ok(path.clone());
return Ok(path);
}
}
Ok(Err(err)) => return Err(err.into()),

View File

@@ -1,3 +1,5 @@
#![allow(clippy::unwrap_used)]
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::Mutex;

View File

@@ -270,7 +270,7 @@ fn docker_command_success<const N: usize>(args: [&str; N]) -> Result<()> {
let output = Command::new("docker")
.args(args)
.output()
.with_context(|| format!("run docker {:?}", args))?;
.with_context(|| format!("run docker {args:?}"))?;
if !output.status.success() {
return Err(anyhow!(
"docker {:?} failed: stdout={} stderr={}",
@@ -286,7 +286,7 @@ fn docker_command_capture_stdout<const N: usize>(args: [&str; N]) -> Result<Stri
let output = Command::new("docker")
.args(args)
.output()
.with_context(|| format!("run docker {:?}", args))?;
.with_context(|| format!("run docker {args:?}"))?;
if !output.status.success() {
return Err(anyhow!(
"docker {:?} failed: stdout={} stderr={}",
@@ -346,7 +346,7 @@ impl TestCodexBuilder {
pub fn with_model(self, model: &str) -> Self {
let new_model = model.to_string();
self.with_config(move |config| {
config.model = Some(new_model.clone());
config.model = Some(new_model);
})
}

View File

@@ -18,7 +18,6 @@ use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::time::Duration;
use tokio::time::Instant;
@@ -37,10 +36,6 @@ const REQUESTED_MODEL: &str = "gpt-5.1";
const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
const ROLE_MODEL: &str = "gpt-5.1-codex-max";
const ROLE_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::High;
const FALLBACK_MODEL_A: &str = "gpt-5.1";
const FALLBACK_REASONING_EFFORT_A: ReasoningEffort = ReasoningEffort::Low;
const FALLBACK_MODEL_B: &str = "gpt-5.2";
const FALLBACK_REASONING_EFFORT_B: ReasoningEffort = ReasoningEffort::Medium;
fn body_contains(req: &wiremock::Request, text: &str) -> bool {
let is_zstd = req
@@ -62,57 +57,6 @@ fn body_contains(req: &wiremock::Request, text: &str) -> bool {
.is_some_and(|body| body.contains(text))
}
fn request_uses_model_and_effort(
req: &wiremock::Request,
model: &str,
reasoning_effort: &str,
) -> bool {
let is_zstd = req
.headers
.get("content-encoding")
.and_then(|value| value.to_str().ok())
.is_some_and(|value| {
value
.split(',')
.any(|entry| entry.trim().eq_ignore_ascii_case("zstd"))
});
let bytes = if is_zstd {
zstd::stream::decode_all(std::io::Cursor::new(&req.body)).ok()
} else {
Some(req.body.clone())
};
bytes
.and_then(|body| serde_json::from_slice::<Value>(&body).ok())
.is_some_and(|body| {
body.get("model").and_then(Value::as_str) == Some(model)
&& body
.get("reasoning")
.and_then(|reasoning| reasoning.get("effort"))
.and_then(Value::as_str)
== Some(reasoning_effort)
})
}
fn request_uses_model(req: &wiremock::Request, model: &str) -> bool {
let is_zstd = req
.headers
.get("content-encoding")
.and_then(|value| value.to_str().ok())
.is_some_and(|value| {
value
.split(',')
.any(|entry| entry.trim().eq_ignore_ascii_case("zstd"))
});
let bytes = if is_zstd {
zstd::stream::decode_all(std::io::Cursor::new(&req.body)).ok()
} else {
Some(req.body.clone())
};
bytes
.and_then(|body| serde_json::from_slice::<Value>(&body).ok())
.is_some_and(|body| body.get("model").and_then(Value::as_str) == Some(model))
}
fn has_subagent_notification(req: &ResponsesRequest) -> bool {
req.message_input_texts("user")
.iter()
@@ -536,184 +480,6 @@ async fn spawn_agent_role_overrides_requested_model_and_reasoning_settings() ->
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn spawn_agent_model_fallback_list_retries_after_quota_exhaustion() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let spawn_args = serde_json::to_string(&json!({
"message": CHILD_PROMPT,
"model_fallback_list": [
{
"model": FALLBACK_MODEL_A,
"reasoning_effort": FALLBACK_REASONING_EFFORT_A,
},
{
"model": FALLBACK_MODEL_B,
"reasoning_effort": FALLBACK_REASONING_EFFORT_B,
}
]
}))?;
mount_sse_once_match(
&server,
|req: &wiremock::Request| body_contains(req, TURN_1_PROMPT),
sse(vec![
ev_response_created("resp-turn1-1"),
ev_function_call(SPAWN_CALL_ID, "spawn_agent", &spawn_args),
ev_completed("resp-turn1-1"),
]),
)
.await;
let quota_child_attempt = mount_sse_once_match(
&server,
|req: &wiremock::Request| {
body_contains(req, CHILD_PROMPT)
&& request_uses_model_and_effort(req, FALLBACK_MODEL_A, "low")
&& !body_contains(req, SPAWN_CALL_ID)
},
sse(vec![
ev_response_created("resp-child-quota"),
json!({
"type": "response.failed",
"response": {
"id": "resp-child-quota",
"error": {
"code": "insufficient_quota",
"message": "You exceeded your current quota, please check your plan and billing details."
}
}
}),
]),
)
.await;
let fallback_child_attempt = mount_sse_once_match(
&server,
|req: &wiremock::Request| {
body_contains(req, CHILD_PROMPT)
&& request_uses_model(req, FALLBACK_MODEL_B)
&& !body_contains(req, SPAWN_CALL_ID)
},
sse(vec![
ev_response_created("resp-child-fallback"),
ev_assistant_message("msg-child-fallback", "child done"),
ev_completed("resp-child-fallback"),
]),
)
.await;
let _turn1_followup = mount_sse_once_match(
&server,
|req: &wiremock::Request| body_contains(req, SPAWN_CALL_ID),
sse(vec![
ev_response_created("resp-turn1-2"),
ev_assistant_message("msg-turn1-2", "parent done"),
ev_completed("resp-turn1-2"),
]),
)
.await;
let mut builder = test_codex().with_config(|config| {
config
.features
.enable(Feature::Collab)
.expect("test config should allow feature update");
config.model = Some(INHERITED_MODEL.to_string());
config.model_reasoning_effort = Some(INHERITED_REASONING_EFFORT);
});
let test = builder.build(&server).await?;
test.submit_turn(TURN_1_PROMPT).await?;
let quota_requests = quota_child_attempt
.requests()
.into_iter()
.filter(|request| {
request.body_json().get("model").and_then(Value::as_str) == Some(FALLBACK_MODEL_A)
})
.collect::<Vec<_>>();
assert!(!quota_requests.is_empty());
for quota_request in &quota_requests {
let body = quota_request.body_json();
assert_eq!(
body.get("model").and_then(Value::as_str),
Some(FALLBACK_MODEL_A)
);
assert_eq!(
body.get("reasoning")
.and_then(|reasoning| reasoning.get("effort"))
.and_then(Value::as_str),
Some("low")
);
}
let fallback_requests = fallback_child_attempt
.requests()
.into_iter()
.filter(|request| {
request.body_json().get("model").and_then(Value::as_str) == Some(FALLBACK_MODEL_B)
})
.collect::<Vec<_>>();
assert!(!fallback_requests.is_empty());
for fallback_request in &fallback_requests {
let fallback_body = fallback_request.body_json();
assert_eq!(
fallback_body.get("model").and_then(Value::as_str),
Some(FALLBACK_MODEL_B)
);
if let Some(effort) = fallback_body
.get("reasoning")
.and_then(|reasoning| reasoning.get("effort"))
.and_then(Value::as_str)
{
assert_eq!(effort, "medium");
}
}
let deadline = Instant::now() + Duration::from_secs(2);
let child_snapshot = loop {
let spawned_ids = test
.thread_manager
.list_thread_ids()
.await
.into_iter()
.filter(|id| *id != test.session_configured.session_id)
.collect::<Vec<_>>();
let mut matching_snapshot = None;
for thread_id in spawned_ids {
let snapshot = test
.thread_manager
.get_thread(thread_id)
.await?
.config_snapshot()
.await;
if snapshot.model == FALLBACK_MODEL_B
&& snapshot.reasoning_effort == Some(FALLBACK_REASONING_EFFORT_B)
{
matching_snapshot = Some(snapshot);
break;
}
}
if let Some(snapshot) = matching_snapshot {
break snapshot;
}
if Instant::now() >= deadline {
anyhow::bail!("timed out waiting for fallback child snapshot");
}
sleep(Duration::from_millis(10)).await;
};
assert_eq!(child_snapshot.model, FALLBACK_MODEL_B);
assert_eq!(
child_snapshot.reasoning_effort,
Some(FALLBACK_REASONING_EFFORT_B)
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn spawn_agent_tool_description_mentions_role_locked_settings() -> Result<()> {
skip_if_no_network!(Ok(()));

View File

@@ -23,7 +23,7 @@ pub fn create_spawn_agent_tool_v1(options: SpawnAgentToolOptions<'_>) -> ToolSpe
let available_models_description = spawn_agent_models_description(options.available_models);
let return_value_description =
"Returns the spawned agent id plus the user-facing nickname when available.";
let properties = spawn_agent_common_properties(&options.agent_type_description);
let properties = spawn_agent_common_properties_v1(&options.agent_type_description);
ToolSpec::Function(ResponsesApiTool {
name: "spawn_agent".to_string(),
@@ -45,7 +45,7 @@ pub fn create_spawn_agent_tool_v1(options: SpawnAgentToolOptions<'_>) -> ToolSpe
pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpec {
let available_models_description = spawn_agent_models_description(options.available_models);
let return_value_description = "Returns the canonical task name for the spawned agent, plus the user-facing nickname when available.";
let mut properties = spawn_agent_common_properties(&options.agent_type_description);
let mut properties = spawn_agent_common_properties_v2(&options.agent_type_description);
properties.insert(
"task_name".to_string(),
JsonSchema::String {
@@ -66,7 +66,7 @@ pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpe
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["task_name".to_string()]),
required: Some(vec!["task_name".to_string(), "items".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: Some(spawn_agent_output_schema_v2()),
@@ -128,20 +128,11 @@ pub fn create_send_message_tool() -> ToolSpec {
},
),
("items".to_string(), create_collab_input_items_schema()),
(
"interrupt".to_string(),
JsonSchema::Boolean {
description: Some(
"When true, stop the agent's current task and handle this immediately. When false (default), queue this message."
.to_string(),
),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "send_message".to_string(),
description: "Add a message to an existing agent without triggering a new turn. Use interrupt=true to stop the current task first. In MultiAgentV2, this tool currently supports text content only."
description: "Add a message to an existing agent without triggering a new turn. In MultiAgentV2, this tool currently supports text content only."
.to_string(),
strict: false,
defer_loading: None,
@@ -544,28 +535,7 @@ fn create_collab_input_items_schema() -> JsonSchema {
}
}
fn spawn_agent_common_properties(agent_type_description: &str) -> BTreeMap<String, JsonSchema> {
let model_fallback_item_properties = BTreeMap::from([
(
"model".to_string(),
JsonSchema::String {
description: Some(
"Model to try. Must be a model slug from the current model picker list."
.to_string(),
),
},
),
(
"reasoning_effort".to_string(),
JsonSchema::String {
description: Some(
"Optional reasoning effort override for this candidate. Replaces the inherited reasoning effort."
.to_string(),
),
},
),
]);
fn spawn_agent_common_properties_v1(agent_type_description: &str) -> BTreeMap<String, JsonSchema> {
BTreeMap::from([
(
"message".to_string(),
@@ -602,15 +572,40 @@ fn spawn_agent_common_properties(agent_type_description: &str) -> BTreeMap<Strin
},
),
(
"model_fallback_list".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::Object {
properties: model_fallback_item_properties,
required: Some(vec!["model".to_string()]),
additional_properties: Some(false.into()),
}),
"reasoning_effort".to_string(),
JsonSchema::String {
description: Some(
"Ordered model candidates for fallback retries. Each entry may include an optional reasoning effort."
"Optional reasoning effort override for the new agent. Replaces the inherited reasoning effort."
.to_string(),
),
},
),
])
}
fn spawn_agent_common_properties_v2(agent_type_description: &str) -> BTreeMap<String, JsonSchema> {
BTreeMap::from([
("items".to_string(), create_collab_input_items_schema()),
(
"agent_type".to_string(),
JsonSchema::String {
description: Some(agent_type_description.to_string()),
},
),
(
"fork_turns".to_string(),
JsonSchema::String {
description: Some(
"Optional MultiAgentV2 fork mode. Use `none`, `all`, or a positive integer string such as `3` to fork only the most recent turns."
.to_string(),
),
},
),
(
"model".to_string(),
JsonSchema::String {
description: Some(
"Optional model override for the new agent. Replaces the inherited model."
.to_string(),
),
},
@@ -730,31 +725,19 @@ fn wait_agent_tool_parameters_v1(options: WaitAgentTimeoutOptions) -> JsonSchema
}
fn wait_agent_tool_parameters_v2(options: WaitAgentTimeoutOptions) -> JsonSchema {
let properties = BTreeMap::from([
(
"targets".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some(
"Agent ids or canonical task names to wait on. Pass multiple targets to wait for whichever finishes first."
.to_string(),
),
},
),
(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some(format!(
"Optional timeout in milliseconds. Defaults to {}, min {}, max {}. Prefer longer waits (minutes) to avoid busy polling.",
options.default_timeout_ms, options.min_timeout_ms, options.max_timeout_ms,
)),
},
),
]);
let properties = BTreeMap::from([(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some(format!(
"Optional timeout in milliseconds. Defaults to {}, min {}, max {}. Prefer longer waits (minutes) to avoid busy polling.",
options.default_timeout_ms, options.min_timeout_ms, options.max_timeout_ms,
)),
},
)]);
JsonSchema::Object {
properties,
required: Some(vec!["targets".to_string()]),
required: None,
additional_properties: Some(false.into()),
}
}

View File

@@ -56,34 +56,20 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() {
assert!(description.contains("visible display (`visible-model`)"));
assert!(!description.contains("hidden display (`hidden-model`)"));
assert!(properties.contains_key("task_name"));
assert!(properties.contains_key("items"));
assert!(properties.contains_key("fork_turns"));
assert!(!properties.contains_key("message"));
assert!(!properties.contains_key("fork_context"));
assert_eq!(
properties.get("agent_type"),
Some(&JsonSchema::String {
description: Some("role help".to_string()),
})
);
assert_eq!(required, Some(vec!["task_name".to_string()]));
let Some(JsonSchema::Array { items, .. }) = properties.get("model_fallback_list") else {
panic!("spawn_agent v2 should define model_fallback_list as an array of objects");
};
let JsonSchema::Object {
properties: model_fallback_item_properties,
required: Some(model_fallback_item_required),
..
} = items.as_ref()
else {
panic!("spawn_agent v2 model_fallback_list items should be objects");
};
assert_eq!(
model_fallback_item_properties.get("model"),
Some(&JsonSchema::String {
description: Some(
"Model to try. Must be a model slug from the current model picker list."
.to_string(),
),
})
required,
Some(vec!["task_name".to_string(), "items".to_string()])
);
assert_eq!(model_fallback_item_required, &vec!["model".to_string()]);
assert_eq!(
output_schema.expect("spawn_agent output schema")["required"],
json!(["agent_id", "task_name", "nickname"])
@@ -91,22 +77,21 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() {
}
#[test]
fn spawn_agent_tool_v1_includes_model_fallback_list() {
let ToolSpec::Function(ResponsesApiTool { parameters, .. }) =
create_spawn_agent_tool_v1(SpawnAgentToolOptions {
available_models: &[model_preset("visible", /*show_in_picker*/ true)],
agent_type_description: "role help".to_string(),
})
else {
fn spawn_agent_tool_v1_keeps_legacy_fork_context_field() {
let tool = create_spawn_agent_tool_v1(SpawnAgentToolOptions {
available_models: &[],
agent_type_description: "role help".to_string(),
});
let ToolSpec::Function(ResponsesApiTool { parameters, .. }) = tool else {
panic!("spawn_agent should be a function tool");
};
let JsonSchema::Object { properties, .. } = parameters else {
panic!("spawn_agent should use object params");
};
let Some(JsonSchema::Array { .. }) = properties.get("model_fallback_list") else {
panic!("model_fallback_list should be an array");
};
assert!(properties.contains_key("model_fallback_list"));
assert!(properties.contains_key("fork_context"));
assert!(!properties.contains_key("fork_turns"));
}
#[test]
@@ -129,6 +114,7 @@ fn send_message_tool_requires_items_and_uses_submission_output() {
};
assert!(properties.contains_key("target"));
assert!(properties.contains_key("items"));
assert!(!properties.contains_key("interrupt"));
assert!(!properties.contains_key("message"));
assert_eq!(
required,
@@ -141,7 +127,7 @@ fn send_message_tool_requires_items_and_uses_submission_output() {
}
#[test]
fn wait_agent_tool_v2_uses_task_targets_and_summary_output() {
fn wait_agent_tool_v2_uses_timeout_only_summary_output() {
let ToolSpec::Function(ResponsesApiTool {
parameters,
output_schema,
@@ -154,17 +140,17 @@ fn wait_agent_tool_v2_uses_task_targets_and_summary_output() {
else {
panic!("wait_agent should be a function tool");
};
let JsonSchema::Object { properties, .. } = parameters else {
let JsonSchema::Object {
properties,
required,
..
} = parameters
else {
panic!("wait_agent should use object params");
};
let Some(JsonSchema::Array {
description: Some(description),
..
}) = properties.get("targets")
else {
panic!("wait_agent should define targets array");
};
assert!(description.contains("canonical task names"));
assert!(!properties.contains_key("targets"));
assert!(properties.contains_key("timeout_ms"));
assert_eq!(required, None);
assert_eq!(
output_schema.expect("wait output schema")["properties"]["message"]["description"],
json!("Brief wait summary without the agent's final content.")

View File

@@ -82,6 +82,7 @@ impl StatusHistoryHandle {
} else {
compose_rate_limit_data_many(rate_limits, now)
};
#[expect(clippy::expect_used)]
let mut state = self
.rate_limit_state
.write()
@@ -558,6 +559,7 @@ impl HistoryCell for StatusHistoryCell {
.collect();
let mut seen: BTreeSet<String> = labels.iter().cloned().collect();
let thread_name = self.thread_name.as_deref().filter(|name| !name.is_empty());
#[expect(clippy::expect_used)]
let rate_limit_state = self
.rate_limit_state
.read()

View File

@@ -327,13 +327,13 @@ unsafe fn ensure_allow_mask_aces_with_inheritance_impl(
if !p_sd.is_null() {
LocalFree(p_sd as HLOCAL);
}
return Err(anyhow!("SetNamedSecurityInfoW failed: {}", code3));
return Err(anyhow!("SetNamedSecurityInfoW failed: {code3}"));
}
} else {
if !p_sd.is_null() {
LocalFree(p_sd as HLOCAL);
}
return Err(anyhow!("SetEntriesInAclW failed: {}", code2));
return Err(anyhow!("SetEntriesInAclW failed: {code2}"));
}
}
if !p_sd.is_null() {
@@ -401,7 +401,7 @@ pub unsafe fn add_allow_ace(path: &Path, psid: *mut c_void) -> Result<bool> {
&mut p_sd,
);
if code != ERROR_SUCCESS {
return Err(anyhow!("GetNamedSecurityInfoW failed: {}", code));
return Err(anyhow!("GetNamedSecurityInfoW failed: {code}"));
}
// Already has write? Skip costly DACL rewrite.
if dacl_has_write_allow_for_sid(p_dacl, psid) {
@@ -467,7 +467,7 @@ pub unsafe fn add_deny_write_ace(path: &Path, psid: *mut c_void) -> Result<bool>
&mut p_sd,
);
if code != ERROR_SUCCESS {
return Err(anyhow!("GetNamedSecurityInfoW failed: {}", code));
return Err(anyhow!("GetNamedSecurityInfoW failed: {code}"));
}
let mut added = false;
if !dacl_has_write_deny_for_sid(p_dacl, psid) {

View File

@@ -194,8 +194,7 @@ pub fn audit_everyone_writable(
}
crate::logging::log_note(
&format!(
"AUDIT: world-writable scan FAILED; cwd={cwd:?}; checked={checked}; duration_ms={elapsed_ms}; flagged:{}",
list
"AUDIT: world-writable scan FAILED; cwd={cwd:?}; checked={checked}; duration_ms={elapsed_ms}; flagged:{list}",
),
logs_base_dir,
);
@@ -229,7 +228,7 @@ pub fn apply_world_writable_scan_and_denies(
logs_base_dir,
) {
log_note(
&format!("AUDIT: failed to apply capability deny ACEs: {}", err),
&format!("AUDIT: failed to apply capability deny ACEs: {err}"),
logs_base_dir,
);
}

View File

@@ -34,7 +34,7 @@ fn make_random_cap_sid_string() -> String {
let b = rng.next_u32();
let c = rng.next_u32();
let d = rng.next_u32();
format!("S-1-5-21-{}-{}-{}-{}", a, b, c, d)
format!("S-1-5-21-{a}-{b}-{c}-{d}")
}
fn persist_caps(path: &Path, caps: &CapSids) -> Result<()> {

View File

@@ -480,7 +480,7 @@ pub fn main() -> Result<()> {
let _ = send_error(&pipe_write, "spawn_failed", err.to_string());
return Err(err);
}
let log_dir_owned = log_dir.map(|p| p.to_path_buf());
let log_dir_owned = log_dir.map(Path::to_path_buf);
let out_thread = spawn_output_reader(
Arc::clone(&pipe_write),
stdout_handle,

View File

@@ -144,7 +144,7 @@ pub fn read_frame<R: Read>(mut reader: R) -> Result<Option<FramedMessage>> {
}
let len = u32::from_le_bytes(len_buf) as usize;
if len > MAX_FRAME_LEN {
anyhow::bail!("frame too large: {}", len);
anyhow::bail!("frame too large: {len}");
}
let mut payload = vec![0u8; len];
reader.read_exact(&mut payload)?;

View File

@@ -93,7 +93,7 @@ mod windows_impl {
} else {
cur.join(gitdir)
};
return resolved.parent().map(|p| p.to_path_buf()).or(Some(cur));
return resolved.parent().map(Path::to_path_buf).or(Some(cur));
}
return Some(cur);
}
@@ -270,17 +270,22 @@ mod windows_impl {
}
let caps = load_or_create_cap_sids(codex_home)?;
let (psid_to_use, cap_sids) = match &policy {
SandboxPolicy::ReadOnly { .. } => (
unsafe { convert_string_sid_to_sid(&caps.readonly).unwrap() },
vec![caps.readonly.clone()],
),
SandboxPolicy::WorkspaceWrite { .. } => (
unsafe { convert_string_sid_to_sid(&caps.workspace).unwrap() },
vec![
caps.workspace.clone(),
crate::cap::workspace_cap_sid_for_cwd(codex_home, cwd)?,
],
),
SandboxPolicy::ReadOnly { .. } => {
#[allow(clippy::unwrap_used)]
let psid = unsafe { convert_string_sid_to_sid(&caps.readonly).unwrap() };
(psid, vec![caps.readonly])
}
SandboxPolicy::WorkspaceWrite { .. } => {
#[allow(clippy::unwrap_used)]
let psid = unsafe { convert_string_sid_to_sid(&caps.workspace).unwrap() };
(
psid,
vec![
caps.workspace,
crate::cap::workspace_cap_sid_for_cwd(codex_home, cwd)?,
],
)
}
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
unreachable!("DangerFullAccess handled above")
}
@@ -302,7 +307,7 @@ mod windows_impl {
let runner_exe = find_runner_exe(codex_home, logs_base_dir);
let runner_cmdline = runner_exe
.to_str()
.map(|s| s.to_string())
.map(ToString::to_string)
.unwrap_or_else(|| "codex-command-runner.exe".to_string());
let runner_full_cmd = format!(
"{} {} {}",
@@ -365,7 +370,7 @@ mod windows_impl {
),
logs_base_dir,
);
return Err(anyhow::anyhow!("CreateProcessWithLogonW failed: {}", err));
return Err(anyhow::anyhow!("CreateProcessWithLogonW failed: {err}"));
}
if let Err(err) = connect_pipe(h_pipe_in) {
@@ -451,7 +456,7 @@ mod windows_impl {
if exit_code == 0 {
log_success(&command, logs_base_dir);
} else {
log_failure(&command, &format!("exit code {}", exit_code), logs_base_dir);
log_failure(&command, &format!("exit code {exit_code}"), logs_base_dir);
}
Ok(CaptureResult {

View File

@@ -48,7 +48,7 @@ fn prepend_path(env_map: &mut HashMap<String, String>, prefix: &str) {
.cloned()
.or_else(|| env::var("PATH").ok())
.unwrap_or_default();
let parts: Vec<String> = existing.split(';').map(|s| s.to_string()).collect();
let parts: Vec<String> = existing.split(';').map(ToString::to_string).collect();
if parts
.first()
.map(|p| p.eq_ignore_ascii_case(prefix))
@@ -74,7 +74,7 @@ fn reorder_pathext_for_stubs(env_map: &mut HashMap<String, String>) {
let exts: Vec<String> = default
.split(';')
.filter(|e| !e.is_empty())
.map(|s| s.to_string())
.map(ToString::to_string)
.collect();
let exts_norm: Vec<String> = exts.iter().map(|e| e.to_ascii_uppercase()).collect();
let want = [".BAT", ".CMD"];
@@ -110,7 +110,7 @@ fn ensure_denybin(tools: &[&str], denybin_dir: Option<&Path>) -> Result<PathBuf>
fs::create_dir_all(&base)?;
for tool in tools {
for ext in [".bat", ".cmd"] {
let path = base.join(format!("{}{}", tool, ext));
let path = base.join(format!("{tool}{ext}"));
if !path.exists() {
let mut f = File::create(&path)?;
f.write_all(b"@echo off\\r\\nexit /b 1\\r\\n")?;
@@ -162,7 +162,7 @@ pub fn apply_no_network_to_env(env_map: &mut HashMap<String, String>) -> Result<
let base = ensure_denybin(&["ssh", "scp"], /*denybin_dir*/ None)?;
for tool in ["curl", "wget"] {
for ext in [".bat", ".cmd"] {
let p = base.join(format!("{}{}", tool, ext));
let p = base.join(format!("{tool}{ext}"));
if p.exists() {
let _ = fs::remove_file(&p);
}

View File

@@ -122,7 +122,7 @@ fn select_identity(
};
let password = decode_password(&chosen)?;
Ok(Some(SandboxIdentity {
username: chosen.username.clone(),
username: chosen.username,
password,
}))
}
@@ -187,8 +187,8 @@ pub fn require_logon_sandbox_creds(
proxy_enforced,
},
crate::setup::SetupRootOverrides {
read_roots: Some(needed_read.clone()),
write_roots: Some(needed_write.clone()),
read_roots: Some(needed_read),
write_roots: Some(needed_write),
},
)?;
identity = select_identity(network_identity, codex_home)?;

View File

@@ -335,14 +335,20 @@ mod windows_impl {
let (h_token, psid_generic, psid_workspace): (HANDLE, *mut c_void, Option<*mut c_void>) = unsafe {
match &policy {
SandboxPolicy::ReadOnly { .. } => {
let psid = convert_string_sid_to_sid(&caps.readonly).unwrap();
#[allow(clippy::expect_used)]
let psid =
convert_string_sid_to_sid(&caps.readonly).expect("valid readonly SID");
let (h, _) = super::token::create_readonly_token_with_cap(psid)?;
(h, psid, None)
}
SandboxPolicy::WorkspaceWrite { .. } => {
let psid_generic = convert_string_sid_to_sid(&caps.workspace).unwrap();
#[allow(clippy::expect_used)]
let psid_generic =
convert_string_sid_to_sid(&caps.workspace).expect("valid workspace SID");
let ws_sid = workspace_cap_sid_for_cwd(codex_home, cwd)?;
let psid_workspace = convert_string_sid_to_sid(&ws_sid).unwrap();
#[allow(clippy::expect_used)]
let psid_workspace =
convert_string_sid_to_sid(&ws_sid).expect("valid workspace SID");
let base = super::token::get_current_token_for_restriction()?;
let h_res = create_workspace_write_token_with_caps_from(
base,
@@ -363,7 +369,7 @@ mod windows_impl {
&& let Ok(base) = super::token::get_current_token_for_restriction()
{
if let Ok(bytes) = super::token::get_logon_sid_bytes(base) {
let mut tmp = bytes.clone();
let mut tmp = bytes;
let psid2 = tmp.as_mut_ptr() as *mut c_void;
allow_null_device(psid2);
}
@@ -536,7 +542,7 @@ mod windows_impl {
if exit_code == 0 {
log_success(&command, logs_base_dir);
} else {
log_failure(&command, &format!("exit code {}", exit_code), logs_base_dir);
log_failure(&command, &format!("exit code {exit_code}"), logs_base_dir);
}
if !persist_aces {
@@ -569,9 +575,11 @@ mod windows_impl {
ensure_codex_home_exists(codex_home)?;
let caps = load_or_create_cap_sids(codex_home)?;
#[allow(clippy::expect_used)]
let psid_generic =
unsafe { convert_string_sid_to_sid(&caps.workspace) }.expect("valid workspace SID");
let ws_sid = workspace_cap_sid_for_cwd(codex_home, cwd)?;
#[allow(clippy::expect_used)]
let psid_workspace =
unsafe { convert_string_sid_to_sid(&ws_sid) }.expect("valid workspace SID");
let current_dir = cwd.to_path_buf();

View File

@@ -43,7 +43,7 @@ pub fn make_env_block(env: &HashMap<String, String>) -> Vec<u16> {
});
let mut w: Vec<u16> = Vec::new();
for (k, v) in items {
let mut s = to_wide(format!("{}={}", k, v));
let mut s = to_wide(format!("{k}={v}"));
s.pop();
w.extend_from_slice(&s);
w.push(0);
@@ -149,7 +149,7 @@ pub unsafe fn create_process_as_user(
creation_flags,
);
logging::debug_log(&msg, logs_base_dir);
return Err(anyhow!("CreateProcessAsUserW failed: {}", err));
return Err(anyhow!("CreateProcessAsUserW failed: {err}"));
}
Ok(CreatedProcess {
process_info: pi,

View File

@@ -35,7 +35,7 @@ pub fn read_acl_mutex_exists() -> Result<bool> {
if err == ERROR_FILE_NOT_FOUND {
return Ok(false);
}
return Err(anyhow::anyhow!("OpenMutexW failed: {}", err));
return Err(anyhow::anyhow!("OpenMutexW failed: {err}"));
}
unsafe {
CloseHandle(handle);

View File

@@ -312,8 +312,7 @@ fn lock_sandbox_dir(
);
if set != 0 {
return Err(anyhow::anyhow!(
"SetEntriesInAclW sandbox dir failed: {}",
set
"SetEntriesInAclW sandbox dir failed: {set}",
));
}
let path_w = to_wide(dir.as_os_str());
@@ -328,8 +327,7 @@ fn lock_sandbox_dir(
);
if res != 0 {
return Err(anyhow::anyhow!(
"SetNamedSecurityInfoW sandbox dir failed: {}",
res
"SetNamedSecurityInfoW sandbox dir failed: {res}",
));
}
if !new_dacl.is_null() {
@@ -424,7 +422,7 @@ fn real_main() -> Result<()> {
});
let report = SetupErrorReport {
code: failure.code,
message: failure.message.clone(),
message: failure.message,
};
if let Err(write_err) = write_setup_error_report(&payload.codex_home, &report) {
let _ = log_line(
@@ -496,7 +494,7 @@ fn run_read_acl_only(payload: &Payload, log: &mut File) -> Result<()> {
if !refresh_errors.is_empty() {
log_line(
log,
&format!("read ACL run completed with errors: {:?}", refresh_errors),
&format!("read ACL run completed with errors: {refresh_errors:?}"),
)?;
if payload.refresh_only {
anyhow::bail!("read ACL run had errors");
@@ -637,7 +635,7 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<(
}
}
let cap_sid_str = caps.workspace.clone();
let cap_sid_str = caps.workspace;
let sandbox_group_sid_str =
string_from_sid_bytes(&sandbox_group_sid).map_err(anyhow::Error::msg)?;
let write_mask =
@@ -895,7 +893,7 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<(
if refresh_only && !refresh_errors.is_empty() {
log_line(
log,
&format!("setup refresh completed with errors: {:?}", refresh_errors),
&format!("setup refresh completed with errors: {refresh_errors:?}"),
)?;
anyhow::bail!("setup refresh had errors");
}

View File

@@ -201,7 +201,7 @@ fn run_setup_refresh_inner(
&format!("setup refresh: exited with status {status:?}"),
Some(&sandbox_dir(request.codex_home)),
);
return Err(anyhow!("setup refresh failed with status {}", status));
return Err(anyhow!("setup refresh failed with status {status}"));
}
Ok(())
}

View File

@@ -78,7 +78,7 @@ unsafe fn set_default_dacl(h_token: HANDLE, sids: &[*mut c_void]) -> Result<()>
&mut p_new_dacl,
);
if res != ERROR_SUCCESS {
return Err(anyhow!("SetEntriesInAclW failed: {}", res));
return Err(anyhow!("SetEntriesInAclW failed: {res}"));
}
let mut info = TokenDefaultDaclInfo {
default_dacl: p_new_dacl,
@@ -95,8 +95,7 @@ unsafe fn set_default_dacl(h_token: HANDLE, sids: &[*mut c_void]) -> Result<()>
LocalFree(p_new_dacl as HLOCAL);
}
return Err(anyhow!(
"SetTokenInformation(TokenDefaultDacl) failed: {}",
err
"SetTokenInformation(TokenDefaultDacl) failed: {err}",
));
}
if !p_new_dacl.is_null() {
@@ -277,7 +276,7 @@ unsafe fn enable_single_privilege(h_token: HANDLE, name: &str) -> Result<()> {
}
let err = GetLastError();
if err != 0 {
return Err(anyhow!("AdjustTokenPrivileges error {}", err));
return Err(anyhow!("AdjustTokenPrivileges error {err}"));
}
Ok(())
}

View File

@@ -83,7 +83,7 @@ pub fn format_last_error(err: i32) -> String {
std::ptr::null_mut(),
);
if len == 0 || buf_ptr.is_null() {
return format!("Win32 error {}", err);
return format!("Win32 error {err}");
}
let slice = std::slice::from_raw_parts(buf_ptr, len as usize);
let mut s = String::from_utf16_lossy(slice);