Compare commits

..

1 Commit

Author SHA1 Message Date
Michael Bolin
0b69678daf ci: run Windows argument-comment-lint via native Bazel 2026-03-28 21:53:58 -07:00
41 changed files with 2245 additions and 2313 deletions

View File

@@ -60,8 +60,63 @@ runs:
# Use the shortest available drive to reduce argv/path length issues,
# but avoid the drive root because some Windows test launchers mis-handle
# MANIFEST paths there.
$bazelOutputUserRoot = if (Test-Path 'D:\') { 'D:\b' } else { 'C:\b' }
$hasDDrive = Test-Path 'D:\'
$bazelOutputUserRoot = if ($hasDDrive) { 'D:\b' } else { 'C:\b' }
$repoContentsCache = Join-Path $env:RUNNER_TEMP "bazel-repo-contents-cache-$env:GITHUB_RUN_ID-$env:GITHUB_JOB"
"BAZEL_OUTPUT_USER_ROOT=$bazelOutputUserRoot" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
"BAZEL_REPO_CONTENTS_CACHE=$repoContentsCache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if (-not $hasDDrive) {
$repositoryCache = Join-Path $env:USERPROFILE '.cache\bazel-repo-cache'
"BAZEL_REPOSITORY_CACHE=$repositoryCache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
}
- name: Expose MSVC SDK environment (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
$vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
if (-not (Test-Path $vswhere)) {
throw "vswhere.exe not found"
}
$installPath = & $vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath 2>$null
if (-not $installPath) {
throw "Could not locate a Visual Studio installation with VC tools"
}
$vsDevCmd = Join-Path $installPath 'Common7\Tools\VsDevCmd.bat'
if (-not (Test-Path $vsDevCmd)) {
throw "VsDevCmd.bat not found at $vsDevCmd"
}
$varsToExport = @(
'INCLUDE',
'LIB',
'LIBPATH',
'PATH',
'UCRTVersion',
'UniversalCRTSdkDir',
'VCINSTALLDIR',
'VCToolsInstallDir',
'WindowsLibPath',
'WindowsSdkBinPath',
'WindowsSdkDir',
'WindowsSDKLibVersion',
'WindowsSDKVersion'
)
$envLines = & cmd.exe /c ('"{0}" -no_logo -arch=x64 -host_arch=x64 >nul && set' -f $vsDevCmd)
foreach ($line in $envLines) {
if ($line -notmatch '^(.*?)=(.*)$') {
continue
}
$name = $matches[1]
$value = $matches[2]
if ($varsToExport -contains $name) {
"$name=$value" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
}
}
- name: Enable Git long paths (Windows)
if: runner.os == 'Windows'

View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# Run the argument-comment lint for codex-rs via Bazel, adapting flags to the
# CI runner OS (RUNNER_OS is set by GitHub Actions).
set -euo pipefail

# Select the Bazel --config name for this runner OS; Linux is the default.
ci_config=ci-linux
case "${RUNNER_OS:-}" in
  macOS)
    ci_config=ci-macos
    ;;
  Windows)
    ci_config=ci-windows
    ;;
esac

# Caller-supplied extra bazel args; Windows appends platform-specific overrides.
bazel_lint_args=("$@")
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
  # Respect an explicit --host_platform from the caller; only default it when
  # absent.
  has_host_platform_override=0
  for arg in "${bazel_lint_args[@]}"; do
    if [[ "$arg" == --host_platform=* ]]; then
      has_host_platform_override=1
      break
    fi
  done
  if [[ $has_host_platform_override -eq 0 ]]; then
    # The nightly Windows lint toolchain is registered with an MSVC exec
    # platform even though the lint target platform stays on `windows-gnullvm`.
    # Override the host platform here so the exec-side helper binaries actually
    # match the registered toolchain set.
    bazel_lint_args+=("--host_platform=//:local_windows_msvc")
  fi
  # Native Windows lint runs need exec-side Rust helper binaries and proc-macros
  # to use rust-lld instead of the C++ linker path. The default `none`
  # preference resolves to `cc` when a cc_toolchain is present, which currently
  # routes these exec actions through clang++ with an argument shape it cannot
  # consume.
  bazel_lint_args+=("--@rules_rust//rust/settings:toolchain_linker_preference=rust")
  # Some Rust top-level targets are still intentionally incompatible with the
  # local Windows MSVC exec platform. Skip those explicit targets so the native
  # lint aspect can run across the compatible crate graph instead of failing the
  # whole build after analysis.
  bazel_lint_args+=("--skip_incompatible_explicit_targets")
fi

# Honor a CI-provided output root (short paths avoid Windows argv/path limits).
bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
  bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
# Invoke bazel with the given args. On Windows runners, disable MSYS2's
# automatic argument/path conversion so flags such as //targets and drive
# paths reach bazel unmodified; elsewhere this is a plain passthrough.
run_bazel() {
  case "${RUNNER_OS:-}" in
    Windows)
      MSYS2_ARG_CONV_EXCL='*' bazel "$@"
      ;;
    *)
      bazel "$@"
      ;;
  esac
}
# Invoke bazel via run_bazel, prepending any configured startup options
# (e.g. --output_user_root). Startup options must precede the command verb,
# which is why they cannot simply be appended to "$@".
run_bazel_with_startup_args() {
  if (( ${#bazel_startup_args[@]} )); then
    run_bazel "${bazel_startup_args[@]}" "$@"
  else
    run_bazel "$@"
  fi
}
# Run `bazel query` for the given expression and print one label per line on
# stdout. Query stderr is suppressed on success and replayed on failure, in
# which case the whole script exits 1. Temp files are removed on both paths.
# NOTE(review): `bazel query --keep_going` exits 3 when it produced partial
# results; that is treated as fatal here — confirm this is intended.
read_query_labels() {
  local query="$1"
  local out_file err_file
  out_file="$(mktemp)"
  err_file="$(mktemp)"
  if run_bazel_with_startup_args \
    --noexperimental_remote_repo_contents_cache \
    query \
    --keep_going \
    --output=label \
    "$query" >"$out_file" 2>"$err_file"; then
    cat "$out_file"
    rm -f "$out_file" "$err_file"
  else
    cat "$err_file" >&2
    rm -f "$out_file" "$err_file"
    exit 1
  fi
}
# Default: lint the entire codex-rs workspace.
final_build_targets=(//codex-rs/...)
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
  # Bazel's local Windows platform currently lacks a default test toolchain for
  # `rust_test`, so target the concrete Rust crate rules directly. The lint
  # aspect still walks their crate graph, which preserves incremental reuse for
  # non-test code while avoiding non-Rust wrapper targets such as platform_data.
  final_build_targets=()
  while IFS= read -r label; do
    [[ -n "$label" ]] || continue
    final_build_targets+=("$label")
  done < <(read_query_labels 'kind("rust_(library|binary|proc_macro) rule", //codex-rs/...)')
  # An empty target list would silently lint nothing; fail loudly instead.
  if [[ ${#final_build_targets[@]} -eq 0 ]]; then
    echo "Failed to discover Windows Bazel lint targets." >&2
    exit 1
  fi
fi

# Delegate to the shared CI wrapper, which layers on configs and cache flags.
./.github/scripts/run-bazel-ci.sh \
  -- \
  build \
  "${bazel_lint_args[@]}" \
  -- \
  "${final_build_targets[@]}"

View File

@@ -41,6 +41,15 @@ if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi
run_bazel() {
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
MSYS2_ARG_CONV_EXCL='*' bazel "$@"
return
fi
bazel "$@"
}
ci_config=ci-linux
case "${RUNNER_OS:-}" in
macOS)
@@ -60,7 +69,7 @@ print_bazel_test_log_tails() {
bazel_info_cmd+=("${bazel_startup_args[@]}")
fi
testlogs_dir="$("${bazel_info_cmd[@]}" info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
local failed_targets=()
while IFS= read -r target; do
@@ -126,6 +135,41 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
post_config_bazel_args+=(--remote_download_toplevel)
fi
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
# each job its own repo contents cache so they do not fight over the shared
# path configured in `ci-windows`.
post_config_bazel_args+=("--repo_contents_cache=${BAZEL_REPO_CONTENTS_CACHE}")
fi
if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
post_config_bazel_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
windows_action_env_vars=(
INCLUDE
LIB
LIBPATH
PATH
UCRTVersion
UniversalCRTSdkDir
VCINSTALLDIR
VCToolsInstallDir
WindowsLibPath
WindowsSdkBinPath
WindowsSdkDir
WindowsSDKLibVersion
WindowsSDKVersion
)
for env_var in "${windows_action_env_vars[@]}"; do
if [[ -n "${!env_var:-}" ]]; then
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
fi
done
fi
bazel_console_log="$(mktemp)"
trap 'rm -f "$bazel_console_log"' EXIT
@@ -149,7 +193,7 @@ if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
bazel_run_args+=("${post_config_bazel_args[@]}")
fi
set +e
"${bazel_cmd[@]}" \
run_bazel "${bazel_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_run_args[@]}" \
-- \
@@ -184,7 +228,7 @@ else
bazel_run_args+=("${post_config_bazel_args[@]}")
fi
set +e
"${bazel_cmd[@]}" \
run_bazel "${bazel_cmd[@]:1}" \
--noexperimental_remote_repo_contents_cache \
"${bazel_run_args[@]}" \
-- \

View File

@@ -99,35 +99,27 @@ jobs:
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Install nightly argument-comment-lint toolchain
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
rustup toolchain install nightly-2025-09-18 \
--profile minimal \
--component llvm-tools-preview \
--component rustc-dev \
--component rust-src \
--no-self-update
rustup default nightly-2025-09-18
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os != 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-bazel-ci.sh \
-- \
build \
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
//codex-rs/...
- name: Run argument comment lint on codex-rs via packaged wrapper
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os == 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: python3 ./tools/argument-comment-lint/run-prebuilt-linter.py
run: |
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--platforms=//:local_windows \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
# --- CI to validate on different os/targets --------------------------------
lint_build:

View File

@@ -159,35 +159,27 @@ jobs:
run: |
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Install nightly argument-comment-lint toolchain
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
rustup toolchain install nightly-2025-09-18 \
--profile minimal \
--component llvm-tools-preview \
--component rustc-dev \
--component rust-src \
--no-self-update
rustup default nightly-2025-09-18
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os != 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
./.github/scripts/run-bazel-ci.sh \
-- \
build \
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA} \
-- \
//codex-rs/...
- name: Run argument comment lint on codex-rs via packaged wrapper
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
- name: Run argument comment lint on codex-rs via Bazel
if: ${{ runner.os == 'Windows' }}
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: python3 ./tools/argument-comment-lint/run-prebuilt-linter.py
run: |
./.github/scripts/run-argument-comment-lint-bazel.sh \
--config=argument-comment-lint \
--platforms=//:local_windows \
--keep_going \
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
# --- Gatherer job that you mark as the ONLY required status -----------------
results:

View File

@@ -17,12 +17,19 @@ platform(
platform(
name = "local_windows",
constraint_values = [
# We just need to pick one of the ABIs. Do the same one we target.
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
parents = ["@platforms//host"],
)
platform(
name = "local_windows_msvc",
constraint_values = [
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
parents = ["@platforms//host"],
)
alias(
name = "rbe",
actual = "@rbe_platform",

View File

@@ -82,6 +82,13 @@ rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_r
rules_rust.patch(
patches = [
"//patches:rules_rust_windows_gnullvm_build_script.patch",
"//patches:rules_rust_windows_exec_msvc_build_script_env.patch",
"//patches:rules_rust_windows_bootstrap_process_wrapper_linker.patch",
"//patches:rules_rust_windows_msvc_direct_link_args.patch",
"//patches:rules_rust_windows_exec_bin_target.patch",
"//patches:rules_rust_windows_exec_std.patch",
"//patches:rules_rust_windows_exec_rustc_dev_rlib.patch",
"//patches:rules_rust_repository_set_exec_constraints.patch",
],
strip = 1,
)
@@ -96,6 +103,35 @@ nightly_rust.toolchain(
dev_components = True,
edition = "2024",
)
# Keep Windows exec tools on MSVC so Bazel helper binaries link correctly, but
# lint crate targets as `windows-gnullvm` to preserve the repo's actual cfgs.
nightly_rust.repository_set(
name = "rust_windows_x86_64",
dev_components = True,
edition = "2024",
exec_triple = "x86_64-pc-windows-msvc",
exec_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_triple = "x86_64-pc-windows-msvc",
versions = ["nightly/2025-09-18"],
)
nightly_rust.repository_set(
name = "rust_windows_x86_64",
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
target_triple = "x86_64-pc-windows-gnullvm",
)
use_repo(nightly_rust, "rust_toolchains")
toolchains = use_extension("@rules_rs//rs/experimental/toolchains:module_extension.bzl", "toolchains")
@@ -163,6 +199,8 @@ crate.annotation(
patch_args = ["-p1"],
patches = [
"//patches:aws-lc-sys_memcmp_check.patch",
"//patches:aws-lc-sys_windows_msvc_prebuilt_nasm.patch",
"//patches:aws-lc-sys_windows_msvc_memcmp_probe.patch",
],
)

View File

@@ -94,7 +94,7 @@ const MCP_TOOL_NAME_DELIMITER: &str = "__";
const MAX_TOOL_NAME_LENGTH: usize = 64;
/// Default timeout for initializing MCP server & initially listing tools.
pub const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(30);
pub const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(10);
/// Default timeout for individual tool calls.
const DEFAULT_TOOL_TIMEOUT: Duration = Duration::from_secs(120);

View File

@@ -612,7 +612,7 @@ fn mcp_init_error_display_includes_startup_timeout_hint() {
let display = mcp_init_error_display(server_name, /*entry*/ None, &err);
assert_eq!(
"MCP client for `slow` timed out after 30 seconds. Add or adjust `startup_timeout_sec` in your config.toml:\n[mcp_servers.slow]\nstartup_timeout_sec = XX",
"MCP client for `slow` timed out after 10 seconds. Add or adjust `startup_timeout_sec` in your config.toml:\n[mcp_servers.slow]\nstartup_timeout_sec = XX",
display
);
}

View File

@@ -313,9 +313,15 @@ async fn timed_out_snapshot_shell_is_terminated() -> Result<()> {
shell_snapshot: crate::shell::empty_shell_snapshot_receiver(),
};
let err = run_script_with_timeout(&shell, &script, Duration::from_secs(1), true, dir.path())
.await
.expect_err("snapshot shell should time out");
let err = run_script_with_timeout(
&shell,
&script,
Duration::from_secs(1),
/*use_login_shell*/ true,
dir.path(),
)
.await
.expect_err("snapshot shell should time out");
assert!(
err.to_string().contains("timed out"),
"expected timeout error, got {err:?}"

File diff suppressed because it is too large Load Diff

View File

@@ -16,27 +16,14 @@ use codex_tools::ConfiguredToolSpec;
use codex_tools::FreeformTool;
use codex_tools::ResponsesApiWebSearchFilters;
use codex_tools::ResponsesApiWebSearchUserLocation;
use codex_tools::SpawnAgentToolOptions;
use codex_tools::ViewImageToolOptions;
use codex_tools::WaitAgentTimeoutOptions;
use codex_tools::create_close_agent_tool_v1;
use codex_tools::create_close_agent_tool_v2;
use codex_tools::create_exec_command_tool;
use codex_tools::create_request_permissions_tool;
use codex_tools::create_request_user_input_tool;
use codex_tools::create_resume_agent_tool;
use codex_tools::create_send_input_tool_v1;
use codex_tools::create_send_message_tool;
use codex_tools::create_spawn_agent_tool_v1;
use codex_tools::create_spawn_agent_tool_v2;
use codex_tools::create_view_image_tool;
use codex_tools::create_wait_agent_tool_v1;
use codex_tools::create_wait_agent_tool_v2;
use codex_tools::create_write_stdin_tool;
use codex_tools::mcp_tool_to_deferred_responses_api_tool;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::PathBuf;
use super::*;
@@ -166,27 +153,6 @@ fn shell_tool_name(config: &ToolsConfig) -> Option<&'static str> {
}
}
fn request_user_input_tool_spec(default_mode_request_user_input: bool) -> ToolSpec {
create_request_user_input_tool(request_user_input_tool_description(
default_mode_request_user_input,
))
}
fn spawn_agent_tool_options(config: &ToolsConfig) -> SpawnAgentToolOptions<'_> {
SpawnAgentToolOptions {
available_models: &config.available_models,
agent_type_description: crate::agent::role::spawn_tool_spec::build(&config.agent_roles),
}
}
fn wait_agent_timeout_options() -> WaitAgentTimeoutOptions {
WaitAgentTimeoutOptions {
default_timeout_ms: DEFAULT_WAIT_TIMEOUT_MS,
min_timeout_ms: MIN_WAIT_TIMEOUT_MS,
max_timeout_ms: MAX_WAIT_TIMEOUT_MS,
}
}
fn find_tool<'a>(tools: &'a [ConfiguredToolSpec], expected_name: &str) -> &'a ConfiguredToolSpec {
tools
.iter()
@@ -341,7 +307,7 @@ fn test_full_toolset_specs_for_gpt5_codex_unified_exec_web_search() {
}),
create_write_stdin_tool(),
PLAN_TOOL.clone(),
request_user_input_tool_spec(/*default_mode_request_user_input*/ false),
create_request_user_input_tool(CollaborationModesConfig::default()),
create_apply_patch_freeform_tool(),
ToolSpec::WebSearch {
external_web_access: Some(true),
@@ -358,16 +324,16 @@ fn test_full_toolset_specs_for_gpt5_codex_unified_exec_web_search() {
}
let collab_specs = if config.multi_agent_v2 {
vec![
create_spawn_agent_tool_v2(spawn_agent_tool_options(&config)),
create_spawn_agent_tool_v2(&config),
create_send_message_tool(),
create_wait_agent_tool_v2(wait_agent_timeout_options()),
create_wait_agent_tool_v2(),
create_close_agent_tool_v2(),
]
} else {
vec![
create_spawn_agent_tool_v1(spawn_agent_tool_options(&config)),
create_spawn_agent_tool_v1(&config),
create_send_input_tool_v1(),
create_wait_agent_tool_v1(wait_agent_timeout_options()),
create_wait_agent_tool_v1(),
create_close_agent_tool_v1(),
]
};
@@ -768,7 +734,7 @@ fn request_user_input_description_reflects_default_mode_feature_flag() {
let request_user_input_tool = find_tool(&tools, "request_user_input");
assert_eq!(
request_user_input_tool.spec,
request_user_input_tool_spec(/*default_mode_request_user_input*/ false)
create_request_user_input_tool(CollaborationModesConfig::default())
);
features.enable(Feature::DefaultModeRequestUserInput);
@@ -792,7 +758,9 @@ fn request_user_input_description_reflects_default_mode_feature_flag() {
let request_user_input_tool = find_tool(&tools, "request_user_input");
assert_eq!(
request_user_input_tool.spec,
request_user_input_tool_spec(/*default_mode_request_user_input*/ true)
create_request_user_input_tool(CollaborationModesConfig {
default_mode_request_user_input: true,
})
);
}
@@ -1049,6 +1017,21 @@ fn image_generation_tools_require_feature_and_supported_model() {
);
}
#[test]
fn js_repl_freeform_grammar_blocks_common_non_js_prefixes() {
let ToolSpec::Freeform(FreeformTool { format, .. }) = create_js_repl_tool() else {
panic!("js_repl should use a freeform tool spec");
};
assert_eq!(format.syntax, "lark");
assert!(format.definition.contains("PRAGMA_LINE"));
assert!(format.definition.contains("`[^`]"));
assert!(format.definition.contains("``[^`]"));
assert!(format.definition.contains("PLAIN_JS_SOURCE"));
assert!(format.definition.contains("codex-js-repl:"));
assert!(!format.definition.contains("(?!"));
}
fn assert_model_tools(
model_slug: &str,
features: &Features,

View File

@@ -274,7 +274,10 @@ mod tests {
#[test]
fn managed_network_enforces_seccomp_even_for_full_network_policy() {
assert_eq!(
should_install_network_seccomp(NetworkSandboxPolicy::Enabled, true),
should_install_network_seccomp(
NetworkSandboxPolicy::Enabled,
/*allow_network_for_proxy*/ true,
),
true
);
}
@@ -282,7 +285,10 @@ mod tests {
#[test]
fn full_network_policy_without_managed_network_skips_seccomp() {
assert_eq!(
should_install_network_seccomp(NetworkSandboxPolicy::Enabled, false),
should_install_network_seccomp(
NetworkSandboxPolicy::Enabled,
/*allow_network_for_proxy*/ false,
),
false
);
}
@@ -291,18 +297,22 @@ mod tests {
fn restricted_network_policy_always_installs_seccomp() {
assert!(should_install_network_seccomp(
NetworkSandboxPolicy::Restricted,
false
/*allow_network_for_proxy*/ false,
));
assert!(should_install_network_seccomp(
NetworkSandboxPolicy::Restricted,
true
/*allow_network_for_proxy*/ true,
));
}
#[test]
fn managed_proxy_routes_use_proxy_routed_seccomp_mode() {
assert_eq!(
network_seccomp_mode(NetworkSandboxPolicy::Enabled, true, true),
network_seccomp_mode(
NetworkSandboxPolicy::Enabled,
/*allow_network_for_proxy*/ true,
/*proxy_routed_network*/ true,
),
Some(NetworkSeccompMode::ProxyRouted)
);
}
@@ -310,7 +320,11 @@ mod tests {
#[test]
fn restricted_network_without_proxy_routing_uses_restricted_mode() {
assert_eq!(
network_seccomp_mode(NetworkSandboxPolicy::Restricted, false, false),
network_seccomp_mode(
NetworkSandboxPolicy::Restricted,
/*allow_network_for_proxy*/ false,
/*proxy_routed_network*/ false,
),
Some(NetworkSeccompMode::Restricted)
);
}
@@ -318,7 +332,11 @@ mod tests {
#[test]
fn full_network_without_managed_proxy_skips_network_seccomp_mode() {
assert_eq!(
network_seccomp_mode(NetworkSandboxPolicy::Enabled, false, false),
network_seccomp_mode(
NetworkSandboxPolicy::Enabled,
/*allow_network_for_proxy*/ false,
/*proxy_routed_network*/ false,
),
None
);
}

View File

@@ -718,7 +718,8 @@ mod tests {
#[test]
fn rewrites_proxy_url_to_local_loopback_port() {
let rewritten =
rewrite_proxy_env_value("socks5h://127.0.0.1:8081", 43210).expect("rewritten value");
rewrite_proxy_env_value("socks5h://127.0.0.1:8081", /*local_port*/ 43210)
.expect("rewritten value");
assert_eq!(rewritten, "socks5h://127.0.0.1:43210");
}

View File

@@ -21,12 +21,8 @@ schema and Responses API tool primitives that no longer need to live in
- `ResponsesApiWebSearchUserLocation`
- `ResponsesApiNamespace`
- `ResponsesApiNamespaceTool`
- code-mode `ToolSpec` adapters and `exec` / `wait` spec builders
- JS REPL spec builders
- MCP resource, `list_dir`, and `test_sync_tool` spec builders
- code-mode `ToolSpec` adapters
- local host tool spec builders for shell/exec/request-permissions/view-image
- collaboration and agent-job `ToolSpec` builders for spawn/send/wait/close,
`request_user_input`, and CSV fanout/reporting
- `parse_tool_input_schema()`
- `parse_dynamic_tool()`
- `parse_mcp_tool()`

View File

@@ -1,141 +0,0 @@
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use std::collections::BTreeMap;
/// Build the `spawn_agents_on_csv` tool spec.
///
/// The tool fans out one worker sub-agent per CSV row; `csv_path` and
/// `instruction` are the only required parameters. Every literal here must
/// stay byte-identical to the expectation in `agent_job_tool_tests.rs`.
pub fn create_spawn_agents_on_csv_tool() -> ToolSpec {
    let properties = BTreeMap::from([
        (
            "csv_path".to_string(),
            JsonSchema::String {
                description: Some("Path to the CSV file containing input rows.".to_string()),
            },
        ),
        (
            "instruction".to_string(),
            JsonSchema::String {
                description: Some(
                    "Instruction template to apply to each CSV row. Use {column_name} placeholders to inject values from the row."
                        .to_string(),
                ),
            },
        ),
        (
            "id_column".to_string(),
            JsonSchema::String {
                description: Some("Optional column name to use as stable item id.".to_string()),
            },
        ),
        (
            "output_csv_path".to_string(),
            JsonSchema::String {
                description: Some("Optional output CSV path for exported results.".to_string()),
            },
        ),
        (
            "max_concurrency".to_string(),
            JsonSchema::Number {
                description: Some(
                    "Maximum concurrent workers for this job. Defaults to 16 and is capped by config."
                        .to_string(),
                ),
            },
        ),
        (
            "max_workers".to_string(),
            JsonSchema::Number {
                description: Some(
                    "Alias for max_concurrency. Set to 1 to run sequentially.".to_string(),
                ),
            },
        ),
        (
            "max_runtime_seconds".to_string(),
            JsonSchema::Number {
                description: Some(
                    "Maximum runtime per worker before it is failed. Defaults to 1800 seconds."
                        .to_string(),
                ),
            },
        ),
        (
            // Free-form object: callers may supply any schema for worker output.
            "output_schema".to_string(),
            JsonSchema::Object {
                properties: BTreeMap::new(),
                required: None,
                additional_properties: None,
            },
        ),
    ]);
    ToolSpec::Function(ResponsesApiTool {
        name: "spawn_agents_on_csv".to_string(),
        description: "Process a CSV by spawning one worker sub-agent per row. The instruction string is a template where `{column}` placeholders are replaced with row values. Each worker must call `report_agent_job_result` with a JSON object (matching `output_schema` when provided); missing reports are treated as failures. This call blocks until all rows finish and automatically exports results to `output_csv_path` (or a default path)."
            .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["csv_path".to_string(), "instruction".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: None,
    })
}
/// Build the worker-only `report_agent_job_result` tool spec.
///
/// A job worker records the result for one job item: `job_id`, `item_id`, and
/// `result` are required; the optional `stop` flag cancels the remaining
/// items. Literals must stay byte-identical to `agent_job_tool_tests.rs`.
pub fn create_report_agent_job_result_tool() -> ToolSpec {
    let properties = BTreeMap::from([
        (
            "job_id".to_string(),
            JsonSchema::String {
                description: Some("Identifier of the job.".to_string()),
            },
        ),
        (
            "item_id".to_string(),
            JsonSchema::String {
                description: Some("Identifier of the job item.".to_string()),
            },
        ),
        (
            // Free-form object payload; shape is validated elsewhere (if at all).
            "result".to_string(),
            JsonSchema::Object {
                properties: BTreeMap::new(),
                required: None,
                additional_properties: None,
            },
        ),
        (
            "stop".to_string(),
            JsonSchema::Boolean {
                description: Some(
                    "Optional. When true, cancels the remaining job items after this result is recorded."
                        .to_string(),
                ),
            },
        ),
    ]);
    ToolSpec::Function(ResponsesApiTool {
        name: "report_agent_job_result".to_string(),
        description:
            "Worker-only tool to report a result for an agent job item. Main agents should not call this."
                .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec![
                "job_id".to_string(),
                "item_id".to_string(),
                "result".to_string(),
            ]),
            additional_properties: Some(false.into()),
        },
        output_schema: None,
    })
}

// Unit tests live in a sibling file so this module stays focused on builders.
#[cfg(test)]
#[path = "agent_job_tool_tests.rs"]
mod tests;

View File

@@ -1,140 +0,0 @@
use super::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
/// Pins the exact `spawn_agents_on_csv` spec byte-for-byte so any drift in the
/// builder's schema or wording is caught immediately.
#[test]
fn spawn_agents_on_csv_tool_requires_csv_and_instruction() {
    assert_eq!(
        create_spawn_agents_on_csv_tool(),
        ToolSpec::Function(ResponsesApiTool {
            name: "spawn_agents_on_csv".to_string(),
            description: "Process a CSV by spawning one worker sub-agent per row. The instruction string is a template where `{column}` placeholders are replaced with row values. Each worker must call `report_agent_job_result` with a JSON object (matching `output_schema` when provided); missing reports are treated as failures. This call blocks until all rows finish and automatically exports results to `output_csv_path` (or a default path)."
                .to_string(),
            strict: false,
            defer_loading: None,
            parameters: JsonSchema::Object {
                properties: BTreeMap::from([
                    (
                        "csv_path".to_string(),
                        JsonSchema::String {
                            description: Some("Path to the CSV file containing input rows.".to_string()),
                        },
                    ),
                    (
                        "instruction".to_string(),
                        JsonSchema::String {
                            description: Some(
                                "Instruction template to apply to each CSV row. Use {column_name} placeholders to inject values from the row."
                                    .to_string(),
                            ),
                        },
                    ),
                    (
                        "id_column".to_string(),
                        JsonSchema::String {
                            description: Some("Optional column name to use as stable item id.".to_string()),
                        },
                    ),
                    (
                        "output_csv_path".to_string(),
                        JsonSchema::String {
                            description: Some("Optional output CSV path for exported results.".to_string()),
                        },
                    ),
                    (
                        "max_concurrency".to_string(),
                        JsonSchema::Number {
                            description: Some(
                                "Maximum concurrent workers for this job. Defaults to 16 and is capped by config."
                                    .to_string(),
                            ),
                        },
                    ),
                    (
                        "max_workers".to_string(),
                        JsonSchema::Number {
                            description: Some(
                                "Alias for max_concurrency. Set to 1 to run sequentially.".to_string(),
                            ),
                        },
                    ),
                    (
                        "max_runtime_seconds".to_string(),
                        JsonSchema::Number {
                            description: Some(
                                "Maximum runtime per worker before it is failed. Defaults to 1800 seconds."
                                    .to_string(),
                            ),
                        },
                    ),
                    (
                        "output_schema".to_string(),
                        JsonSchema::Object {
                            properties: BTreeMap::new(),
                            required: None,
                            additional_properties: None,
                        },
                    ),
                ]),
                required: Some(vec!["csv_path".to_string(), "instruction".to_string()]),
                additional_properties: Some(false.into()),
            },
            output_schema: None,
        })
    );
}
/// Pins the exact `report_agent_job_result` spec byte-for-byte, including the
/// required `job_id`/`item_id`/`result` triple.
#[test]
fn report_agent_job_result_tool_requires_result_payload() {
    assert_eq!(
        create_report_agent_job_result_tool(),
        ToolSpec::Function(ResponsesApiTool {
            name: "report_agent_job_result".to_string(),
            description:
                "Worker-only tool to report a result for an agent job item. Main agents should not call this."
                    .to_string(),
            strict: false,
            defer_loading: None,
            parameters: JsonSchema::Object {
                properties: BTreeMap::from([
                    (
                        "job_id".to_string(),
                        JsonSchema::String {
                            description: Some("Identifier of the job.".to_string()),
                        },
                    ),
                    (
                        "item_id".to_string(),
                        JsonSchema::String {
                            description: Some("Identifier of the job item.".to_string()),
                        },
                    ),
                    (
                        "result".to_string(),
                        JsonSchema::Object {
                            properties: BTreeMap::new(),
                            required: None,
                            additional_properties: None,
                        },
                    ),
                    (
                        "stop".to_string(),
                        JsonSchema::Boolean {
                            description: Some(
                                "Optional. When true, cancels the remaining job items after this result is recorded."
                                    .to_string(),
                            ),
                        },
                    ),
                ]),
                required: Some(vec![
                    "job_id".to_string(),
                    "item_id".to_string(),
                    "result".to_string(),
                ]),
                additional_properties: Some(false.into()),
            },
            output_schema: None,
        })
    );
}

View File

@@ -1,729 +0,0 @@
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use codex_protocol::openai_models::ModelPreset;
use serde_json::Value;
use serde_json::json;
use std::collections::BTreeMap;
/// Inputs used to render the `spawn_agent` tool description and schema.
#[derive(Debug, Clone)]
pub struct SpawnAgentToolOptions<'a> {
    /// Model presets listed in the generated tool description.
    pub available_models: &'a [ModelPreset],
    /// Pre-rendered text describing the selectable agent types; callers build
    /// this from the configured agent roles.
    pub agent_type_description: String,
}
/// Timeout bounds (milliseconds) advertised by the `wait` tool specs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct WaitAgentTimeoutOptions {
    /// Timeout used when the caller supplies none.
    pub default_timeout_ms: i64,
    /// Smallest accepted timeout value.
    pub min_timeout_ms: i64,
    /// Largest accepted timeout value.
    pub max_timeout_ms: i64,
}
/// Build the v1 `spawn_agent` tool spec.
///
/// v1 has no required parameters; its output schema
/// (`spawn_agent_output_schema_v1`) returns the spawned agent id plus the
/// user-facing nickname when available.
pub fn create_spawn_agent_tool_v1(options: SpawnAgentToolOptions<'_>) -> ToolSpec {
    // The available models and agent-type text are folded into the
    // description so the model sees the current presets.
    let available_models_description = spawn_agent_models_description(options.available_models);
    let return_value_description =
        "Returns the spawned agent id plus the user-facing nickname when available.";
    let properties = spawn_agent_common_properties(&options.agent_type_description);
    ToolSpec::Function(ResponsesApiTool {
        name: "spawn_agent".to_string(),
        description: spawn_agent_tool_description(
            &available_models_description,
            return_value_description,
        ),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: None,
            additional_properties: Some(false.into()),
        },
        output_schema: Some(spawn_agent_output_schema_v1()),
    })
}
/// Build the v2 `spawn_agent` tool spec.
///
/// v2 extends the common v1 properties with a required `task_name` parameter
/// and returns the canonical task name via `spawn_agent_output_schema_v2`.
pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpec {
    let available_models_description = spawn_agent_models_description(options.available_models);
    let return_value_description = "Returns the canonical task name for the spawned agent, plus the user-facing nickname when available.";
    // Start from the shared v1 property set, then add the v2-only task name.
    let mut properties = spawn_agent_common_properties(&options.agent_type_description);
    properties.insert(
        "task_name".to_string(),
        JsonSchema::String {
            description: Some(
                "Task name for the new agent. Use lowercase letters, digits, and underscores."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "spawn_agent".to_string(),
        description: spawn_agent_tool_description(
            &available_models_description,
            return_value_description,
        ),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["task_name".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(spawn_agent_output_schema_v2()),
    })
}
/// Build the v1 `send_input` tool spec.
///
/// Sends a message to an existing agent by id; only `target` is required.
/// Accepts either a legacy plain-text `message` or structured `items`, plus an
/// optional `interrupt` flag.
pub fn create_send_input_tool_v1() -> ToolSpec {
    let properties = BTreeMap::from([
        (
            "target".to_string(),
            JsonSchema::String {
                description: Some("Agent id to message (from spawn_agent).".to_string()),
            },
        ),
        (
            "message".to_string(),
            JsonSchema::String {
                description: Some(
                    "Legacy plain-text message to send to the agent. Use either message or items."
                        .to_string(),
                ),
            },
        ),
        ("items".to_string(), create_collab_input_items_schema()),
        (
            "interrupt".to_string(),
            JsonSchema::Boolean {
                description: Some(
                    "When true, stop the agent's current task and handle this immediately. When false (default), queue this message."
                        .to_string(),
                ),
            },
        ),
    ]);
    ToolSpec::Function(ResponsesApiTool {
        name: "send_input".to_string(),
        description: "Send a message to an existing agent. Use interrupt=true to redirect work immediately. You should reuse the agent by send_input if you believe your assigned task is highly dependent on the context of a previous task."
            .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["target".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(send_input_output_schema()),
    })
}
/// Build the `send_message` tool spec (MultiAgentV2).
///
/// Unlike v1 `send_input`, both `target` and `items` are required, `target`
/// may also be a canonical task name, and delivery does not trigger a new
/// turn in the recipient.
pub fn create_send_message_tool() -> ToolSpec {
    let properties = BTreeMap::from([
        (
            "target".to_string(),
            JsonSchema::String {
                description: Some(
                    "Agent id or canonical task name to message (from spawn_agent).".to_string(),
                ),
            },
        ),
        ("items".to_string(), create_collab_input_items_schema()),
        (
            "interrupt".to_string(),
            JsonSchema::Boolean {
                description: Some(
                    "When true, stop the agent's current task and handle this immediately. When false (default), queue this message."
                        .to_string(),
                ),
            },
        ),
    ]);
    ToolSpec::Function(ResponsesApiTool {
        name: "send_message".to_string(),
        description: "Add a message to an existing agent without triggering a new turn. Use interrupt=true to stop the current task first. In MultiAgentV2, this tool currently supports text content only."
            .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["target".to_string(), "items".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(send_input_output_schema()),
    })
}
/// Build the `assign_task` tool spec: append content to an existing agent and
/// trigger a turn in the target. Requires both `target` and `items`.
pub fn create_assign_task_tool() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "target".to_string(),
        JsonSchema::String {
            description: Some(
                "Agent id or canonical task name to message (from spawn_agent).".to_string(),
            ),
        },
    );
    props.insert("items".to_string(), create_collab_input_items_schema());
    props.insert(
        "interrupt".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "When true, stop the agent's current task and handle this immediately. When false (default), queue this message."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "assign_task".to_string(),
        description: "Add a message to an existing agent and trigger a turn in the target. Use interrupt=true to redirect work immediately. In MultiAgentV2, this tool currently supports text content only."
            .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: props,
            required: Some(vec!["target".to_string(), "items".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(send_input_output_schema()),
    })
}
/// Build the `resume_agent` tool spec for reopening a previously closed agent
/// by id so it can receive send_input and wait_agent calls again.
pub fn create_resume_agent_tool() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "id".to_string(),
        JsonSchema::String {
            description: Some("Agent id to resume.".to_string()),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "resume_agent".to_string(),
        description:
            "Resume a previously closed agent by id so it can receive send_input and wait_agent calls."
                .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: props,
            required: Some(vec!["id".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(resume_agent_output_schema()),
    })
}
/// Build the v1 `wait_agent` tool spec; the v1 output carries full final
/// statuses keyed by agent id (see `wait_output_schema_v1`).
pub fn create_wait_agent_tool_v1(options: WaitAgentTimeoutOptions) -> ToolSpec {
    let description = "Wait for agents to reach a final status. Completed statuses may include the agent's final message. Returns empty status when timed out. Once the agent reaches a final status, a notification message will be received containing the same completed status.".to_string();
    ToolSpec::Function(ResponsesApiTool {
        name: "wait_agent".to_string(),
        description,
        strict: false,
        defer_loading: None,
        parameters: wait_agent_tool_parameters_v1(options),
        output_schema: Some(wait_output_schema_v1()),
    })
}
/// Build the v2 `wait_agent` tool spec; the v2 output is a brief summary
/// message rather than the agent's final content (see `wait_output_schema_v2`).
pub fn create_wait_agent_tool_v2(options: WaitAgentTimeoutOptions) -> ToolSpec {
    let description = "Wait for agents to reach a final status. Returns a brief wait summary instead of the agent's final content. Returns a timeout summary when no agent reaches a final status before the deadline.".to_string();
    ToolSpec::Function(ResponsesApiTool {
        name: "wait_agent".to_string(),
        description,
        strict: false,
        defer_loading: None,
        parameters: wait_agent_tool_parameters_v2(options),
        output_schema: Some(wait_output_schema_v2()),
    })
}
/// Build the `list_agents` tool spec. Takes an optional `path_prefix` filter;
/// no parameter is required.
pub fn create_list_agents_tool() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "path_prefix".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional task-path prefix. Accepts the same relative or absolute task-path syntax as other MultiAgentV2 agent targets."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "list_agents".to_string(),
        description:
            "List live agents in the current root thread tree. Optionally filter by task-path prefix."
                .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: props,
            required: None,
            additional_properties: Some(false.into()),
        },
        output_schema: Some(list_agents_output_schema()),
    })
}
/// Build the v1 `close_agent` tool spec; v1 targets are agent ids only.
pub fn create_close_agent_tool_v1() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "target".to_string(),
        JsonSchema::String {
            description: Some("Agent id to close (from spawn_agent).".to_string()),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "close_agent".to_string(),
        description: "Close an agent and any open descendants when they are no longer needed, and return the target agent's previous status before shutdown was requested. Don't keep agents open for too long if they are not needed anymore.".to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: props,
            required: Some(vec!["target".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(close_agent_output_schema()),
    })
}
/// Build the v2 `close_agent` tool spec; v2 also accepts canonical task names
/// as targets. Otherwise identical to the v1 spec.
pub fn create_close_agent_tool_v2() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "target".to_string(),
        JsonSchema::String {
            description: Some(
                "Agent id or canonical task name to close (from spawn_agent).".to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "close_agent".to_string(),
        description: "Close an agent and any open descendants when they are no longer needed, and return the target agent's previous status before shutdown was requested. Don't keep agents open for too long if they are not needed anymore.".to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: props,
            required: Some(vec!["target".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: Some(close_agent_output_schema()),
    })
}
/// JSON Schema for an agent status value: either a bare status string
/// (`pending_init`, `running`, `shutdown`, `not_found`), an object carrying a
/// nullable `completed` message, or an object carrying an `errored` message.
fn agent_status_output_schema() -> Value {
    json!({
        "oneOf": [
            {
                // Non-terminal and lookup-failure statuses are plain strings.
                "type": "string",
                "enum": ["pending_init", "running", "shutdown", "not_found"]
            },
            {
                // Successful completion; the final message may be null.
                "type": "object",
                "properties": {
                    "completed": {
                        "type": ["string", "null"]
                    }
                },
                "required": ["completed"],
                "additionalProperties": false
            },
            {
                // Failure with an error description.
                "type": "object",
                "properties": {
                    "errored": {
                        "type": "string"
                    }
                },
                "required": ["errored"],
                "additionalProperties": false
            }
        ]
    })
}
/// Output schema for spawn_agent v1: the spawned agent's thread id plus a
/// nullable user-facing nickname.
fn spawn_agent_output_schema_v1() -> Value {
    json!({
        "type": "object",
        "properties": {
            "agent_id": {
                "type": "string",
                "description": "Thread identifier for the spawned agent."
            },
            "nickname": {
                "type": ["string", "null"],
                "description": "User-facing nickname for the spawned agent when available."
            }
        },
        "required": ["agent_id", "nickname"],
        "additionalProperties": false
    })
}
/// Output schema for spawn_agent v2: adds the canonical `task_name` and makes
/// the legacy `agent_id` nullable. All three keys are always present.
fn spawn_agent_output_schema_v2() -> Value {
    json!({
        "type": "object",
        "properties": {
            "agent_id": {
                "type": ["string", "null"],
                "description": "Legacy thread identifier for the spawned agent."
            },
            "task_name": {
                "type": "string",
                "description": "Canonical task name for the spawned agent."
            },
            "nickname": {
                "type": ["string", "null"],
                "description": "User-facing nickname for the spawned agent when available."
            }
        },
        "required": ["agent_id", "task_name", "nickname"],
        "additionalProperties": false
    })
}
/// Output schema shared by send_input, send_message, and assign_task: a single
/// required `submission_id` string.
fn send_input_output_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "submission_id": {
                "type": "string",
                "description": "Identifier for the queued input submission."
            }
        },
        "required": ["submission_id"],
        "additionalProperties": false
    })
}
/// Output schema for list_agents: an `agents` array whose entries each carry a
/// name, a status (see `agent_status_output_schema`), and a nullable last
/// task message.
fn list_agents_output_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "agents": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "agent_name": {
                            "type": "string",
                            "description": "Canonical task name for the agent when available, otherwise the agent id."
                        },
                        "agent_status": {
                            "description": "Last known status of the agent.",
                            // allOf wraps the shared status union so the
                            // sibling description key remains valid.
                            "allOf": [agent_status_output_schema()]
                        },
                        "last_task_message": {
                            "type": ["string", "null"],
                            "description": "Most recent user or inter-agent instruction received by the agent, when available."
                        }
                    },
                    "required": ["agent_name", "agent_status", "last_task_message"],
                    "additionalProperties": false
                },
                "description": "Live agents visible in the current root thread tree."
            }
        },
        "required": ["agents"],
        "additionalProperties": false
    })
}
/// Output schema for resume_agent: a single required `status` field using the
/// shared agent-status union.
fn resume_agent_output_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "status": agent_status_output_schema()
        },
        "required": ["status"],
        "additionalProperties": false
    })
}
/// Output schema for wait_agent v1: a map of final statuses keyed by agent id
/// plus a `timed_out` flag.
fn wait_output_schema_v1() -> Value {
    json!({
        "type": "object",
        "properties": {
            "status": {
                "type": "object",
                "description": "Final statuses keyed by agent id.",
                // Arbitrary agent-id keys, each validated against the shared
                // status union.
                "additionalProperties": agent_status_output_schema()
            },
            "timed_out": {
                "type": "boolean",
                "description": "Whether the wait call returned due to timeout before any agent reached a final status."
            }
        },
        "required": ["status", "timed_out"],
        "additionalProperties": false
    })
}
/// Output schema for wait_agent v2: a brief summary `message` plus a
/// `timed_out` flag, instead of v1's per-agent status map.
fn wait_output_schema_v2() -> Value {
    json!({
        "type": "object",
        "properties": {
            "message": {
                "type": "string",
                "description": "Brief wait summary without the agent's final content."
            },
            "timed_out": {
                "type": "boolean",
                "description": "Whether the wait call returned due to timeout before any agent reached a final status."
            }
        },
        "required": ["message", "timed_out"],
        "additionalProperties": false
    })
}
/// Output schema for close_agent: the status the target had before shutdown
/// was requested, using the shared agent-status union.
fn close_agent_output_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "previous_status": {
                "description": "The agent status observed before shutdown was requested.",
                // allOf wraps the shared status union so the sibling
                // description key remains valid.
                "allOf": [agent_status_output_schema()]
            }
        },
        "required": ["previous_status"],
        "additionalProperties": false
    })
}
/// Schema for the structured `items` array accepted by the collab input tools.
///
/// Each entry is a loosely-typed object: which fields are meaningful depends
/// on the entry's `type` (text, image, local_image, skill, or mention), so no
/// field is marked required.
fn create_collab_input_items_schema() -> JsonSchema {
    let mut item_props = BTreeMap::new();
    item_props.insert(
        "type".to_string(),
        JsonSchema::String {
            description: Some(
                "Input item type: text, image, local_image, skill, or mention.".to_string(),
            ),
        },
    );
    item_props.insert(
        "text".to_string(),
        JsonSchema::String {
            description: Some("Text content when type is text.".to_string()),
        },
    );
    item_props.insert(
        "image_url".to_string(),
        JsonSchema::String {
            description: Some("Image URL when type is image.".to_string()),
        },
    );
    item_props.insert(
        "path".to_string(),
        JsonSchema::String {
            description: Some(
                "Path when type is local_image/skill, or structured mention target such as app://<connector-id> or plugin://<plugin-name>@<marketplace-name> when type is mention."
                    .to_string(),
            ),
        },
    );
    item_props.insert(
        "name".to_string(),
        JsonSchema::String {
            description: Some("Display name when type is skill or mention.".to_string()),
        },
    );
    JsonSchema::Array {
        items: Box::new(JsonSchema::Object {
            properties: item_props,
            required: None,
            additional_properties: Some(false.into()),
        }),
        description: Some(
            "Structured input items. Use this to pass explicit mentions (for example app:// connector paths)."
                .to_string(),
        ),
    }
}
/// Parameter properties shared by the spawn_agent tool variants: initial
/// message/items, agent type, context forking, and model/effort overrides.
/// `agent_type_description` supplies the caller-specific help text for the
/// `agent_type` field.
fn spawn_agent_common_properties(agent_type_description: &str) -> BTreeMap<String, JsonSchema> {
    let mut props = BTreeMap::new();
    props.insert(
        "message".to_string(),
        JsonSchema::String {
            description: Some(
                "Initial plain-text task for the new agent. Use either message or items."
                    .to_string(),
            ),
        },
    );
    props.insert("items".to_string(), create_collab_input_items_schema());
    props.insert(
        "agent_type".to_string(),
        JsonSchema::String {
            description: Some(agent_type_description.to_string()),
        },
    );
    props.insert(
        "fork_context".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "When true, fork the current thread history into the new agent before sending the initial prompt. This must be used when you want the new agent to have exactly the same context as you."
                    .to_string(),
            ),
        },
    );
    props.insert(
        "model".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional model override for the new agent. Replaces the inherited model."
                    .to_string(),
            ),
        },
    );
    props.insert(
        "reasoning_effort".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional reasoning effort override for the new agent. Replaces the inherited reasoning effort."
                    .to_string(),
            ),
        },
    );
    props
}
/// Compose the long-form spawn_agent tool description.
///
/// `available_models_description` is the rendered model list and
/// `return_value_description` states what the tool call returns; both are
/// interpolated into the guidance template below.
fn spawn_agent_tool_description(
    available_models_description: &str,
    return_value_description: &str,
) -> String {
    // The raw-string template keeps its content at column 0 so the rendered
    // prompt carries no leading indentation.
    format!(
        r#"
Only use `spawn_agent` if and only if the user explicitly asks for sub-agents, delegation, or parallel agent work.
Requests for depth, thoroughness, research, investigation, or detailed codebase analysis do not count as permission to spawn.
Agent-role guidance below only helps choose which agent to use after spawning is already authorized; it never authorizes spawning by itself.
Spawn a sub-agent for a well-scoped task. {return_value_description} This spawn_agent tool provides you access to smaller but more efficient sub-agents. A mini model can solve many tasks faster than the main model. You should follow the rules and guidelines below to use this tool.
{available_models_description}
### When to delegate vs. do the subtask yourself
- First, quickly analyze the overall user task and form a succinct high-level plan. Identify which tasks are immediate blockers on the critical path, and which tasks are sidecar tasks that are needed but can run in parallel without blocking the next local step. As part of that plan, explicitly decide what immediate task you should do locally right now. Do this planning step before delegating to agents so you do not hand off the immediate blocking task to a submodel and then waste time waiting on it.
- Use the smaller subagent when a subtask is easy enough for it to handle and can run in parallel with your local work. Prefer delegating concrete, bounded sidecar tasks that materially advance the main task without blocking your immediate next local step.
- Do not delegate urgent blocking work when your immediate next step depends on that result. If the very next action is blocked on that task, the main rollout should usually do it locally to keep the critical path moving.
- Keep work local when the subtask is too difficult to delegate well and when it is tightly coupled, urgent, or likely to block your immediate next step.
### Designing delegated subtasks
- Subtasks must be concrete, well-defined, and self-contained.
- Delegated subtasks must materially advance the main task.
- Do not duplicate work between the main rollout and delegated subtasks.
- Avoid issuing multiple delegate calls on the same unresolved thread unless the new delegated task is genuinely different and necessary.
- Narrow the delegated ask to the concrete output you need next.
- For coding tasks, prefer delegating concrete code-change worker subtasks over read-only explorer analysis when the subagent can make a bounded patch in a clear write scope.
- When delegating coding work, instruct the submodel to edit files directly in its forked workspace and list the file paths it changed in the final answer.
- For code-edit subtasks, decompose work so each delegated task has a disjoint write set.
### After you delegate
- Call wait_agent very sparingly. Only call wait_agent when you need the result immediately for the next critical-path step and you are blocked until it returns.
- Do not redo delegated subagent tasks yourself; focus on integrating results or tackling non-overlapping work.
- While the subagent is running in the background, do meaningful non-overlapping work immediately.
- Do not repeatedly wait by reflex.
- When a delegated coding task returns, quickly review the uploaded changes, then integrate or refine them.
### Parallel delegation patterns
- Run multiple independent information-seeking subtasks in parallel when you have distinct questions that can be answered independently.
- Split implementation into disjoint codebase slices and spawn multiple agents for them in parallel when the write scopes do not overlap.
- Delegate verification only when it can run in parallel with ongoing implementation and is likely to catch a concrete risk before final integration.
- The key is to find opportunities to spawn multiple independent subtasks in parallel within the same round, while ensuring each subtask is well-defined, self-contained, and materially advances the main task."#
    )
}
fn spawn_agent_models_description(models: &[ModelPreset]) -> String {
let visible_models: Vec<&ModelPreset> =
models.iter().filter(|model| model.show_in_picker).collect();
if visible_models.is_empty() {
return "No picker-visible models are currently loaded.".to_string();
}
visible_models
.into_iter()
.map(|model| {
let efforts = model
.supported_reasoning_efforts
.iter()
.map(|preset| format!("{} ({})", preset.effort, preset.description))
.collect::<Vec<_>>()
.join(", ");
format!(
"- {} (`{}`): {} Default reasoning effort: {}. Supported reasoning efforts: {}.",
model.display_name,
model.model,
model.description,
model.default_reasoning_effort,
efforts
)
})
.collect::<Vec<_>>()
.join("\n")
}
/// Parameter schema for wait_agent v1: required `targets` (agent ids) plus an
/// optional `timeout_ms` bounded by `options`.
fn wait_agent_tool_parameters_v1(options: WaitAgentTimeoutOptions) -> JsonSchema {
    let mut props = BTreeMap::new();
    props.insert(
        "targets".to_string(),
        JsonSchema::Array {
            items: Box::new(JsonSchema::String { description: None }),
            description: Some(
                "Agent ids to wait on. Pass multiple ids to wait for whichever finishes first."
                    .to_string(),
            ),
        },
    );
    props.insert(
        "timeout_ms".to_string(),
        JsonSchema::Number {
            description: Some(format!(
                "Optional timeout in milliseconds. Defaults to {}, min {}, max {}. Prefer longer waits (minutes) to avoid busy polling.",
                options.default_timeout_ms, options.min_timeout_ms, options.max_timeout_ms,
            )),
        },
    );
    JsonSchema::Object {
        properties: props,
        required: Some(vec!["targets".to_string()]),
        additional_properties: Some(false.into()),
    }
}
fn wait_agent_tool_parameters_v2(options: WaitAgentTimeoutOptions) -> JsonSchema {
let properties = BTreeMap::from([
(
"targets".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some(
"Agent ids or canonical task names to wait on. Pass multiple targets to wait for whichever finishes first."
.to_string(),
),
},
),
(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some(format!(
"Optional timeout in milliseconds. Defaults to {}, min {}, max {}. Prefer longer waits (minutes) to avoid busy polling.",
options.default_timeout_ms, options.min_timeout_ms, options.max_timeout_ms,
)),
},
),
]);
JsonSchema::Object {
properties,
required: Some(vec!["targets".to_string()]),
additional_properties: Some(false.into()),
}
}
#[cfg(test)]
#[path = "agent_tool_tests.rs"]
mod tests;

View File

@@ -1,152 +0,0 @@
use super::*;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use pretty_assertions::assert_eq;
use serde_json::json;
/// Build a minimal `ModelPreset` test fixture whose string fields are all
/// derived from `id`, with picker visibility controlled by `show_in_picker`.
/// Every preset uses medium reasoning effort as both default and sole
/// supported effort.
fn model_preset(id: &str, show_in_picker: bool) -> ModelPreset {
    ModelPreset {
        id: id.to_string(),
        model: format!("{id}-model"),
        display_name: format!("{id} display"),
        description: format!("{id} description"),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![ReasoningEffortPreset {
            effort: ReasoningEffort::Medium,
            description: "Balanced".to_string(),
        }],
        supports_personality: false,
        is_default: false,
        upgrade: None,
        show_in_picker,
        availability_nux: None,
        supported_in_api: true,
        input_modalities: Vec::new(),
    }
}
/// spawn_agent v2 must require `task_name`, list only picker-visible models in
/// its description, and keep the v2 output schema's required fields.
#[test]
fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() {
    let tool = create_spawn_agent_tool_v2(SpawnAgentToolOptions {
        available_models: &[
            model_preset("visible", /*show_in_picker*/ true),
            model_preset("hidden", /*show_in_picker*/ false),
        ],
        agent_type_description: "role help".to_string(),
    });
    // Destructure via match instead of let-else for the same assertions.
    let (description, parameters, output_schema) = match tool {
        ToolSpec::Function(ResponsesApiTool {
            description,
            parameters,
            output_schema,
            ..
        }) => (description, parameters, output_schema),
        _ => panic!("spawn_agent should be a function tool"),
    };
    let (properties, required) = match parameters {
        JsonSchema::Object {
            properties,
            required,
            ..
        } => (properties, required),
        _ => panic!("spawn_agent should use object params"),
    };
    assert!(description.contains("visible display (`visible-model`)"));
    assert!(!description.contains("hidden display (`hidden-model`)"));
    assert!(properties.contains_key("task_name"));
    assert_eq!(
        properties.get("agent_type"),
        Some(&JsonSchema::String {
            description: Some("role help".to_string()),
        })
    );
    assert_eq!(required, Some(vec!["task_name".to_string()]));
    assert_eq!(
        output_schema.expect("spawn_agent output schema")["required"],
        json!(["agent_id", "task_name", "nickname"])
    );
}
/// send_message must require `target` and `items`, drop the legacy `message`
/// property, and report a `submission_id` in its output schema.
#[test]
fn send_message_tool_requires_items_and_uses_submission_output() {
    let (parameters, output_schema) = match create_send_message_tool() {
        ToolSpec::Function(ResponsesApiTool {
            parameters,
            output_schema,
            ..
        }) => (parameters, output_schema),
        _ => panic!("send_message should be a function tool"),
    };
    let (properties, required) = match parameters {
        JsonSchema::Object {
            properties,
            required,
            ..
        } => (properties, required),
        _ => panic!("send_message should use object params"),
    };
    assert!(properties.contains_key("target"));
    assert!(properties.contains_key("items"));
    assert!(!properties.contains_key("message"));
    assert_eq!(
        required,
        Some(vec!["target".to_string(), "items".to_string()])
    );
    assert_eq!(
        output_schema.expect("send_message output schema")["required"],
        json!(["submission_id"])
    );
}
/// wait_agent v2 must document task-name targets and return a summary-only
/// output schema.
#[test]
fn wait_agent_tool_v2_uses_task_targets_and_summary_output() {
    let (parameters, output_schema) = match create_wait_agent_tool_v2(WaitAgentTimeoutOptions {
        default_timeout_ms: 30_000,
        min_timeout_ms: 10_000,
        max_timeout_ms: 3_600_000,
    }) {
        ToolSpec::Function(ResponsesApiTool {
            parameters,
            output_schema,
            ..
        }) => (parameters, output_schema),
        _ => panic!("wait_agent should be a function tool"),
    };
    let properties = match parameters {
        JsonSchema::Object { properties, .. } => properties,
        _ => panic!("wait_agent should use object params"),
    };
    let targets_description = match properties.get("targets") {
        Some(JsonSchema::Array {
            description: Some(description),
            ..
        }) => description,
        _ => panic!("wait_agent should define targets array"),
    };
    assert!(targets_description.contains("canonical task names"));
    assert_eq!(
        output_schema.expect("wait output schema")["properties"]["message"]["description"],
        json!("Brief wait summary without the agent's final content.")
    );
}
/// list_agents must expose the `path_prefix` filter and require the expected
/// per-agent fields in its output schema.
#[test]
fn list_agents_tool_includes_path_prefix_and_agent_fields() {
    let (parameters, output_schema) = match create_list_agents_tool() {
        ToolSpec::Function(ResponsesApiTool {
            parameters,
            output_schema,
            ..
        }) => (parameters, output_schema),
        _ => panic!("list_agents should be a function tool"),
    };
    let properties = match parameters {
        JsonSchema::Object { properties, .. } => properties,
        _ => panic!("list_agents should use object params"),
    };
    assert!(properties.contains_key("path_prefix"));
    assert_eq!(
        output_schema.expect("list_agents output schema")["properties"]["agents"]["items"]["required"],
        json!(["agent_name", "agent_status", "last_task_message"])
    );
}

View File

@@ -1,11 +1,6 @@
use crate::FreeformTool;
use crate::FreeformToolFormat;
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use codex_code_mode::CodeModeToolKind;
use codex_code_mode::ToolDefinition as CodeModeToolDefinition;
use std::collections::BTreeMap;
/// Augment tool descriptions with code-mode-specific exec samples.
pub fn augment_tool_spec_for_code_mode(spec: ToolSpec) -> ToolSpec {
@@ -37,85 +32,6 @@ pub fn tool_spec_to_code_mode_tool_definition(spec: &ToolSpec) -> Option<CodeMod
.then(|| codex_code_mode::augment_tool_definition(definition))
}
/// Build the function-tool spec for waiting on a yielded exec cell: required
/// `cell_id` plus optional yield time, token cap, and terminate flag.
pub fn create_wait_tool() -> ToolSpec {
    let mut props = BTreeMap::new();
    props.insert(
        "cell_id".to_string(),
        JsonSchema::String {
            description: Some("Identifier of the running exec cell.".to_string()),
        },
    );
    props.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some(
                "How long to wait (in milliseconds) for more output before yielding again."
                    .to_string(),
            ),
        },
    );
    props.insert(
        "max_tokens".to_string(),
        JsonSchema::Number {
            description: Some(
                "Maximum number of output tokens to return for this wait call.".to_string(),
            ),
        },
    );
    props.insert(
        "terminate".to_string(),
        JsonSchema::Boolean {
            description: Some("Whether to terminate the running exec cell.".to_string()),
        },
    );
    let description = format!(
        "Waits on a yielded `{}` cell and returns new output or completion.\n{}",
        codex_code_mode::PUBLIC_TOOL_NAME,
        codex_code_mode::build_wait_tool_description().trim()
    );
    ToolSpec::Function(ResponsesApiTool {
        name: codex_code_mode::WAIT_TOOL_NAME.to_string(),
        description,
        strict: false,
        parameters: JsonSchema::Object {
            properties: props,
            required: Some(vec!["cell_id".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: None,
        defer_loading: None,
    })
}
/// Build the freeform code-mode exec tool, constrained by a Lark grammar that
/// accepts either a leading `// @exec:` pragma line or bare source text.
pub fn create_code_mode_tool(
    enabled_tools: &[(String, String)],
    code_mode_only_enabled: bool,
) -> ToolSpec {
    const GRAMMAR: &str = r#"
start: pragma_source | plain_source
pragma_source: PRAGMA_LINE NEWLINE SOURCE
plain_source: SOURCE
PRAGMA_LINE: /[ \t]*\/\/ @exec:[^\r\n]*/
NEWLINE: /\r?\n/
SOURCE: /[\s\S]+/
"#;
    let format = FreeformToolFormat {
        r#type: "grammar".to_string(),
        syntax: "lark".to_string(),
        definition: GRAMMAR.to_string(),
    };
    ToolSpec::Freeform(FreeformTool {
        name: codex_code_mode::PUBLIC_TOOL_NAME.to_string(),
        description: codex_code_mode::build_exec_tool_description(
            enabled_tools,
            code_mode_only_enabled,
        ),
        format,
    })
}
fn code_mode_tool_definition_for_spec(spec: &ToolSpec) -> Option<CodeModeToolDefinition> {
match spec {
ToolSpec::Function(tool) => Some(CodeModeToolDefinition {

View File

@@ -1,6 +1,4 @@
use super::augment_tool_spec_for_code_mode;
use super::create_code_mode_tool;
use super::create_wait_tool;
use super::tool_spec_to_code_mode_tool_definition;
use crate::AdditionalProperties;
use crate::FreeformTool;
@@ -123,89 +121,3 @@ fn tool_spec_to_code_mode_tool_definition_skips_unsupported_variants() {
None
);
}
/// `create_wait_tool` must produce exactly the expected function-tool spec.
#[test]
fn create_wait_tool_matches_expected_spec() {
    // Build the expected spec piecewise, then compare in one assertion.
    let mut expected_props = BTreeMap::new();
    expected_props.insert(
        "cell_id".to_string(),
        JsonSchema::String {
            description: Some("Identifier of the running exec cell.".to_string()),
        },
    );
    expected_props.insert(
        "max_tokens".to_string(),
        JsonSchema::Number {
            description: Some(
                "Maximum number of output tokens to return for this wait call."
                    .to_string(),
            ),
        },
    );
    expected_props.insert(
        "terminate".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "Whether to terminate the running exec cell.".to_string(),
            ),
        },
    );
    expected_props.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some(
                "How long to wait (in milliseconds) for more output before yielding again."
                    .to_string(),
            ),
        },
    );
    let expected = ToolSpec::Function(ResponsesApiTool {
        name: codex_code_mode::WAIT_TOOL_NAME.to_string(),
        description: format!(
            "Waits on a yielded `{}` cell and returns new output or completion.\n{}",
            codex_code_mode::PUBLIC_TOOL_NAME,
            codex_code_mode::build_wait_tool_description().trim()
        ),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: expected_props,
            required: Some(vec!["cell_id".to_string()]),
            additional_properties: Some(false.into()),
        },
        output_schema: None,
    });
    assert_eq!(create_wait_tool(), expected);
}
/// `create_code_mode_tool` must produce exactly the expected freeform spec,
/// including the Lark grammar text.
#[test]
fn create_code_mode_tool_matches_expected_spec() {
    let enabled_tools = vec![("update_plan".to_string(), "Update the plan".to_string())];
    let expected = ToolSpec::Freeform(FreeformTool {
        name: codex_code_mode::PUBLIC_TOOL_NAME.to_string(),
        description: codex_code_mode::build_exec_tool_description(
            &enabled_tools,
            /*code_mode_only*/ true
        ),
        format: FreeformToolFormat {
            r#type: "grammar".to_string(),
            syntax: "lark".to_string(),
            definition: r#"
start: pragma_source | plain_source
pragma_source: PRAGMA_LINE NEWLINE SOURCE
plain_source: SOURCE
PRAGMA_LINE: /[ \t]*\/\/ @exec:[^\r\n]*/
NEWLINE: /\r?\n/
SOURCE: /[\s\S]+/
"#
            .to_string(),
        },
    });
    assert_eq!(
        create_code_mode_tool(&enabled_tools, /*code_mode_only_enabled*/ true),
        expected
    );
}

View File

@@ -1,59 +0,0 @@
use crate::FreeformTool;
use crate::FreeformToolFormat;
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use std::collections::BTreeMap;
/// Build the freeform `js_repl` tool spec.
///
/// Input stays freeform JavaScript, but the Lark grammar rejects the most
/// common malformed payload shapes (JSON wrappers, quoted strings, markdown
/// fences) before the runtime `reject_json_or_quoted_source` validation runs.
/// The API's regex engine has no look-around support, so the grammar gates on
/// the first significant token rather than using negative lookaheads.
pub fn create_js_repl_tool() -> ToolSpec {
    const GRAMMAR: &str = r#"
start: pragma_source | plain_source
pragma_source: PRAGMA_LINE NEWLINE js_source
plain_source: PLAIN_JS_SOURCE
js_source: JS_SOURCE
PRAGMA_LINE: /[ \t]*\/\/ codex-js-repl:[^\r\n]*/
NEWLINE: /\r?\n/
PLAIN_JS_SOURCE: /(?:\s*)(?:[^\s{\"`]|`[^`]|``[^`])[\s\S]*/
JS_SOURCE: /(?:\s*)(?:[^\s{\"`]|`[^`]|``[^`])[\s\S]*/
"#;
    let format = FreeformToolFormat {
        r#type: "grammar".to_string(),
        syntax: "lark".to_string(),
        definition: GRAMMAR.to_string(),
    };
    ToolSpec::Freeform(FreeformTool {
        name: "js_repl".to_string(),
        description: "Runs JavaScript in a persistent Node kernel with top-level await. This is a freeform tool: send raw JavaScript source text, optionally with a first-line pragma like `// codex-js-repl: timeout_ms=15000`; do not send JSON/quotes/markdown fences."
            .to_string(),
        format,
    })
}
/// Build the `js_repl_reset` function-tool spec; it takes no parameters.
pub fn create_js_repl_reset_tool() -> ToolSpec {
    let parameters = JsonSchema::Object {
        properties: BTreeMap::new(),
        required: None,
        additional_properties: Some(false.into()),
    };
    ToolSpec::Function(ResponsesApiTool {
        name: "js_repl_reset".to_string(),
        description:
            "Restarts the js_repl kernel for this run and clears persisted top-level bindings."
                .to_string(),
        strict: false,
        defer_loading: None,
        parameters,
        output_schema: None,
    })
}
#[cfg(test)]
#[path = "js_repl_tool_tests.rs"]
mod tests;

View File

@@ -1,40 +0,0 @@
use super::*;
use crate::ToolSpec;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
/// The js_repl grammar must be Lark, contain the expected tokens and backtick
/// guards, and avoid unsupported negative lookaheads.
#[test]
fn js_repl_tool_uses_expected_freeform_grammar() {
    let format = match create_js_repl_tool() {
        ToolSpec::Freeform(FreeformTool { format, .. }) => format,
        _ => panic!("js_repl should use a freeform tool spec"),
    };
    assert_eq!(format.syntax, "lark");
    let expected_fragments = [
        "PRAGMA_LINE",
        "`[^`]",
        "``[^`]",
        "PLAIN_JS_SOURCE",
        "codex-js-repl:",
    ];
    for fragment in expected_fragments {
        assert!(format.definition.contains(fragment));
    }
    // The API regex engine has no look-around, so the grammar must not use it.
    assert!(!format.definition.contains("(?!"));
}
/// `create_js_repl_reset_tool` must produce exactly the expected
/// parameterless function-tool spec.
#[test]
fn js_repl_reset_tool_matches_expected_spec() {
    let expected = ToolSpec::Function(ResponsesApiTool {
        name: "js_repl_reset".to_string(),
        description:
            "Restarts the js_repl kernel for this run and clears persisted top-level bindings."
                .to_string(),
        strict: false,
        defer_loading: None,
        parameters: JsonSchema::Object {
            properties: BTreeMap::new(),
            required: None,
            additional_properties: Some(false.into()),
        },
        output_schema: None,
    });
    assert_eq!(create_js_repl_reset_tool(), expected);
}

View File

@@ -1,44 +1,19 @@
//! Shared tool definitions and Responses API tool primitives that can live
//! outside `codex-core`.
mod agent_job_tool;
mod agent_tool;
mod code_mode;
mod dynamic_tool;
mod js_repl_tool;
mod json_schema;
mod local_tool;
mod mcp_resource_tool;
mod mcp_tool;
mod request_user_input_tool;
mod responses_api;
mod tool_definition;
mod tool_spec;
mod utility_tool;
mod view_image;
pub use agent_job_tool::create_report_agent_job_result_tool;
pub use agent_job_tool::create_spawn_agents_on_csv_tool;
pub use agent_tool::SpawnAgentToolOptions;
pub use agent_tool::WaitAgentTimeoutOptions;
pub use agent_tool::create_assign_task_tool;
pub use agent_tool::create_close_agent_tool_v1;
pub use agent_tool::create_close_agent_tool_v2;
pub use agent_tool::create_list_agents_tool;
pub use agent_tool::create_resume_agent_tool;
pub use agent_tool::create_send_input_tool_v1;
pub use agent_tool::create_send_message_tool;
pub use agent_tool::create_spawn_agent_tool_v1;
pub use agent_tool::create_spawn_agent_tool_v2;
pub use agent_tool::create_wait_agent_tool_v1;
pub use agent_tool::create_wait_agent_tool_v2;
pub use code_mode::augment_tool_spec_for_code_mode;
pub use code_mode::create_code_mode_tool;
pub use code_mode::create_wait_tool;
pub use code_mode::tool_spec_to_code_mode_tool_definition;
pub use dynamic_tool::parse_dynamic_tool;
pub use js_repl_tool::create_js_repl_reset_tool;
pub use js_repl_tool::create_js_repl_tool;
pub use json_schema::AdditionalProperties;
pub use json_schema::JsonSchema;
pub use json_schema::parse_tool_input_schema;
@@ -49,12 +24,8 @@ pub use local_tool::create_request_permissions_tool;
pub use local_tool::create_shell_command_tool;
pub use local_tool::create_shell_tool;
pub use local_tool::create_write_stdin_tool;
pub use mcp_resource_tool::create_list_mcp_resource_templates_tool;
pub use mcp_resource_tool::create_list_mcp_resources_tool;
pub use mcp_resource_tool::create_read_mcp_resource_tool;
pub use mcp_tool::mcp_call_tool_result_output_schema;
pub use mcp_tool::parse_mcp_tool;
pub use request_user_input_tool::create_request_user_input_tool;
pub use responses_api::FreeformTool;
pub use responses_api::FreeformToolFormat;
pub use responses_api::ResponsesApiNamespace;
@@ -71,7 +42,5 @@ pub use tool_spec::ResponsesApiWebSearchFilters;
pub use tool_spec::ResponsesApiWebSearchUserLocation;
pub use tool_spec::ToolSpec;
pub use tool_spec::create_tools_json_for_responses_api;
pub use utility_tool::create_list_dir_tool;
pub use utility_tool::create_test_sync_tool;
pub use view_image::ViewImageToolOptions;
pub use view_image::create_view_image_tool;

View File

@@ -1,118 +0,0 @@
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use std::collections::BTreeMap;
pub fn create_list_mcp_resources_tool() -> ToolSpec {
let properties = BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"Optional MCP server name. When omitted, lists resources from every configured server."
.to_string(),
),
},
),
(
"cursor".to_string(),
JsonSchema::String {
description: Some(
"Opaque cursor returned by a previous list_mcp_resources call for the same server."
.to_string(),
),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "list_mcp_resources".to_string(),
description: "Lists resources provided by MCP servers. Resources allow servers to share data that provides context to language models, such as files, database schemas, or application-specific information. Prefer resources over web search when possible.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
pub fn create_list_mcp_resource_templates_tool() -> ToolSpec {
let properties = BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"Optional MCP server name. When omitted, lists resource templates from all configured servers."
.to_string(),
),
},
),
(
"cursor".to_string(),
JsonSchema::String {
description: Some(
"Opaque cursor returned by a previous list_mcp_resource_templates call for the same server."
.to_string(),
),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "list_mcp_resource_templates".to_string(),
description: "Lists resource templates provided by MCP servers. Parameterized resource templates allow servers to share data that takes parameters and provides context to language models, such as files, database schemas, or application-specific information. Prefer resource templates over web search when possible.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
pub fn create_read_mcp_resource_tool() -> ToolSpec {
let properties = BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"MCP server name exactly as configured. Must match the 'server' field returned by list_mcp_resources."
.to_string(),
),
},
),
(
"uri".to_string(),
JsonSchema::String {
description: Some(
"Resource URI to read. Must be one of the URIs returned by list_mcp_resources."
.to_string(),
),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "read_mcp_resource".to_string(),
description:
"Read a specific resource from an MCP server given the server name and resource URI."
.to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["server".to_string(), "uri".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
#[cfg(test)]
#[path = "mcp_resource_tool_tests.rs"]
mod tests;

View File

@@ -1,119 +0,0 @@
use super::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
// Golden-spec test: pins the full literal shape of the tool returned by
// create_list_mcp_resources_tool(), including both parameter descriptions.
#[test]
fn list_mcp_resources_tool_matches_expected_spec() {
assert_eq!(
create_list_mcp_resources_tool(),
// Expected: a Function tool with optional `server` and `cursor` string
// parameters and no required fields.
ToolSpec::Function(ResponsesApiTool {
name: "list_mcp_resources".to_string(),
description: "Lists resources provided by MCP servers. Resources allow servers to share data that provides context to language models, such as files, database schemas, or application-specific information. Prefer resources over web search when possible.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"Optional MCP server name. When omitted, lists resources from every configured server."
.to_string(),
),
},
),
(
"cursor".to_string(),
JsonSchema::String {
description: Some(
"Opaque cursor returned by a previous list_mcp_resources call for the same server."
.to_string(),
),
},
),
]),
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}
// Golden-spec test: pins the full literal shape of the tool returned by
// create_list_mcp_resource_templates_tool().
#[test]
fn list_mcp_resource_templates_tool_matches_expected_spec() {
assert_eq!(
create_list_mcp_resource_templates_tool(),
// Expected: optional `server` and `cursor` string parameters, none required.
ToolSpec::Function(ResponsesApiTool {
name: "list_mcp_resource_templates".to_string(),
description: "Lists resource templates provided by MCP servers. Parameterized resource templates allow servers to share data that takes parameters and provides context to language models, such as files, database schemas, or application-specific information. Prefer resource templates over web search when possible.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"Optional MCP server name. When omitted, lists resource templates from all configured servers."
.to_string(),
),
},
),
(
"cursor".to_string(),
JsonSchema::String {
description: Some(
"Opaque cursor returned by a previous list_mcp_resource_templates call for the same server."
.to_string(),
),
},
),
]),
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}
// Golden-spec test: pins the full literal shape of the tool returned by
// create_read_mcp_resource_tool(); both parameters are required here.
#[test]
fn read_mcp_resource_tool_matches_expected_spec() {
assert_eq!(
create_read_mcp_resource_tool(),
ToolSpec::Function(ResponsesApiTool {
name: "read_mcp_resource".to_string(),
description:
"Read a specific resource from an MCP server given the server name and resource URI."
.to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([
(
"server".to_string(),
JsonSchema::String {
description: Some(
"MCP server name exactly as configured. Must match the 'server' field returned by list_mcp_resources."
.to_string(),
),
},
),
(
"uri".to_string(),
JsonSchema::String {
description: Some(
"Resource URI to read. Must be one of the URIs returned by list_mcp_resources."
.to_string(),
),
},
),
]),
required: Some(vec!["server".to_string(), "uri".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}

View File

@@ -1,94 +0,0 @@
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use std::collections::BTreeMap;
pub fn create_request_user_input_tool(description: String) -> ToolSpec {
let option_props = BTreeMap::from([
(
"label".to_string(),
JsonSchema::String {
description: Some("User-facing label (1-5 words).".to_string()),
},
),
(
"description".to_string(),
JsonSchema::String {
description: Some(
"One short sentence explaining impact/tradeoff if selected.".to_string(),
),
},
),
]);
let options_schema = JsonSchema::Array {
description: Some(
"Provide 2-3 mutually exclusive choices. Put the recommended option first and suffix its label with \"(Recommended)\". Do not include an \"Other\" option in this list; the client will add a free-form \"Other\" option automatically."
.to_string(),
),
items: Box::new(JsonSchema::Object {
properties: option_props,
required: Some(vec!["label".to_string(), "description".to_string()]),
additional_properties: Some(false.into()),
}),
};
let question_props = BTreeMap::from([
(
"id".to_string(),
JsonSchema::String {
description: Some(
"Stable identifier for mapping answers (snake_case).".to_string(),
),
},
),
(
"header".to_string(),
JsonSchema::String {
description: Some(
"Short header label shown in the UI (12 or fewer chars).".to_string(),
),
},
),
(
"question".to_string(),
JsonSchema::String {
description: Some("Single-sentence prompt shown to the user.".to_string()),
},
),
("options".to_string(), options_schema),
]);
let questions_schema = JsonSchema::Array {
description: Some("Questions to show the user. Prefer 1 and do not exceed 3".to_string()),
items: Box::new(JsonSchema::Object {
properties: question_props,
required: Some(vec![
"id".to_string(),
"header".to_string(),
"question".to_string(),
"options".to_string(),
]),
additional_properties: Some(false.into()),
}),
};
let properties = BTreeMap::from([("questions".to_string(), questions_schema)]);
ToolSpec::Function(ResponsesApiTool {
name: "request_user_input".to_string(),
description,
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["questions".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
#[cfg(test)]
#[path = "request_user_input_tool_tests.rs"]
mod tests;

View File

@@ -1,102 +0,0 @@
use super::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
// Golden-spec test: pins the entire nested questions/options schema produced
// by create_request_user_input_tool, and checks the caller-supplied
// description string is carried through verbatim.
#[test]
fn request_user_input_tool_includes_questions_schema() {
assert_eq!(
create_request_user_input_tool("Ask the user to choose.".to_string()),
ToolSpec::Function(ResponsesApiTool {
name: "request_user_input".to_string(),
description: "Ask the user to choose.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([(
"questions".to_string(),
JsonSchema::Array {
description: Some(
"Questions to show the user. Prefer 1 and do not exceed 3".to_string(),
),
items: Box::new(JsonSchema::Object {
properties: BTreeMap::from([
(
"header".to_string(),
JsonSchema::String {
description: Some(
"Short header label shown in the UI (12 or fewer chars)."
.to_string(),
),
},
),
(
"id".to_string(),
JsonSchema::String {
description: Some(
"Stable identifier for mapping answers (snake_case)."
.to_string(),
),
},
),
(
"options".to_string(),
JsonSchema::Array {
description: Some(
"Provide 2-3 mutually exclusive choices. Put the recommended option first and suffix its label with \"(Recommended)\". Do not include an \"Other\" option in this list; the client will add a free-form \"Other\" option automatically."
.to_string(),
),
items: Box::new(JsonSchema::Object {
properties: BTreeMap::from([
(
"description".to_string(),
JsonSchema::String {
description: Some(
"One short sentence explaining impact/tradeoff if selected."
.to_string(),
),
},
),
(
"label".to_string(),
JsonSchema::String {
description: Some(
"User-facing label (1-5 words)."
.to_string(),
),
},
),
]),
required: Some(vec![
"label".to_string(),
"description".to_string(),
]),
additional_properties: Some(false.into()),
}),
},
),
(
"question".to_string(),
JsonSchema::String {
description: Some(
"Single-sentence prompt shown to the user.".to_string(),
),
},
),
]),
required: Some(vec![
"id".to_string(),
"header".to_string(),
"question".to_string(),
"options".to_string(),
]),
additional_properties: Some(false.into()),
}),
},
)]),
required: Some(vec!["questions".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}

View File

@@ -1,125 +0,0 @@
use crate::JsonSchema;
use crate::ResponsesApiTool;
use crate::ToolSpec;
use std::collections::BTreeMap;
pub fn create_list_dir_tool() -> ToolSpec {
let properties = BTreeMap::from([
(
"dir_path".to_string(),
JsonSchema::String {
description: Some("Absolute path to the directory to list.".to_string()),
},
),
(
"offset".to_string(),
JsonSchema::Number {
description: Some(
"The entry number to start listing from. Must be 1 or greater.".to_string(),
),
},
),
(
"limit".to_string(),
JsonSchema::Number {
description: Some("The maximum number of entries to return.".to_string()),
},
),
(
"depth".to_string(),
JsonSchema::Number {
description: Some(
"The maximum directory depth to traverse. Must be 1 or greater.".to_string(),
),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "list_dir".to_string(),
description:
"Lists entries in a local directory with 1-indexed entry numbers and simple type labels."
.to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["dir_path".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
pub fn create_test_sync_tool() -> ToolSpec {
let barrier_properties = BTreeMap::from([
(
"id".to_string(),
JsonSchema::String {
description: Some(
"Identifier shared by concurrent calls that should rendezvous".to_string(),
),
},
),
(
"participants".to_string(),
JsonSchema::Number {
description: Some(
"Number of tool calls that must arrive before the barrier opens".to_string(),
),
},
),
(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some(
"Maximum time in milliseconds to wait at the barrier".to_string(),
),
},
),
]);
let properties = BTreeMap::from([
(
"sleep_before_ms".to_string(),
JsonSchema::Number {
description: Some(
"Optional delay in milliseconds before any other action".to_string(),
),
},
),
(
"sleep_after_ms".to_string(),
JsonSchema::Number {
description: Some(
"Optional delay in milliseconds after completing the barrier".to_string(),
),
},
),
(
"barrier".to_string(),
JsonSchema::Object {
properties: barrier_properties,
required: Some(vec!["id".to_string(), "participants".to_string()]),
additional_properties: Some(false.into()),
},
),
]);
ToolSpec::Function(ResponsesApiTool {
name: "test_sync_tool".to_string(),
description: "Internal synchronization helper used by Codex integration tests.".to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties,
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
}
#[cfg(test)]
#[path = "utility_tool_tests.rs"]
mod tests;

View File

@@ -1,137 +0,0 @@
use super::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
// Golden-spec test: pins the full literal shape of the tool returned by
// create_list_dir_tool(); only `dir_path` is required.
#[test]
fn list_dir_tool_matches_expected_spec() {
assert_eq!(
create_list_dir_tool(),
ToolSpec::Function(ResponsesApiTool {
name: "list_dir".to_string(),
description:
"Lists entries in a local directory with 1-indexed entry numbers and simple type labels."
.to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([
(
"depth".to_string(),
JsonSchema::Number {
description: Some(
"The maximum directory depth to traverse. Must be 1 or greater."
.to_string(),
),
},
),
(
"dir_path".to_string(),
JsonSchema::String {
description: Some(
"Absolute path to the directory to list.".to_string(),
),
},
),
(
"limit".to_string(),
JsonSchema::Number {
description: Some(
"The maximum number of entries to return.".to_string(),
),
},
),
(
"offset".to_string(),
JsonSchema::Number {
description: Some(
"The entry number to start listing from. Must be 1 or greater."
.to_string(),
),
},
),
]),
required: Some(vec!["dir_path".to_string()]),
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}
// Golden-spec test: pins the full literal shape of the tool returned by
// create_test_sync_tool(), including the nested `barrier` object schema.
#[test]
fn test_sync_tool_matches_expected_spec() {
assert_eq!(
create_test_sync_tool(),
ToolSpec::Function(ResponsesApiTool {
name: "test_sync_tool".to_string(),
description: "Internal synchronization helper used by Codex integration tests."
.to_string(),
strict: false,
defer_loading: None,
parameters: JsonSchema::Object {
properties: BTreeMap::from([
(
"barrier".to_string(),
JsonSchema::Object {
properties: BTreeMap::from([
(
"id".to_string(),
JsonSchema::String {
description: Some(
"Identifier shared by concurrent calls that should rendezvous"
.to_string(),
),
},
),
(
"participants".to_string(),
JsonSchema::Number {
description: Some(
"Number of tool calls that must arrive before the barrier opens"
.to_string(),
),
},
),
(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some(
"Maximum time in milliseconds to wait at the barrier"
.to_string(),
),
},
),
]),
required: Some(vec![
"id".to_string(),
"participants".to_string(),
]),
additional_properties: Some(false.into()),
},
),
(
"sleep_after_ms".to_string(),
JsonSchema::Number {
description: Some(
"Optional delay in milliseconds after completing the barrier"
.to_string(),
),
},
),
(
"sleep_before_ms".to_string(),
JsonSchema::Number {
description: Some(
"Optional delay in milliseconds before any other action"
.to_string(),
),
},
),
]),
required: None,
additional_properties: Some(false.into()),
},
output_schema: None,
})
);
}

View File

@@ -1,7 +1,14 @@
exports_files([
"abseil_windows_gnullvm_thread_identity.patch",
"aws-lc-sys_memcmp_check.patch",
"aws-lc-sys_windows_msvc_prebuilt_nasm.patch",
"aws-lc-sys_windows_msvc_memcmp_probe.patch",
"llvm_windows_symlink_extract.patch",
"rules_rust_windows_bootstrap_process_wrapper_linker.patch",
"rules_rust_windows_exec_bin_target.patch",
"rules_rust_windows_exec_std.patch",
"rules_rust_repository_set_exec_constraints.patch",
"rules_rust_windows_msvc_direct_link_args.patch",
"rules_rust_windows_gnullvm_build_script.patch",
"rules_rs_windows_gnullvm_exec.patch",
"rusty_v8_prebuilt_out_dir.patch",

View File

@@ -0,0 +1,38 @@
diff --git a/builder/cc_builder.rs b/builder/cc_builder.rs
--- a/builder/cc_builder.rs
+++ b/builder/cc_builder.rs
@@ -667,12 +667,24 @@
if cargo_env("HOST") != target() {
return;
}
+
+ let bazel_execroot = Self::bazel_execroot(self.manifest_dir.as_path());
+ if bazel_execroot.is_some() && target().ends_with("windows-msvc") {
+ // This probe guards a GCC memcmp miscompile. Under Bazel's
+ // hermetic Windows/MSVC build-script toolchain we drive clang with
+ // MinGW-flavored CFLAGS, which is good enough for compiling
+ // aws-lc but not for linking and executing this standalone test
+ // binary. Skip the probe in that configuration instead of failing
+ // the whole build script.
+ emit_warning("Skipping memcmp probe for Bazel windows-msvc build scripts.");
+ return;
+ }
let basename = "memcmp_invalid_stripped_check";
let exec_path = out_dir().join(basename);
let memcmp_build = cc::Build::default();
let memcmp_compiler = memcmp_build.get_compiler();
if !memcmp_compiler.is_like_clang() && !memcmp_compiler.is_like_gnu() {
// The logic below assumes a Clang or GCC compiler is in use
return;
}
@@ -701,7 +713,7 @@
memcmp_compile_args.push(flag.into());
}
}
- if let Some(execroot) = Self::bazel_execroot(self.manifest_dir.as_path()) {
+ if let Some(execroot) = bazel_execroot {
// In Bazel build-script sandboxes, `cc` can pass `bazel-out/...` args
// relative to the execroot while the process runs from elsewhere.
// Normalize those args to absolute paths so this check can still link.

View File

@@ -0,0 +1,35 @@
diff --git a/builder/main.rs b/builder/main.rs
--- a/builder/main.rs
+++ b/builder/main.rs
@@ -721,16 +721,29 @@
fn get_crate_cflags() -> Option<String> {
optional_env_optional_crate_target("TARGET_CFLAGS")
.or(optional_env_optional_crate_target("CFLAGS"))
+}
+
+fn is_bazel_windows_msvc_build_script() -> bool {
+ if !target().ends_with("windows-msvc") {
+ return false;
+ }
+
+ let cargo_manifest_dir = cargo_env("CARGO_MANIFEST_DIR");
+ let manifest_dir = Path::new(&cargo_manifest_dir);
+ manifest_dir
+ .components()
+ .any(|component| component.as_os_str() == "bazel-out")
}
fn use_prebuilt_nasm() -> bool {
+ let use_prebuilt_for_bazel_windows_msvc = is_bazel_windows_msvc_build_script();
target_os() == "windows"
&& target_arch() == "x86_64"
&& !is_no_asm()
- && !test_nasm_command() // NASM not found in environment
&& Some(false) != allow_prebuilt_nasm() // not prevented by environment
&& !is_disable_prebuilt_nasm() // not prevented by feature
// permitted by environment or by feature
+ && (use_prebuilt_for_bazel_windows_msvc || !test_nasm_command())
&& (Some(true) == allow_prebuilt_nasm() || is_prebuilt_nasm())
}
fn allow_prebuilt_nasm() -> Option<bool> {

View File

@@ -0,0 +1,26 @@
# What: let `rules_rust` repository_set entries specify an explicit exec-platform
# constraint set.
# Why: codex needs Windows nightly lint toolchains to run helper binaries on an
# MSVC exec platform while still targeting `windows-gnullvm` crates.
diff --git a/rust/extensions.bzl b/rust/extensions.bzl
--- a/rust/extensions.bzl
+++ b/rust/extensions.bzl
@@ -52,6 +52,7 @@ def _rust_impl(module_ctx):
"allocator_library": repository_set.allocator_library,
"dev_components": repository_set.dev_components,
"edition": repository_set.edition,
+ "exec_compatible_with": [str(v) for v in repository_set.exec_compatible_with] if repository_set.exec_compatible_with else None,
"exec_triple": repository_set.exec_triple,
"extra_target_triples": {repository_set.target_triple: [str(v) for v in repository_set.target_compatible_with]},
"name": repository_set.name,
@@ -166,6 +167,9 @@ _COMMON_TAG_KWARGS = {
_RUST_REPOSITORY_SET_TAG_ATTRS = {
+ "exec_compatible_with": attr.label_list(
+ doc = "Execution platform constraints for this repository_set.",
+ ),
"exec_triple": attr.string(
doc = "Exec triple for this repository_set.",
),
"name": attr.string(

View File

@@ -0,0 +1,23 @@
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -472,7 +472,19 @@
)
ld_is_direct_driver = False
- if not ld or toolchain.linker_preference == "rust":
+ # The bootstrap process wrapper is built without the normal rules_rust
+ # process wrapper. On Windows nightly toolchains that expose rust-lld, the
+ # C++ toolchain path currently resolves to clang++ while still emitting
+ # MSVC-style arguments, so prefer rust-lld for this one bootstrap binary
+ # instead of switching all Rust actions over.
+ use_bootstrap_rust_linker = (
+ toolchain.target_os.startswith("windows") and
+ toolchain.linker != None and
+ hasattr(ctx.executable, "_bootstrap_process_wrapper") and
+ not ctx.executable._process_wrapper
+ )
+
+ if not ld or toolchain.linker_preference == "rust" or use_bootstrap_rust_linker:
ld = toolchain.linker.path
ld_is_direct_driver = toolchain.linker_type == "direct"

View File

@@ -0,0 +1,71 @@
# What: compile exec-side Rust binaries against the exec Windows triple instead
# of the lint target triple.
# Why: Windows native argument-comment-lint keeps the repo target platform on
# `windows-gnullvm` to preserve cfg coverage, but exec-side helper binaries
# (build.rs, runners, bootstrap tools) must link as host tools. With
# `toolchain_linker_preference=rust`, rules_rust was still feeding those exec
# binaries the `windows-gnullvm` target/std path, which broke linking under the
# native Bazel lint lane.
diff --git a/rust/private/rustc.bzl b/rust/private/rustc.bzl
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -129,6 +129,20 @@
build_setting = config.bool(flag = True),
)
-def _get_rustc_env(attr, toolchain, crate_name):
+def _effective_target_arch(toolchain, use_exec_target):
+ return toolchain.exec_triple.arch if use_exec_target else toolchain.target_arch
+
+def _effective_target_os(toolchain, use_exec_target):
+ return toolchain.exec_triple.system if use_exec_target else toolchain.target_os
+
+def _effective_target_flag_value(toolchain, use_exec_target):
+ return toolchain.exec_triple.str if use_exec_target else toolchain.target_flag_value
+
+def _effective_rust_std_paths(toolchain, use_exec_target):
+ if use_exec_target:
+ return ["{}/lib/rustlib/{}/lib".format(toolchain.sysroot, toolchain.exec_triple.str)]
+ return toolchain.rust_std_paths
+
+def _get_rustc_env(attr, toolchain, crate_name, use_exec_target = False):
"""Gathers rustc environment variables
@@ -147,6 +161,6 @@
result = {
- "CARGO_CFG_TARGET_ARCH": "" if toolchain.target_arch == None else toolchain.target_arch,
- "CARGO_CFG_TARGET_OS": "" if toolchain.target_os == None else toolchain.target_os,
+ "CARGO_CFG_TARGET_ARCH": "" if _effective_target_arch(toolchain, use_exec_target) == None else _effective_target_arch(toolchain, use_exec_target),
+ "CARGO_CFG_TARGET_OS": "" if _effective_target_os(toolchain, use_exec_target) == None else _effective_target_os(toolchain, use_exec_target),
"CARGO_CRATE_NAME": crate_name,
"CARGO_PKG_AUTHORS": "",
@@ -997,9 +1011,11 @@
if build_metadata and not use_json_output:
fail("build_metadata requires parse_json_output")
+ use_exec_target = is_exec_configuration(ctx) and crate_info.type == "bin"
+
output_dir = getattr(crate_info.output, "dirname", None)
linker_script = getattr(file, "linker_script", None)
- env = _get_rustc_env(attr, toolchain, crate_info.name)
+ env = _get_rustc_env(attr, toolchain, crate_info.name, use_exec_target)
# Wrapper args first
@@ -1138,5 +1154,5 @@
if error_format != "json":
# Color is not compatible with json output.
rustc_flags.add("--color=always")
- rustc_flags.add(toolchain.target_flag_value, format = "--target=%s")
+ rustc_flags.add(_effective_target_flag_value(toolchain, use_exec_target), format = "--target=%s")
if hasattr(attr, "crate_features"):
@@ -1144,6 +1160,6 @@
if linker_script:
rustc_flags.add(linker_script, format = "--codegen=link-arg=-T%s")
# Tell Rustc where to find the standard library (or libcore)
- rustc_flags.add_all(toolchain.rust_std_paths, before_each = "-L", format_each = "%s")
+ rustc_flags.add_all(_effective_rust_std_paths(toolchain, use_exec_target), before_each = "-L", format_each = "%s")
rustc_flags.add_all(rust_flags, map_each = map_flag)

View File

@@ -0,0 +1,111 @@
diff --git a/cargo/private/cargo_build_script.bzl b/cargo/private/cargo_build_script.bzl
--- a/cargo/private/cargo_build_script.bzl
+++ b/cargo/private/cargo_build_script.bzl
@@ -142,40 +142,82 @@ def _strip_stack_protector_for_windows_llvm_mingw(toolchain, args):
def _rewrite_windows_exec_msvc_cc_args(toolchain, args):
"""Translate GNU-flavored cc args when exec-side build scripts target Windows MSVC."""
if toolchain.target_flag_value != toolchain.exec_triple.str or not toolchain.exec_triple.str.endswith("-pc-windows-msvc"):
return args
- rewritten = []
- skip_next = False
- for arg in args:
- if skip_next:
- skip_next = False
- continue
+ rewritten = [
+ "-target",
+ toolchain.target_flag_value,
+ ]
+ skip_next = False
+ for index in range(len(args)):
+ arg = args[index]
+
+ if skip_next:
+ skip_next = False
+ continue
if arg == "-target":
- skip_next = True
+ skip_next = True
continue
if arg.startswith("-target=") or arg.startswith("--target="):
continue
if arg == "-nostdlibinc" or arg.startswith("--sysroot"):
continue
- if "mingw-w64-" in arg or "mingw_import_libraries_directory" in arg or "mingw_crt_library_search_directory" in arg:
+ if arg.startswith("-fstack-protector") or arg.startswith("-D_FORTIFY_SOURCE="):
continue
- if arg.startswith("-fstack-protector"):
- continue
-
- if arg.startswith("-D_FORTIFY_SOURCE="):
- continue
+ if arg == "-isystem" and index + 1 < len(args):
+ path = args[index + 1]
+ if "mingw-w64-" in path or "mingw_import_libraries_directory" in path or "mingw_crt_library_search_directory" in path:
+ skip_next = True
+ continue
rewritten.append(arg)
- return [
- "-target",
- toolchain.target_flag_value,
- ] + rewritten
+ return rewritten
+
+def _rewrite_windows_exec_msvc_link_args(toolchain, args):
+ """Translate GNU-flavored link args when exec-side build scripts target Windows MSVC."""
+ if toolchain.target_flag_value != toolchain.exec_triple.str or not toolchain.exec_triple.str.endswith("-pc-windows-msvc"):
+ return args
+
+ rewritten = []
+ skip_next = False
+ for index in range(len(args)):
+ arg = args[index]
+
+ if skip_next:
+ skip_next = False
+ continue
+
+ if arg == "--sysroot":
+ skip_next = True
+ continue
+
+ if arg.startswith("--sysroot="):
+ continue
+
+ if arg == "-L" and index + 1 < len(args):
+ path = args[index + 1]
+ if "mingw_import_libraries_directory" in path or "mingw_crt_library_search_directory" in path:
+ skip_next = True
+ continue
+ rewritten.extend([arg, path])
+ skip_next = True
+ continue
+
+ if arg.startswith("-L") and (
+ "mingw_import_libraries_directory" in arg or
+ "mingw_crt_library_search_directory" in arg
+ ):
+ continue
+
+ rewritten.append(arg)
+
+ return rewritten
def get_cc_compile_args_and_env(cc_toolchain, feature_configuration):
"""Gather cc environment variables from the given `cc_toolchain`
@@ -509,6 +550,7 @@ def _construct_build_script_env(
linker, _, link_args, linker_env = get_linker_and_args(ctx, "bin", toolchain, cc_toolchain, feature_configuration, None)
env.update(**linker_env)
env["LD"] = linker
+ link_args = _rewrite_windows_exec_msvc_link_args(toolchain, link_args)
env["LDFLAGS"] = " ".join(_pwd_flags(link_args))
# Defaults for cxx flags.

View File

@@ -0,0 +1,11 @@
diff --git a/rust/private/repository_utils.bzl b/rust/private/repository_utils.bzl
--- a/rust/private/repository_utils.bzl
+++ b/rust/private/repository_utils.bzl
@@ -53,6 +53,7 @@ filegroup(
"lib/*{dylib_ext}*",
"lib/rustlib/{target_triple}/codegen-backends/*{dylib_ext}",
"lib/rustlib/{target_triple}/lib/*{dylib_ext}*",
+ "lib/rustlib/{target_triple}/lib/*.rlib",
"lib/rustlib/{target_triple}/lib/*.rmeta",
],
allow_empty = True,

View File

@@ -0,0 +1,181 @@
# What: expose an exec-side Rust standard library alongside the target stdlib.
# Why: mixed Windows toolchains compile repo crates for `windows-gnullvm`, but
# exec-side helper binaries (build.rs, runners) may need the host MSVC stdlib.
# The toolchain sysroot must therefore carry both stdlib trees so rustc can
# resolve the correct one for each `--target`.
diff --git a/rust/toolchain.bzl b/rust/toolchain.bzl
--- a/rust/toolchain.bzl
+++ b/rust/toolchain.bzl
@@ -209,6 +209,7 @@ def _generate_sysroot(
clippy = None,
cargo_clippy = None,
llvm_tools = None,
+ exec_rust_std = None,
rust_std = None,
rustfmt = None,
linker = None):
@@ -312,7 +313,15 @@ def _generate_sysroot(
# Made available to support $(location) expansion in stdlib_linkflags and extra_rustc_flags.
transitive_file_sets.append(depset(ctx.files.rust_std))
+
+ sysroot_exec_rust_std = None
+ if exec_rust_std:
+ sysroot_exec_rust_std = _symlink_sysroot_tree(ctx, name, exec_rust_std)
+ transitive_file_sets.extend([sysroot_exec_rust_std])
+ # Made available to support $(location) expansion in extra_exec_rustc_flags.
+ transitive_file_sets.append(depset(ctx.files.exec_rust_std))
+
# Declare a file in the root of the sysroot to make locating the sysroot easy
sysroot_anchor = ctx.actions.declare_file("{}/rust.sysroot".format(name))
ctx.actions.write(
@@ -323,6 +332,7 @@ def _generate_sysroot(
"cargo-clippy: {}".format(cargo_clippy),
"linker: {}".format(linker),
"llvm_tools: {}".format(llvm_tools),
+ "exec_rust_std: {}".format(exec_rust_std),
"rust_std: {}".format(rust_std),
"rustc_lib: {}".format(rustc_lib),
"rustc: {}".format(rustc),
@@ -340,6 +350,7 @@ def _generate_sysroot(
cargo_clippy = sysroot_cargo_clippy,
clippy = sysroot_clippy,
linker = sysroot_linker,
+ exec_rust_std = sysroot_exec_rust_std,
rust_std = sysroot_rust_std,
rustc = sysroot_rustc,
rustc_lib = sysroot_rustc_lib,
@@ -410,12 +421,14 @@ def _rust_toolchain_impl(ctx):
)
rust_std = ctx.attr.rust_std
+ exec_rust_std = ctx.attr.exec_rust_std if ctx.attr.exec_rust_std else rust_std
sysroot = _generate_sysroot(
ctx = ctx,
rustc = ctx.file.rustc,
rustdoc = ctx.file.rust_doc,
rustc_lib = ctx.attr.rustc_lib,
+ exec_rust_std = exec_rust_std,
rust_std = rust_std,
rustfmt = ctx.file.rustfmt,
clippy = ctx.file.clippy_driver,
@@ -452,7 +465,7 @@ def _rust_toolchain_impl(ctx):
expanded_stdlib_linkflags = _expand_flags(ctx, "stdlib_linkflags", rust_std[rust_common.stdlib_info].srcs, make_variables)
expanded_extra_rustc_flags = _expand_flags(ctx, "extra_rustc_flags", rust_std[rust_common.stdlib_info].srcs, make_variables)
- expanded_extra_exec_rustc_flags = _expand_flags(ctx, "extra_exec_rustc_flags", rust_std[rust_common.stdlib_info].srcs, make_variables)
+ expanded_extra_exec_rustc_flags = _expand_flags(ctx, "extra_exec_rustc_flags", exec_rust_std[rust_common.stdlib_info].srcs, make_variables)
linking_context = cc_common.create_linking_context(
linker_inputs = depset([
@@ -793,6 +806,10 @@ rust_toolchain = rule(
doc = "The Rust standard library.",
mandatory = True,
),
+ "exec_rust_std": attr.label(
+ doc = "Optional Rust standard library for exec-configuration Rust tools. Defaults to rust_std.",
+ mandatory = False,
+ ),
"rustc": attr.label(
doc = "The location of the `rustc` binary. Can be a direct source or a filegroup containing one item.",
allow_single_file = True,
diff --git a/rust/private/repository_utils.bzl b/rust/private/repository_utils.bzl
--- a/rust/private/repository_utils.bzl
+++ b/rust/private/repository_utils.bzl
@@ -341,6 +341,7 @@ rust_toolchain(
name = "{toolchain_name}",
rust_doc = "//:rustdoc",
rust_std = "//:rust_std-{target_triple}",
+ exec_rust_std = {exec_rust_std_label},
rustc = "//:rustc",
linker = {linker_label},
linker_type = {linker_type},
@@ -384,6 +385,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools,
include_linker,
include_objcopy = False,
+ exec_rust_std_label = None,
stdlib_linkflags = None,
extra_rustc_flags = None,
extra_exec_rustc_flags = None,
@@ -405,6 +407,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools (bool): Whether llvm-tools are present in the toolchain.
include_linker (bool): Whether a linker is available in the toolchain.
include_objcopy (bool): Whether rust-objcopy is available in the toolchain.
+ exec_rust_std_label (str, optional): Label for an exec-side stdlib when it differs from rust_std. Defaults to None.
stdlib_linkflags (list, optional): Overridden flags needed for linking to rust
stdlib, akin to BAZEL_LINKLIBS. Defaults to
None.
@@ -453,6 +456,7 @@ def BUILD_for_rust_toolchain(
staticlib_ext = system_to_staticlib_ext(target_triple.system),
dylib_ext = system_to_dylib_ext(target_triple.system),
allocator_library = repr(allocator_library_label),
+ exec_rust_std_label = repr(exec_rust_std_label),
global_allocator_library = repr(global_allocator_library_label),
stdlib_linkflags = stdlib_linkflags,
default_edition = default_edition,
diff --git a/rust/private/rustc.bzl b/rust/private/rustc.bzl
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -1011,7 +1011,10 @@ def construct_arguments(
if build_metadata and not use_json_output:
fail("build_metadata requires parse_json_output")
- use_exec_target = is_exec_configuration(ctx) and crate_info.type == "bin"
+ # Exec-configuration crates (build scripts, proc-macros, and their
+ # dependencies) must all target the exec triple so they can link against
+ # each other and the exec-side standard library.
+ use_exec_target = is_exec_configuration(ctx)
output_dir = getattr(crate_info.output, "dirname", None)
linker_script = getattr(file, "linker_script", None)
diff --git a/rust/repositories.bzl b/rust/repositories.bzl
--- a/rust/repositories.bzl
+++ b/rust/repositories.bzl
@@ -536,6 +536,18 @@ def _rust_toolchain_tools_repository_impl(ctx):
build_components.append(rust_stdlib_content)
sha256s.update(rust_stdlib_sha256)
+ exec_rust_std_label = None
+ if exec_triple.str != target_triple.str:
+ exec_rust_stdlib_content, exec_rust_stdlib_sha256 = load_rust_stdlib(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ build_components.append(exec_rust_stdlib_content)
+ sha256s.update(exec_rust_stdlib_sha256)
+ exec_rust_std_label = "//:rust_std-{}".format(exec_triple.str)
+
stdlib_linkflags = None
if "BAZEL_RUST_STDLIB_LINKFLAGS" in ctx.os.environ:
stdlib_linkflags = ctx.os.environ["BAZEL_RUST_STDLIB_LINKFLAGS"].split(":")
@@ -552,6 +564,7 @@ def _rust_toolchain_tools_repository_impl(ctx):
include_llvm_tools = include_llvm_tools,
include_linker = include_linker,
include_objcopy = include_objcopy,
+ exec_rust_std_label = exec_rust_std_label,
extra_rustc_flags = ctx.attr.extra_rustc_flags,
extra_exec_rustc_flags = ctx.attr.extra_exec_rustc_flags,
opt_level = ctx.attr.opt_level if ctx.attr.opt_level else None,
@@ -575,8 +588,16 @@ def _rust_toolchain_tools_repository_impl(ctx):
if ctx.attr.dev_components:
rustc_dev_sha256 = load_rustc_dev_nightly(
ctx = ctx,
target_triple = target_triple,
version = version,
iso_date = iso_date,
)
sha256s.update(rustc_dev_sha256)
+ if exec_triple.str != target_triple.str:
+ exec_rustc_dev_sha256 = load_rustc_dev_nightly(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ sha256s.update(exec_rustc_dev_sha256)

View File

@@ -1,7 +1,7 @@
diff --git a/cargo/private/cargo_build_script.bzl b/cargo/private/cargo_build_script.bzl
--- a/cargo/private/cargo_build_script.bzl
+++ b/cargo/private/cargo_build_script.bzl
@@ -120,6 +120,25 @@
@@ -120,6 +120,63 @@
executable = True,
)
@@ -23,16 +23,56 @@ diff --git a/cargo/private/cargo_build_script.bzl b/cargo/private/cargo_build_sc
+ # flags through CFLAGS/CXXFLAGS breaks build.rs probe binaries compiled via
+ # cc-rs.
+ return [arg for arg in args if not arg.startswith("-fstack-protector")]
+
+def _rewrite_windows_exec_msvc_cc_args(toolchain, args):
+ """Translate GNU-flavored cc args when exec-side build scripts target Windows MSVC."""
+ if toolchain.target_flag_value != toolchain.exec_triple.str or not toolchain.exec_triple.str.endswith("-pc-windows-msvc"):
+ return args
+
+ rewritten = []
+ skip_next = False
+ for arg in args:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if arg == "-target":
+ skip_next = True
+ continue
+
+ if arg.startswith("-target=") or arg.startswith("--target="):
+ continue
+
+ if arg == "-nostdlibinc" or arg.startswith("--sysroot"):
+ continue
+
+ if "mingw-w64-" in arg or "mingw_import_libraries_directory" in arg or "mingw_crt_library_search_directory" in arg:
+ continue
+
+ if arg.startswith("-fstack-protector"):
+ continue
+
+ if arg.startswith("-D_FORTIFY_SOURCE="):
+ continue
+
+ rewritten.append(arg)
+
+ return [
+ "-target",
+ toolchain.target_flag_value,
+ ] + rewritten
+
def get_cc_compile_args_and_env(cc_toolchain, feature_configuration):
"""Gather cc environment variables from the given `cc_toolchain`
@@ -503,6 +522,8 @@
@@ -503,6 +560,10 @@
if not env["AR"]:
env["AR"] = cc_toolchain.ar_executable
+ cc_c_args = _strip_stack_protector_for_windows_llvm_mingw(toolchain, cc_c_args)
+ cc_cxx_args = _strip_stack_protector_for_windows_llvm_mingw(toolchain, cc_cxx_args)
+ cc_c_args = _rewrite_windows_exec_msvc_cc_args(toolchain, cc_c_args)
+ cc_cxx_args = _rewrite_windows_exec_msvc_cc_args(toolchain, cc_cxx_args)
# Populate CFLAGS and CXXFLAGS that cc-rs relies on when building from source, in particular
# to determine the deployment target when building for apple platforms (`macosx-version-min`
# for example, itself derived from the `macos_minimum_os` Bazel argument).

View File

@@ -0,0 +1,64 @@
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -2305,7 +2305,7 @@
return crate.metadata.dirname
return crate.output.dirname
-def _portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, for_windows = False, for_darwin = False, flavor_msvc = False):
+def _portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, for_windows = False, for_darwin = False, flavor_msvc = False, use_direct_driver = False):
artifact = get_preferred_artifact(lib, use_pic)
if ambiguous_libs and artifact.path in ambiguous_libs:
artifact = ambiguous_libs[artifact.path]
@@ -2344,6 +2344,11 @@
artifact.basename.startswith("test-") or artifact.basename.startswith("std-")
):
return [] if for_darwin else ["-lstatic=%s" % get_lib_name(artifact)]
+
+ if for_windows and use_direct_driver and not artifact.basename.endswith(".lib"):
+ return [
+ "-Clink-arg={}".format(artifact.path),
+ ]
if flavor_msvc:
return [
@@ -2381,7 +2386,7 @@
])
elif include_link_flags:
get_lib_name = get_lib_name_for_windows if flavor_msvc else get_lib_name_default
- ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, flavor_msvc = flavor_msvc))
+ ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, flavor_msvc = flavor_msvc, use_direct_driver = use_direct_driver))
# Windows toolchains can inherit POSIX defaults like -pthread from C deps,
# which fails to link with the MinGW/LLD toolchain. Drop them here.
@@ -2558,17 +2563,25 @@
else:
# For all other crate types we want to link C++ runtime library statically
# (for example libstdc++.a or libc++.a).
+ runtime_libs = cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration)
args.add_all(
- cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration),
+ runtime_libs,
map_each = _get_dirname,
format_each = "-Lnative=%s",
)
if include_link_flags:
- args.add_all(
- cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration),
- map_each = get_lib_name,
- format_each = "-lstatic=%s",
- )
+ if toolchain.target_os == "windows" and use_direct_link_driver:
+ for runtime_lib in runtime_libs.to_list():
+ if runtime_lib.basename.endswith(".lib"):
+ args.add(get_lib_name(runtime_lib), format = "-lstatic=%s")
+ else:
+ args.add(runtime_lib.path, format = "--codegen=link-arg=%s")
+ else:
+ args.add_all(
+ runtime_libs,
+ map_each = get_lib_name,
+ format_each = "-lstatic=%s",
+ )
def _get_dirname(file):
"""A helper function for `_add_native_link_flags`.