diff --git a/.bazelrc b/.bazelrc index 2ff9cbc28e..ce7c1e1d43 100644 --- a/.bazelrc +++ b/.bazelrc @@ -60,3 +60,51 @@ common:remote --jobs=800 # Enable pipelined compilation since we are not bound by local CPU count. #common:remote --@rules_rust//rust/settings:pipelined_compilation +# GitHub Actions CI configs. +common:ci --remote_download_minimal +common:ci --keep_going +common:ci --verbose_failures +common:ci --build_metadata=REPO_URL=https://github.com/openai/codex.git +common:ci --build_metadata=ROLE=CI +common:ci --build_metadata=VISIBILITY=PUBLIC + +# Disable disk cache in CI since we have a remote one and aren't using persistent workers. +common:ci --disk_cache= + +# Shared config for the main Bazel CI workflow. +common:ci-bazel --config=ci +common:ci-bazel --build_metadata=TAG_workflow=bazel + +# Rearrange caches on Windows so they're on the same volume as the checkout. +common:ci-windows --config=ci-bazel +common:ci-windows --build_metadata=TAG_os=windows +common:ci-windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache +common:ci-windows --repository_cache=D:/a/.cache/bazel-repo-cache + +# We prefer to run the build actions entirely remotely so we can dial up the concurrency. +# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform. + +# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both. +# Linux crossbuilds don't work until we untangle the libc constraint mess. +common:ci-linux --config=ci-bazel +common:ci-linux --build_metadata=TAG_os=linux +common:ci-linux --config=remote +common:ci-linux --strategy=remote +common:ci-linux --platforms=//:rbe + +# On mac, we can run all the build actions remotely but test actions locally. 
+common:ci-macos --config=ci-bazel +common:ci-macos --build_metadata=TAG_os=macos +common:ci-macos --config=remote +common:ci-macos --strategy=remote +common:ci-macos --strategy=TestRunner=darwin-sandbox,local + +# Linux-only V8 CI config. +common:ci-v8 --config=ci +common:ci-v8 --build_metadata=TAG_workflow=v8 +common:ci-v8 --build_metadata=TAG_os=linux +common:ci-v8 --config=remote +common:ci-v8 --strategy=remote + +# Optional per-user local overrides. +try-import %workspace%/user.bazelrc diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml index b2ef107ca7..79d963a537 100644 --- a/.github/workflows/bazel.yml +++ b/.github/workflows/bazel.yml @@ -78,30 +78,17 @@ jobs: shell: bash run: ./scripts/check-module-bazel-lock.sh - # TODO(mbolin): Bring this back once we have caching working. Currently, - # we never seem to get a cache hit but we still end up paying the cost of - # uploading at the end of the build, which takes over a minute! - # - # Cache build and external artifacts so that the next ci build is incremental. - # Because github action caches cannot be updated after a build, we need to - # store the contents of each build in a unique cache key, then fall back to loading - # it on the next ci run. We use hashFiles(...) in the key and restore-keys- with - # the prefix to load the most recent cache for the branch on a cache miss. You - # should customize the contents of hashFiles to capture any bazel input sources, - # although this doesn't need to be perfect. If none of the input sources change - # then a cache hit will load an existing cache and bazel won't have to do any work. - # In the case of a cache miss, you want the fallback cache to contain most of the - # previously built artifacts to minimize build time. The more precise you are with - # hashFiles sources the less work bazel will have to do. 
- # - name: Mount bazel caches - # uses: actions/cache@v5 - # with: - # path: | - # ~/.cache/bazel-repo-cache - # ~/.cache/bazel-repo-contents-cache - # key: bazel-cache-${{ matrix.os }}-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'MODULE.bazel') }} - # restore-keys: | - # bazel-cache-${{ matrix.os }} + # Restore bazel repository cache so we don't have to redownload all the external dependencies + # on every CI run. + - name: Restore bazel repository cache + id: cache_bazel_repository_restore + uses: actions/cache/restore@v5 + with: + path: | + ~/.cache/bazel-repo-cache + key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} + restore-keys: | + bazel-cache-${{ matrix.target }} - name: Configure Bazel startup args (Windows) if: runner.os == 'Windows' @@ -157,10 +144,7 @@ jobs: bazel_args=( test --test_verbose_timeout_warnings - --build_metadata=REPO_URL=https://github.com/openai/codex.git --build_metadata=COMMIT_SHA=$(git rev-parse HEAD) - --build_metadata=ROLE=CI - --build_metadata=VISIBILITY=PUBLIC ) bazel_targets=( @@ -177,6 +161,13 @@ jobs: bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}") fi + ci_config=ci-linux + if [[ "${RUNNER_OS:-}" == "macOS" ]]; then + ci_config=ci-macos + elif [[ "${RUNNER_OS:-}" == "Windows" ]]; then + ci_config=ci-windows + fi + if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then echo "BuildBuddy API key is available; using remote Bazel configuration." 
# Work around Bazel 9 remote repo contents cache / overlay materialization failures @@ -186,8 +177,8 @@ jobs: set +e bazel $BAZEL_STARTUP_ARGS \ --noexperimental_remote_repo_contents_cache \ - --bazelrc=.github/workflows/ci.bazelrc \ "${bazel_args[@]}" \ + "--config=${ci_config}" \ "--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY" \ -- \ "${bazel_targets[@]}" \ @@ -229,3 +220,14 @@ jobs: print_failed_bazel_test_logs "$bazel_console_log" exit "$bazel_status" fi + + # Save bazel repository cache explicitly; make non-fatal so cache uploading + # never fails the overall job. Only save when key wasn't hit. + - name: Save bazel repository cache + if: always() && !cancelled() && steps.cache_bazel_repository_restore.outputs.cache-hit != 'true' + continue-on-error: true + uses: actions/cache/save@v5 + with: + path: | + ~/.cache/bazel-repo-cache + key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} diff --git a/.github/workflows/ci.bazelrc b/.github/workflows/ci.bazelrc deleted file mode 100644 index 997a774eb3..0000000000 --- a/.github/workflows/ci.bazelrc +++ /dev/null @@ -1,27 +0,0 @@ -common --remote_download_minimal -common --keep_going -common --verbose_failures - -# Disable disk cache since we have remote one and aren't using persistent workers. -common --disk_cache= - -# Rearrange caches on Windows so they're on the same volume as the checkout. -common:windows --repo_contents_cache=D:/a/.cache/bazel-repo-contents-cache -common:windows --repository_cache=D:/a/.cache/bazel-repo-cache - -# We prefer to run the build actions entirely remotely so we can dial up the concurrency. -# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform. - -# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both. 
-# Linux crossbuilds don't work until we untangle the libc constraint mess. -common:linux --config=remote -common:linux --strategy=remote -common:linux --platforms=//:rbe - -# On mac, we can run all the build actions remotely but test actions locally. -common:macos --config=remote -common:macos --strategy=remote -common:macos --strategy=TestRunner=darwin-sandbox,local - -# On windows we cannot cross-build the tests but run them locally due to what appears to be a Bazel bug -# (windows vs unix path confusion) diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index c203e2b742..a7e6aa4431 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -547,7 +547,10 @@ jobs: tests: name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.remote_env == 'true' && ' (remote)' || '' }} runs-on: ${{ matrix.runs_on || matrix.runner }} - timeout-minutes: ${{ matrix.runner == 'windows-arm64' && 35 || 30 }} + # Perhaps we can bring this back down to 30m once we finish the cutover + # from tui_app_server/ to tui/. Incidentally, windows-arm64 was the main + # offender for exceeding the timeout. 
+ timeout-minutes: 45 needs: changed if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }} defaults: diff --git a/.github/workflows/rusty-v8-release.yml b/.github/workflows/rusty-v8-release.yml index bb191b88cb..60aac24366 100644 --- a/.github/workflows/rusty-v8-release.yml +++ b/.github/workflows/rusty-v8-release.yml @@ -116,8 +116,8 @@ jobs: bazel \ --noexperimental_remote_repo_contents_cache \ - --bazelrc=.github/workflows/v8-ci.bazelrc \ "${bazel_args[@]}" \ + --config=ci-v8 \ "--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}" - name: Stage release pair diff --git a/.github/workflows/v8-canary.yml b/.github/workflows/v8-canary.yml index 213c6a7b60..6e068e8021 100644 --- a/.github/workflows/v8-canary.yml +++ b/.github/workflows/v8-canary.yml @@ -108,8 +108,8 @@ jobs: bazel \ --noexperimental_remote_repo_contents_cache \ - --bazelrc=.github/workflows/v8-ci.bazelrc \ "${bazel_args[@]}" \ + --config=ci-v8 \ "--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}" - name: Stage release pair diff --git a/.github/workflows/v8-ci.bazelrc b/.github/workflows/v8-ci.bazelrc deleted file mode 100644 index df1b4bec3d..0000000000 --- a/.github/workflows/v8-ci.bazelrc +++ /dev/null @@ -1,5 +0,0 @@ -import %workspace%/.github/workflows/ci.bazelrc - -common --build_metadata=REPO_URL=https://github.com/openai/codex.git -common --build_metadata=ROLE=CI -common --build_metadata=VISIBILITY=PUBLIC diff --git a/.gitignore b/.gitignore index 8f39b7b1c8..82269594bb 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ node_modules # build dist/ bazel-* +user.bazelrc build/ out/ storybook-static/ diff --git a/AGENTS.md b/AGENTS.md index 3a287a5991..7a81eab40c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -40,6 +40,7 @@ In the codex-rs folder where the rust code lives: `codex-rs/tui/src/bottom_pane/mod.rs`, and similarly central orchestration modules. 
- When extracting code from a large module, move the related tests and module/type docs toward the new implementation so the invariants stay close to the code that owns them. +- When running Rust commands (e.g. `just fix` or `cargo test`) be patient with the command and never try to kill them using the PID. Rust lock can make the execution slow, this is expected. Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests: @@ -50,6 +51,19 @@ Before finalizing a large change to `codex-rs`, run `just fix -p ` (in Also run `just argument-comment-lint` to ensure the codebase is clean of comment lint errors. +## The `codex-core` crate + +Over time, the `codex-core` crate (defined in `codex-rs/core/`) has become bloated because it is the largest crate, so it is often easier to add something new to `codex-core` rather than refactor out the library code you need so your new code neither takes a dependency on, nor contributes to the size of, `codex-core`. + +To that end: **resist adding code to codex-core**! + +Particularly when introducing a new concept/feature/API, before adding to `codex-core`, consider whether: + +- There is an existing crate other than `codex-core` that is an appropriate place for your new code to live. +- It is time to introduce a new crate to the Cargo workspace for your new functionality. Refactor existing code as necessary to make this happen. + +Likewise, when reviewing code, do not hesitate to push back on PRs that would unnecessarily add code to `codex-core`. + ## TUI style conventions See `codex-rs/tui/styles.md`. 
diff --git a/MODULE.bazel.lock b/MODULE.bazel.lock index ee89f6243f..7b3454a508 100644 --- a/MODULE.bazel.lock +++ b/MODULE.bazel.lock @@ -614,14 +614,10 @@ "anyhow_1.0.101": "{\"dependencies\":[{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.51\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.6\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"syn\",\"req\":\"^2.0\"},{\"kind\":\"dev\",\"name\":\"thiserror\",\"req\":\"^2\"},{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.108\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}", "arbitrary_1.4.2": "{\"dependencies\":[{\"name\":\"derive_arbitrary\",\"optional\":true,\"req\":\"~1.4.0\"},{\"kind\":\"dev\",\"name\":\"exhaustigen\",\"req\":\"^0.1.0\"}],\"features\":{\"derive\":[\"derive_arbitrary\"]}}", "arboard_3.6.1": "{\"dependencies\":[{\"features\":[\"std\"],\"name\":\"clipboard-win\",\"req\":\"^5.3.1\",\"target\":\"cfg(windows)\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.10.2\"},{\"default_features\":false,\"features\":[\"png\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"default_features\":false,\"features\":[\"tiff\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"png\",\"bmp\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(windows)\"},{\"name\":\"log\",\"req\":\"^0.4\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"log\",\"req\":\"^0.4\",\"target\":\"cfg(windows)\"},{\"name\":\"objc2\",\"req\":\"^0.6.0\",\"target\":\"cfg(target_os = 
\\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"objc2-core-graphics\",\"NSPasteboard\",\"NSPasteboardItem\",\"NSImage\"],\"name\":\"objc2-app-kit\",\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"CFCGTypes\"],\"name\":\"objc2-core-foundation\",\"optional\":true,\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"CGImage\",\"CGColorSpace\",\"CGDataProvider\"],\"name\":\"objc2-core-graphics\",\"optional\":true,\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"NSArray\",\"NSString\",\"NSEnumerator\",\"NSGeometry\",\"NSValue\"],\"name\":\"objc2-foundation\",\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"name\":\"parking_lot\",\"req\":\"^0.12\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"percent-encoding\",\"req\":\"^2.3.1\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"features\":[\"Win32_Foundation\",\"Win32_Storage_FileSystem\",\"Win32_System_DataExchange\",\"Win32_System_Memory\",\"Win32_System_Ole\",\"Win32_UI_Shell\"],\"name\":\"windows-sys\",\"req\":\">=0.52.0, <0.61.0\",\"target\":\"cfg(windows)\"},{\"name\":\"wl-clipboard-rs\",\"optional\":true,\"req\":\"^0.9.0\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"x11rb\",\"req\":\"^0.13\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", 
target_os=\\\"emscripten\\\"))))\"}],\"features\":{\"core-graphics\":[\"dep:objc2-core-graphics\"],\"default\":[\"image-data\"],\"image\":[\"dep:image\"],\"image-data\":[\"dep:objc2-core-graphics\",\"dep:objc2-core-foundation\",\"image\",\"windows-sys\",\"core-graphics\"],\"wayland-data-control\":[\"wl-clipboard-rs\"],\"windows-sys\":[\"windows-sys/Win32_Graphics_Gdi\"],\"wl-clipboard-rs\":[\"dep:wl-clipboard-rs\"]}}", - "arc-swap_1.8.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"adaptive-barrier\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"~0.7\"},{\"kind\":\"dev\",\"name\":\"crossbeam-utils\",\"req\":\"~0.8\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.14\"},{\"kind\":\"dev\",\"name\":\"num_cpus\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"~0.12\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"name\":\"rustversion\",\"req\":\"^1\"},{\"features\":[\"rc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0.130\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.177\"}],\"features\":{\"experimental-strategies\":[],\"experimental-thread-local\":[],\"internal-test-strategies\":[],\"weak\":[]}}", + "arc-swap_1.9.0": 
"{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"adaptive-barrier\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"~0.7\"},{\"kind\":\"dev\",\"name\":\"crossbeam-utils\",\"req\":\"~0.8\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.14\"},{\"kind\":\"dev\",\"name\":\"num_cpus\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"~0.12\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"name\":\"rustversion\",\"req\":\"^1\"},{\"features\":[\"rc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0.130\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.177\"}],\"features\":{\"experimental-strategies\":[],\"experimental-thread-local\":[],\"internal-test-strategies\":[],\"weak\":[]}}", "arrayvec_0.7.6": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.4\"},{\"default_features\":false,\"name\":\"borsh\",\"optional\":true,\"req\":\"^1.2.0\"},{\"kind\":\"dev\",\"name\":\"matches\",\"req\":\"^0.1\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.4\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}", "ascii-canvas_3.0.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"diff\",\"req\":\"^0.1\"},{\"name\":\"term\",\"req\":\"^0.7\"}],\"features\":{}}", "ascii_1.1.0": "{\"dependencies\":[{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.25\"},{\"name\":\"serde_test\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"alloc\":[],\"default\":[\"std\"],\"std\":[\"alloc\"]}}", - "askama_0.15.4": 
"{\"dependencies\":[{\"default_features\":false,\"name\":\"askama_macros\",\"optional\":true,\"req\":\"=0.15.4\"},{\"kind\":\"dev\",\"name\":\"assert_matches\",\"req\":\"^1.5.0\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.8\"},{\"name\":\"itoa\",\"req\":\"^1.0.11\"},{\"default_features\":false,\"name\":\"percent-encoding\",\"optional\":true,\"req\":\"^2.1.0\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"serde_json\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"alloc\":[\"askama_macros?/alloc\",\"serde?/alloc\",\"serde_json?/alloc\",\"percent-encoding?/alloc\"],\"code-in-doc\":[\"askama_macros?/code-in-doc\"],\"config\":[\"askama_macros?/config\"],\"default\":[\"config\",\"derive\",\"std\",\"urlencode\"],\"derive\":[\"dep:askama_macros\",\"dep:askama_macros\"],\"full\":[\"default\",\"code-in-doc\",\"serde_json\"],\"nightly-spans\":[\"askama_macros/nightly-spans\"],\"serde_json\":[\"std\",\"askama_macros?/serde_json\",\"dep:serde\",\"dep:serde_json\"],\"std\":[\"alloc\",\"askama_macros?/std\",\"serde?/std\",\"serde_json?/std\",\"percent-encoding?/std\"],\"urlencode\":[\"askama_macros?/urlencode\",\"dep:percent-encoding\"]}}", - "askama_derive_0.15.4": 
"{\"dependencies\":[{\"name\":\"basic-toml\",\"optional\":true,\"req\":\"^0.1.1\"},{\"kind\":\"dev\",\"name\":\"console\",\"req\":\"^0.16.0\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.8\"},{\"name\":\"memchr\",\"req\":\"^2\"},{\"name\":\"parser\",\"package\":\"askama_parser\",\"req\":\"=0.15.4\"},{\"kind\":\"dev\",\"name\":\"prettyplease\",\"req\":\"^0.2.20\"},{\"default_features\":false,\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"pulldown-cmark\",\"optional\":true,\"req\":\"^0.13.0\"},{\"default_features\":false,\"name\":\"quote\",\"req\":\"^1\"},{\"name\":\"rustc-hash\",\"req\":\"^2.0.0\"},{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"serde_derive\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"similar\",\"req\":\"^2.6.0\"},{\"default_features\":false,\"features\":[\"clone-impls\",\"derive\",\"full\",\"parsing\",\"printing\"],\"name\":\"syn\",\"req\":\"^2.0.3\"}],\"features\":{\"alloc\":[],\"code-in-doc\":[\"dep:pulldown-cmark\"],\"config\":[\"external-sources\",\"dep:basic-toml\",\"dep:serde\",\"dep:serde_derive\",\"parser/config\"],\"default\":[\"alloc\",\"code-in-doc\",\"config\",\"external-sources\",\"proc-macro\",\"serde_json\",\"std\",\"urlencode\"],\"external-sources\":[],\"nightly-spans\":[],\"proc-macro\":[\"proc-macro2/proc-macro\"],\"serde_json\":[],\"std\":[\"alloc\"],\"urlencode\":[]}}", - "askama_macros_0.15.4": 
"{\"dependencies\":[{\"default_features\":false,\"features\":[\"external-sources\",\"proc-macro\"],\"name\":\"askama_derive\",\"package\":\"askama_derive\",\"req\":\"=0.15.4\"}],\"features\":{\"alloc\":[\"askama_derive/alloc\"],\"code-in-doc\":[\"askama_derive/code-in-doc\"],\"config\":[\"askama_derive/config\"],\"default\":[\"config\",\"derive\",\"std\",\"urlencode\"],\"derive\":[],\"full\":[\"default\",\"code-in-doc\",\"serde_json\"],\"nightly-spans\":[\"askama_derive/nightly-spans\"],\"serde_json\":[\"askama_derive/serde_json\"],\"std\":[\"askama_derive/std\"],\"urlencode\":[\"askama_derive/urlencode\"]}}", - "askama_parser_0.15.4": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.8\"},{\"name\":\"rustc-hash\",\"req\":\"^2.0.0\"},{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"serde_derive\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"unicode-ident\",\"req\":\"^1.0.12\"},{\"features\":[\"simd\"],\"name\":\"winnow\",\"req\":\"^0.7.0\"}],\"features\":{\"config\":[\"dep:serde\",\"dep:serde_derive\"]}}", "asn1-rs-derive_0.6.0": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1.0\"},{\"name\":\"quote\",\"req\":\"^1.0\"},{\"features\":[\"full\"],\"name\":\"syn\",\"req\":\"^2.0\"},{\"name\":\"synstructure\",\"req\":\"^0.13\"}],\"features\":{}}", "asn1-rs-impl_0.2.0": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"name\":\"syn\",\"req\":\"^2.0\"}],\"features\":{}}", "asn1-rs_0.7.1": 
"{\"dependencies\":[{\"name\":\"asn1-rs-derive\",\"req\":\"^0.6\"},{\"name\":\"asn1-rs-impl\",\"req\":\"^0.2\"},{\"name\":\"bitvec\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"colored\",\"optional\":true,\"req\":\"^3.0\"},{\"kind\":\"dev\",\"name\":\"colored\",\"req\":\"^3.0\"},{\"name\":\"cookie-factory\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"displaydoc\",\"req\":\"^0.2.2\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"nom\",\"req\":\"^7.0\"},{\"name\":\"num-bigint\",\"optional\":true,\"req\":\"^0.4\"},{\"name\":\"num-traits\",\"req\":\"^0.2.14\"},{\"kind\":\"dev\",\"name\":\"pem\",\"req\":\"^3.0\"},{\"name\":\"rusticata-macros\",\"req\":\"^4.0\"},{\"name\":\"thiserror\",\"req\":\"^2.0.0\"},{\"features\":[\"macros\",\"parsing\",\"formatting\"],\"name\":\"time\",\"optional\":true,\"req\":\"^0.3\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0\"}],\"features\":{\"bigint\":[\"num-bigint\"],\"bits\":[\"bitvec\"],\"datetime\":[\"time\"],\"debug\":[\"std\",\"colored\"],\"default\":[\"std\"],\"serialize\":[\"cookie-factory\"],\"std\":[],\"trace\":[\"debug\"]}}", diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index 00fe37ebbe..ce8e81f539 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -410,6 +410,7 @@ dependencies = [ "codex-app-server-protocol", "codex-core", "codex-features", + "codex-login", "codex-protocol", "codex-utils-cargo-bin", "core_test_support", @@ -453,9 +454,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +checksum = "a07d1f37ff60921c83bdfc7407723bdefe89b44b98a9b772f225c8f9d67141a6" dependencies = [ "rustversion", ] @@ -481,58 +482,6 @@ dependencies = [ "term", ] -[[package]] -name = "askama" -version = "0.15.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "08e1676b346cadfec169374f949d7490fd80a24193d37d2afce0c047cf695e57" -dependencies = [ - "askama_macros", - "itoa", - "percent-encoding", - "serde", - "serde_json", -] - -[[package]] -name = "askama_derive" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7661ff56517787343f376f75db037426facd7c8d3049cef8911f1e75016f3a37" -dependencies = [ - "askama_parser", - "basic-toml", - "memchr", - "proc-macro2", - "quote", - "rustc-hash 2.1.1", - "serde", - "serde_derive", - "syn 2.0.114", -] - -[[package]] -name = "askama_macros" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713ee4dbfd1eb719c2dab859465b01fa1d21cb566684614a713a6b7a99a4e47b" -dependencies = [ - "askama_derive", -] - -[[package]] -name = "askama_parser" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d62d674238a526418b30c0def480d5beadb9d8964e7f38d635b03bf639c704c" -dependencies = [ - "rustc-hash 2.1.1", - "serde", - "serde_derive", - "unicode-ident", - "winnow", -] - [[package]] name = "asn1-rs" version = "0.7.1" @@ -1469,6 +1418,7 @@ dependencies = [ "codex-sandboxing", "codex-shell-command", "codex-state", + "codex-tools", "codex-utils-absolute-path", "codex-utils-cargo-bin", "codex-utils-cli", @@ -1596,6 +1546,7 @@ dependencies = [ "anyhow", "codex-apply-patch", "codex-linux-sandbox", + "codex-sandboxing", "codex-shell-escalation", "codex-utils-home-dir", "dotenvy", @@ -1603,27 +1554,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "codex-artifacts" -version = "0.0.0" -dependencies = [ - "codex-package-manager", - "flate2", - "pretty_assertions", - "reqwest", - "serde", - "serde_json", - "sha2", - "tar", - "tempfile", - "thiserror 2.0.18", - "tokio", - "url", - "which 8.0.0", - "wiremock", - "zip", -] - [[package]] name = "codex-async-utils" version = "0.0.0" @@ -1667,6 +1597,7 @@ 
dependencies = [ "codex-connectors", "codex-core", "codex-git-utils", + "codex-login", "codex-utils-cargo-bin", "codex-utils-cli", "pretty_assertions", @@ -1881,7 +1812,6 @@ version = "0.0.0" dependencies = [ "anyhow", "arc-swap", - "askama", "assert_cmd", "assert_matches", "async-channel", @@ -1896,7 +1826,6 @@ dependencies = [ "codex-app-server-protocol", "codex-apply-patch", "codex-arg0", - "codex-artifacts", "codex-async-utils", "codex-code-mode", "codex-config", @@ -1921,7 +1850,7 @@ dependencies = [ "codex-shell-escalation", "codex-state", "codex-terminal-detection", - "codex-test-macros", + "codex-tools", "codex-utils-absolute-path", "codex-utils-cache", "codex-utils-cargo-bin", @@ -1934,6 +1863,7 @@ dependencies = [ "codex-utils-readiness", "codex-utils-stream-parser", "codex-utils-string", + "codex-utils-template", "codex-windows-sandbox", "core-foundation 0.9.4", "core_test_support", @@ -2084,6 +2014,7 @@ name = "codex-exec-server" version = "0.0.0" dependencies = [ "anyhow", + "arc-swap", "async-trait", "base64 0.22.1", "clap", @@ -2253,6 +2184,7 @@ dependencies = [ "clap", "codex-core", "codex-protocol", + "codex-sandboxing", "codex-utils-absolute-path", "landlock", "libc", @@ -2324,6 +2256,7 @@ dependencies = [ "anyhow", "codex-arg0", "codex-core", + "codex-exec-server", "codex-features", "codex-protocol", "codex-shell-command", @@ -2573,7 +2506,6 @@ dependencies = [ "codex-network-proxy", "codex-protocol", "codex-utils-absolute-path", - "dirs", "dunce", "libc", "pretty_assertions", @@ -2581,6 +2513,7 @@ dependencies = [ "tempfile", "tracing", "url", + "which 8.0.0", ] [[package]] @@ -2697,12 +2630,13 @@ dependencies = [ ] [[package]] -name = "codex-test-macros" +name = "codex-tools" version = "0.0.0" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", + "pretty_assertions", + "rmcp", + "serde", + "serde_json", ] [[package]] @@ -2725,6 +2659,7 @@ dependencies = [ "codex-client", "codex-cloud-requirements", "codex-core", + "codex-exec-server", 
"codex-features", "codex-feedback", "codex-file-search", diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index f19fb650f7..f05355e393 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -51,6 +51,7 @@ members = [ "otel", "tui", "tui_app_server", + "tools", "v8-poc", "utils/absolute-path", "utils/cargo-bin", @@ -80,10 +81,8 @@ members = [ "state", "terminal-detection", "codex-experimental-api-macros", - "test-macros", "package-manager", "plugin", - "artifacts", ] resolver = "2" @@ -102,7 +101,6 @@ app_test_support = { path = "app-server/tests/common" } codex-ansi-escape = { path = "ansi-escape" } codex-analytics = { path = "analytics" } codex-api = { path = "codex-api" } -codex-artifacts = { path = "artifacts" } codex-code-mode = { path = "code-mode" } codex-package-manager = { path = "package-manager" } codex-app-server = { path = "app-server" } @@ -152,8 +150,8 @@ codex-shell-escalation = { path = "shell-escalation" } codex-skills = { path = "skills" } codex-state = { path = "state" } codex-stdio-to-uds = { path = "stdio-to-uds" } -codex-test-macros = { path = "test-macros" } codex-terminal-detection = { path = "terminal-detection" } +codex-tools = { path = "tools" } codex-tui = { path = "tui" } codex-tui-app-server = { path = "tui_app_server" } codex-v8-poc = { path = "v8-poc" } @@ -189,7 +187,7 @@ allocative = "0.3.3" ansi-to-tui = "7.0.0" anyhow = "1" arboard = { version = "3", features = ["wayland-data-control"] } -askama = "0.15.4" +arc-swap = "1.9.0" assert_cmd = "2" assert_matches = "1.5.0" async-channel = "2.3.1" @@ -397,6 +395,7 @@ unwrap_used = "deny" ignored = [ "icu_provider", "openssl-sys", + "codex-package-manager", "codex-utils-readiness", "codex-utils-template", "codex-v8-poc", diff --git a/codex-rs/app-server-client/src/lib.rs b/codex-rs/app-server-client/src/lib.rs index 1ea9f6fd87..ad9eca65e7 100644 --- a/codex-rs/app-server-client/src/lib.rs +++ b/codex-rs/app-server-client/src/lib.rs @@ -868,8 +868,11 @@ mod tests { use 
tokio::net::TcpListener; use tokio::time::Duration; use tokio::time::timeout; - use tokio_tungstenite::accept_async; + use tokio_tungstenite::accept_hdr_async; use tokio_tungstenite::tungstenite::Message; + use tokio_tungstenite::tungstenite::handshake::server::Request as WebSocketRequest; + use tokio_tungstenite::tungstenite::handshake::server::Response as WebSocketResponse; + use tokio_tungstenite::tungstenite::http::header::AUTHORIZATION; async fn build_test_config() -> Config { match ConfigBuilder::default().build().await { @@ -908,6 +911,19 @@ mod tests { } async fn start_test_remote_server(handler: F) -> String + where + F: FnOnce(tokio_tungstenite::WebSocketStream) -> Fut + + Send + + 'static, + Fut: std::future::Future + Send + 'static, + { + start_test_remote_server_with_auth(None, handler).await + } + + async fn start_test_remote_server_with_auth( + expected_auth_token: Option, + handler: F, + ) -> String where F: FnOnce(tokio_tungstenite::WebSocketStream) -> Fut + Send @@ -920,9 +936,23 @@ mod tests { let addr = listener.local_addr().expect("listener address"); tokio::spawn(async move { let (stream, _) = listener.accept().await.expect("accept should succeed"); - let websocket = accept_async(stream) - .await - .expect("websocket upgrade should succeed"); + let websocket = accept_hdr_async( + stream, + move |request: &WebSocketRequest, response: WebSocketResponse| { + let provided_auth_token = request + .headers() + .get(AUTHORIZATION) + .and_then(|value| value.to_str().ok()) + .map(str::to_owned); + let expected_auth_token = expected_auth_token + .as_ref() + .map(|token| format!("Bearer {token}")); + assert_eq!(provided_auth_token, expected_auth_token); + Ok(response) + }, + ) + .await + .expect("websocket upgrade should succeed"); handler(websocket).await; }); format!("ws://{addr}") @@ -1037,6 +1067,7 @@ mod tests { fn test_remote_connect_args(websocket_url: String) -> RemoteAppServerConnectArgs { RemoteAppServerConnectArgs { websocket_url, + auth_token: 
None, client_name: "codex-app-server-client-test".to_string(), client_version: "0.0.0-test".to_string(), experimental_api: true, @@ -1253,6 +1284,7 @@ mod tests { }), ) .await; + websocket.close(None).await.expect("close should succeed"); }) .await; let client = RemoteAppServerClient::connect(test_remote_connect_args(websocket_url)) @@ -1273,6 +1305,59 @@ mod tests { client.shutdown().await.expect("shutdown should complete"); } + #[tokio::test] + async fn remote_connect_includes_auth_header_when_configured() { + let auth_token = "remote-bearer-token".to_string(); + let websocket_url = start_test_remote_server_with_auth( + Some(auth_token.clone()), + |mut websocket| async move { + expect_remote_initialize(&mut websocket).await; + websocket.close(None).await.expect("close should succeed"); + }, + ) + .await; + let client = RemoteAppServerClient::connect(RemoteAppServerConnectArgs { + auth_token: Some(auth_token), + ..test_remote_connect_args(websocket_url) + }) + .await + .expect("remote client should connect"); + + client.shutdown().await.expect("shutdown should complete"); + } + + #[tokio::test] + async fn remote_connect_rejects_non_loopback_ws_when_auth_configured() { + let result = RemoteAppServerClient::connect(RemoteAppServerConnectArgs { + websocket_url: "ws://example.com:4500".to_string(), + auth_token: Some("remote-bearer-token".to_string()), + ..test_remote_connect_args("ws://127.0.0.1:1".to_string()) + }) + .await; + let err = match result { + Ok(_) => panic!("non-loopback ws should be rejected before connect"), + Err(err) => err, + }; + assert_eq!(err.kind(), ErrorKind::InvalidInput); + assert!( + err.to_string() + .contains("remote auth tokens require `wss://` or loopback `ws://` URLs") + ); + } + + #[test] + fn remote_auth_token_transport_policy_allows_wss_and_loopback_ws() { + assert!(crate::remote::websocket_url_supports_auth_token( + &url::Url::parse("wss://example.com:443").expect("wss URL should parse") + )); + 
assert!(crate::remote::websocket_url_supports_auth_token( + &url::Url::parse("ws://127.0.0.1:4500").expect("loopback ws URL should parse") + )); + assert!(!crate::remote::websocket_url_supports_auth_token( + &url::Url::parse("ws://example.com:4500").expect("non-loopback ws URL should parse") + )); + } + #[tokio::test] async fn remote_duplicate_request_id_keeps_original_waiter() { let (first_request_seen_tx, first_request_seen_rx) = tokio::sync::oneshot::channel(); @@ -1425,6 +1510,7 @@ mod tests { .await; let mut client = RemoteAppServerClient::connect(RemoteAppServerConnectArgs { websocket_url, + auth_token: None, client_name: "codex-app-server-client-test".to_string(), client_version: "0.0.0-test".to_string(), experimental_api: true, diff --git a/codex-rs/app-server-client/src/remote.rs b/codex-rs/app-server-client/src/remote.rs index 9cf37e262f..a82b924e45 100644 --- a/codex-rs/app-server-client/src/remote.rs +++ b/codex-rs/app-server-client/src/remote.rs @@ -48,6 +48,9 @@ use tokio_tungstenite::MaybeTlsStream; use tokio_tungstenite::WebSocketStream; use tokio_tungstenite::connect_async; use tokio_tungstenite::tungstenite::Message; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use tokio_tungstenite::tungstenite::http::HeaderValue; +use tokio_tungstenite::tungstenite::http::header::AUTHORIZATION; use tracing::warn; use url::Url; @@ -57,6 +60,7 @@ const INITIALIZE_TIMEOUT: Duration = Duration::from_secs(10); #[derive(Debug, Clone)] pub struct RemoteAppServerConnectArgs { pub websocket_url: String, + pub auth_token: Option, pub client_name: String, pub client_version: String, pub experimental_api: bool, @@ -86,6 +90,16 @@ impl RemoteAppServerConnectArgs { } } +pub(crate) fn websocket_url_supports_auth_token(url: &Url) -> bool { + match (url.scheme(), url.host()) { + ("wss", Some(_)) => true, + ("ws", Some(url::Host::Domain(domain))) => domain.eq_ignore_ascii_case("localhost"), + ("ws", Some(url::Host::Ipv4(addr))) => addr.is_loopback(), + ("ws", 
Some(url::Host::Ipv6(addr))) => addr.is_loopback(), + _ => false, + } +} + enum RemoteClientCommand { Request { request: Box, @@ -132,7 +146,31 @@ impl RemoteAppServerClient { format!("invalid websocket URL `{websocket_url}`: {err}"), ) })?; - let stream = timeout(CONNECT_TIMEOUT, connect_async(url.as_str())) + if args.auth_token.is_some() && !websocket_url_supports_auth_token(&url) { + return Err(IoError::new( + ErrorKind::InvalidInput, + format!( + "remote auth tokens require `wss://` or loopback `ws://` URLs; got `{websocket_url}`" + ), + )); + } + let mut request = url.as_str().into_client_request().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid websocket URL `{websocket_url}`: {err}"), + ) + })?; + if let Some(auth_token) = args.auth_token.as_deref() { + let header_value = + HeaderValue::from_str(&format!("Bearer {auth_token}")).map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid remote authorization header value: {err}"), + ) + })?; + request.headers_mut().insert(AUTHORIZATION, header_value); + } + let stream = timeout(CONNECT_TIMEOUT, connect_async(request)) .await .map_err(|_| { IoError::new( diff --git a/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json b/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json index 2c146b9522..617fa1f3cb 100644 --- a/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json +++ b/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json @@ -28,41 +28,6 @@ }, "type": "object" }, - "AdditionalMacOsPermissions": { - "properties": { - "accessibility": { - "type": "boolean" - }, - "automations": { - "$ref": "#/definitions/MacOsAutomationPermission" - }, - "calendar": { - "type": "boolean" - }, - "contacts": { - "$ref": "#/definitions/MacOsContactsPermission" - }, - "launchServices": { - "type": "boolean" - }, - "preferences": { - "$ref": 
"#/definitions/MacOsPreferencesPermission" - }, - "reminders": { - "type": "boolean" - } - }, - "required": [ - "accessibility", - "automations", - "calendar", - "contacts", - "launchServices", - "preferences", - "reminders" - ], - "type": "object" - }, "AdditionalNetworkPermissions": { "properties": { "enabled": { @@ -86,16 +51,6 @@ } ] }, - "macos": { - "anyOf": [ - { - "$ref": "#/definitions/AdditionalMacOsPermissions" - }, - { - "type": "null" - } - ] - }, "network": { "anyOf": [ { @@ -298,60 +253,6 @@ } ] }, - "CommandExecutionRequestApprovalSkillMetadata": { - "properties": { - "pathToSkillsMd": { - "type": "string" - } - }, - "required": [ - "pathToSkillsMd" - ], - "type": "object" - }, - "MacOsAutomationPermission": { - "oneOf": [ - { - "enum": [ - "none", - "all" - ], - "type": "string" - }, - { - "additionalProperties": false, - "properties": { - "bundle_ids": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "bundle_ids" - ], - "title": "BundleIdsMacOsAutomationPermission", - "type": "object" - } - ] - }, - "MacOsContactsPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, - "MacOsPreferencesPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, "NetworkApprovalContext": { "properties": { "host": { diff --git a/codex-rs/app-server-protocol/schema/json/ServerNotification.json b/codex-rs/app-server-protocol/schema/json/ServerNotification.json index 37f7ff8c5f..8ca93137c6 100644 --- a/codex-rs/app-server-protocol/schema/json/ServerNotification.json +++ b/codex-rs/app-server-protocol/schema/json/ServerNotification.json @@ -1206,6 +1206,7 @@ "HookEventName": { "enum": [ "preToolUse", + "postToolUse", "sessionStart", "userPromptSubmit", "stop" diff --git a/codex-rs/app-server-protocol/schema/json/ServerRequest.json b/codex-rs/app-server-protocol/schema/json/ServerRequest.json index 1fbbfb1b0a..7c11a4c02b 100644 --- 
a/codex-rs/app-server-protocol/schema/json/ServerRequest.json +++ b/codex-rs/app-server-protocol/schema/json/ServerRequest.json @@ -28,41 +28,6 @@ }, "type": "object" }, - "AdditionalMacOsPermissions": { - "properties": { - "accessibility": { - "type": "boolean" - }, - "automations": { - "$ref": "#/definitions/MacOsAutomationPermission" - }, - "calendar": { - "type": "boolean" - }, - "contacts": { - "$ref": "#/definitions/MacOsContactsPermission" - }, - "launchServices": { - "type": "boolean" - }, - "preferences": { - "$ref": "#/definitions/MacOsPreferencesPermission" - }, - "reminders": { - "type": "boolean" - } - }, - "required": [ - "accessibility", - "automations", - "calendar", - "contacts", - "launchServices", - "preferences", - "reminders" - ], - "type": "object" - }, "AdditionalNetworkPermissions": { "properties": { "enabled": { @@ -86,16 +51,6 @@ } ] }, - "macos": { - "anyOf": [ - { - "$ref": "#/definitions/AdditionalMacOsPermissions" - }, - { - "type": "null" - } - ] - }, "network": { "anyOf": [ { @@ -452,17 +407,6 @@ ], "type": "object" }, - "CommandExecutionRequestApprovalSkillMetadata": { - "properties": { - "pathToSkillsMd": { - "type": "string" - } - }, - "required": [ - "pathToSkillsMd" - ], - "type": "object" - }, "DynamicToolCallParams": { "properties": { "arguments": true, @@ -638,49 +582,6 @@ ], "type": "object" }, - "MacOsAutomationPermission": { - "oneOf": [ - { - "enum": [ - "none", - "all" - ], - "type": "string" - }, - { - "additionalProperties": false, - "properties": { - "bundle_ids": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "bundle_ids" - ], - "title": "BundleIdsMacOsAutomationPermission", - "type": "object" - } - ] - }, - "MacOsContactsPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, - "MacOsPreferencesPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, "McpElicitationArrayType": { "enum": [ "array" diff 
--git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json index 0819c7a7fd..e880be9ac7 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json @@ -28,41 +28,6 @@ }, "type": "object" }, - "AdditionalMacOsPermissions": { - "properties": { - "accessibility": { - "type": "boolean" - }, - "automations": { - "$ref": "#/definitions/MacOsAutomationPermission" - }, - "calendar": { - "type": "boolean" - }, - "contacts": { - "$ref": "#/definitions/MacOsContactsPermission" - }, - "launchServices": { - "type": "boolean" - }, - "preferences": { - "$ref": "#/definitions/MacOsPreferencesPermission" - }, - "reminders": { - "type": "boolean" - } - }, - "required": [ - "accessibility", - "automations", - "calendar", - "contacts", - "launchServices", - "preferences", - "reminders" - ], - "type": "object" - }, "AdditionalNetworkPermissions": { "properties": { "enabled": { @@ -86,16 +51,6 @@ } ] }, - "macos": { - "anyOf": [ - { - "$ref": "#/definitions/AdditionalMacOsPermissions" - }, - { - "type": "null" - } - ] - }, "network": { "anyOf": [ { @@ -1864,17 +1819,6 @@ "title": "CommandExecutionRequestApprovalResponse", "type": "object" }, - "CommandExecutionRequestApprovalSkillMetadata": { - "properties": { - "pathToSkillsMd": { - "type": "string" - } - }, - "required": [ - "pathToSkillsMd" - ], - "type": "object" - }, "DynamicToolCallParams": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -2479,49 +2423,6 @@ "title": "JSONRPCResponse", "type": "object" }, - "MacOsAutomationPermission": { - "oneOf": [ - { - "enum": [ - "none", - "all" - ], - "type": "string" - }, - { - "additionalProperties": false, - "properties": { - "bundle_ids": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "bundle_ids" - 
], - "title": "BundleIdsMacOsAutomationPermission", - "type": "object" - } - ] - }, - "MacOsContactsPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, - "MacOsPreferencesPermission": { - "enum": [ - "none", - "read_only", - "read_write" - ], - "type": "string" - }, "McpElicitationArrayType": { "enum": [ "array" @@ -8221,6 +8122,7 @@ "HookEventName": { "enum": [ "preToolUse", + "postToolUse", "sessionStart", "userPromptSubmit", "stop" diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json index 8a542f608c..824fd3701e 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json @@ -4892,6 +4892,7 @@ "HookEventName": { "enum": [ "preToolUse", + "postToolUse", "sessionStart", "userPromptSubmit", "stop" diff --git a/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json index 881c343601..bce797086c 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json @@ -4,6 +4,7 @@ "HookEventName": { "enum": [ "preToolUse", + "postToolUse", "sessionStart", "userPromptSubmit", "stop" diff --git a/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json index 18fdb5008d..72f32d0d9d 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json @@ -4,6 +4,7 @@ "HookEventName": { "enum": [ "preToolUse", + "postToolUse", "sessionStart", "userPromptSubmit", "stop" diff --git 
a/codex-rs/app-server-protocol/schema/typescript/MacOsAutomationPermission.ts b/codex-rs/app-server-protocol/schema/typescript/MacOsAutomationPermission.ts deleted file mode 100644 index 31036b23ee..0000000000 --- a/codex-rs/app-server-protocol/schema/typescript/MacOsAutomationPermission.ts +++ /dev/null @@ -1,5 +0,0 @@ -// GENERATED CODE! DO NOT MODIFY BY HAND! - -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. - -export type MacOsAutomationPermission = "none" | "all" | { "bundle_ids": Array }; diff --git a/codex-rs/app-server-protocol/schema/typescript/MacOsContactsPermission.ts b/codex-rs/app-server-protocol/schema/typescript/MacOsContactsPermission.ts deleted file mode 100644 index dd6d7b59ef..0000000000 --- a/codex-rs/app-server-protocol/schema/typescript/MacOsContactsPermission.ts +++ /dev/null @@ -1,5 +0,0 @@ -// GENERATED CODE! DO NOT MODIFY BY HAND! - -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. - -export type MacOsContactsPermission = "none" | "read_only" | "read_write"; diff --git a/codex-rs/app-server-protocol/schema/typescript/MacOsPreferencesPermission.ts b/codex-rs/app-server-protocol/schema/typescript/MacOsPreferencesPermission.ts deleted file mode 100644 index 2f5234a268..0000000000 --- a/codex-rs/app-server-protocol/schema/typescript/MacOsPreferencesPermission.ts +++ /dev/null @@ -1,5 +0,0 @@ -// GENERATED CODE! DO NOT MODIFY BY HAND! - -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
- -export type MacOsPreferencesPermission = "none" | "read_only" | "read_write"; diff --git a/codex-rs/app-server-protocol/schema/typescript/index.ts b/codex-rs/app-server-protocol/schema/typescript/index.ts index 65defba9f0..9b53219ea5 100644 --- a/codex-rs/app-server-protocol/schema/typescript/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/index.ts @@ -41,9 +41,6 @@ export type { InputModality } from "./InputModality"; export type { LocalShellAction } from "./LocalShellAction"; export type { LocalShellExecAction } from "./LocalShellExecAction"; export type { LocalShellStatus } from "./LocalShellStatus"; -export type { MacOsAutomationPermission } from "./MacOsAutomationPermission"; -export type { MacOsContactsPermission } from "./MacOsContactsPermission"; -export type { MacOsPreferencesPermission } from "./MacOsPreferencesPermission"; export type { MessagePhase } from "./MessagePhase"; export type { ModeKind } from "./ModeKind"; export type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalMacOsPermissions.ts b/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalMacOsPermissions.ts deleted file mode 100644 index 177661bb0e..0000000000 --- a/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalMacOsPermissions.ts +++ /dev/null @@ -1,8 +0,0 @@ -// GENERATED CODE! DO NOT MODIFY BY HAND! - -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
-import type { MacOsAutomationPermission } from "../MacOsAutomationPermission"; -import type { MacOsContactsPermission } from "../MacOsContactsPermission"; -import type { MacOsPreferencesPermission } from "../MacOsPreferencesPermission"; - -export type AdditionalMacOsPermissions = { preferences: MacOsPreferencesPermission, automations: MacOsAutomationPermission, launchServices: boolean, accessibility: boolean, calendar: boolean, reminders: boolean, contacts: MacOsContactsPermission, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalPermissionProfile.ts b/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalPermissionProfile.ts index 701ba54316..65836c119d 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalPermissionProfile.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/AdditionalPermissionProfile.ts @@ -2,7 +2,6 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
import type { AdditionalFileSystemPermissions } from "./AdditionalFileSystemPermissions"; -import type { AdditionalMacOsPermissions } from "./AdditionalMacOsPermissions"; import type { AdditionalNetworkPermissions } from "./AdditionalNetworkPermissions"; -export type AdditionalPermissionProfile = { network: AdditionalNetworkPermissions | null, fileSystem: AdditionalFileSystemPermissions | null, macos: AdditionalMacOsPermissions | null, }; +export type AdditionalPermissionProfile = { network: AdditionalNetworkPermissions | null, fileSystem: AdditionalFileSystemPermissions | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts index 623fb971c1..8fb6375e69 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts @@ -4,7 +4,6 @@ import type { AdditionalPermissionProfile } from "./AdditionalPermissionProfile"; import type { CommandAction } from "./CommandAction"; import type { CommandExecutionApprovalDecision } from "./CommandExecutionApprovalDecision"; -import type { CommandExecutionRequestApprovalSkillMetadata } from "./CommandExecutionRequestApprovalSkillMetadata"; import type { ExecPolicyAmendment } from "./ExecPolicyAmendment"; import type { NetworkApprovalContext } from "./NetworkApprovalContext"; import type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment"; @@ -44,10 +43,6 @@ commandActions?: Array | null, * Optional additional permissions requested for this command. */ additionalPermissions?: AdditionalPermissionProfile | null, -/** - * Optional skill metadata when the approval was triggered by a skill script. 
- */ -skillMetadata?: CommandExecutionRequestApprovalSkillMetadata | null, /** * Optional proposed execpolicy amendment to allow similar commands without prompting. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalSkillMetadata.ts b/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalSkillMetadata.ts deleted file mode 100644 index dcfd7c2916..0000000000 --- a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalSkillMetadata.ts +++ /dev/null @@ -1,5 +0,0 @@ -// GENERATED CODE! DO NOT MODIFY BY HAND! - -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. - -export type CommandExecutionRequestApprovalSkillMetadata = { pathToSkillsMd: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/HookEventName.ts b/codex-rs/app-server-protocol/schema/typescript/v2/HookEventName.ts index b75ee3930a..b97c709b98 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/HookEventName.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/HookEventName.ts @@ -2,4 +2,4 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
-export type HookEventName = "preToolUse" | "sessionStart" | "userPromptSubmit" | "stop"; +export type HookEventName = "preToolUse" | "postToolUse" | "sessionStart" | "userPromptSubmit" | "stop"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts index d0b1b88198..73fe05eaeb 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts @@ -5,7 +5,6 @@ export type { AccountLoginCompletedNotification } from "./AccountLoginCompletedN export type { AccountRateLimitsUpdatedNotification } from "./AccountRateLimitsUpdatedNotification"; export type { AccountUpdatedNotification } from "./AccountUpdatedNotification"; export type { AdditionalFileSystemPermissions } from "./AdditionalFileSystemPermissions"; -export type { AdditionalMacOsPermissions } from "./AdditionalMacOsPermissions"; export type { AdditionalNetworkPermissions } from "./AdditionalNetworkPermissions"; export type { AdditionalPermissionProfile } from "./AdditionalPermissionProfile"; export type { AgentMessageDeltaNotification } from "./AgentMessageDeltaNotification"; @@ -54,7 +53,6 @@ export type { CommandExecutionApprovalDecision } from "./CommandExecutionApprova export type { CommandExecutionOutputDeltaNotification } from "./CommandExecutionOutputDeltaNotification"; export type { CommandExecutionRequestApprovalParams } from "./CommandExecutionRequestApprovalParams"; export type { CommandExecutionRequestApprovalResponse } from "./CommandExecutionRequestApprovalResponse"; -export type { CommandExecutionRequestApprovalSkillMetadata } from "./CommandExecutionRequestApprovalSkillMetadata"; export type { CommandExecutionSource } from "./CommandExecutionSource"; export type { CommandExecutionStatus } from "./CommandExecutionStatus"; export type { Config } from "./Config"; diff --git a/codex-rs/app-server-protocol/src/export.rs 
b/codex-rs/app-server-protocol/src/export.rs index b89f23c666..033f605973 100644 --- a/codex-rs/app-server-protocol/src/export.rs +++ b/codex-rs/app-server-protocol/src/export.rs @@ -2298,10 +2298,6 @@ mod tests { command_execution_request_approval_ts.contains("additionalPermissions"), true ); - assert_eq!( - command_execution_request_approval_ts.contains("skillMetadata"), - true - ); Ok(()) } @@ -2705,10 +2701,6 @@ export type Config = { stableField: Keep, unstableField: string | null } & ({ [k command_execution_request_approval_json.contains("additionalPermissions"), false ); - assert_eq!( - command_execution_request_approval_json.contains("skillMetadata"), - false - ); let client_request_json = fs::read_to_string(output_dir.join("ClientRequest.json"))?; assert_eq!( @@ -2721,7 +2713,6 @@ export type Config = { stableField: Keep, unstableField: string | null } & ({ [k fs::read_to_string(output_dir.join("codex_app_server_protocol.schemas.json"))?; assert_eq!(bundle_json.contains("mockExperimentalField"), false); assert_eq!(bundle_json.contains("additionalPermissions"), false); - assert_eq!(bundle_json.contains("skillMetadata"), false); assert_eq!(bundle_json.contains("MockExperimentalMethodParams"), false); assert_eq!( bundle_json.contains("MockExperimentalMethodResponse"), @@ -2731,7 +2722,6 @@ export type Config = { stableField: Keep, unstableField: string | null } & ({ [k fs::read_to_string(output_dir.join("codex_app_server_protocol.v2.schemas.json"))?; assert_eq!(flat_v2_bundle_json.contains("mockExperimentalField"), false); assert_eq!(flat_v2_bundle_json.contains("additionalPermissions"), false); - assert_eq!(flat_v2_bundle_json.contains("skillMetadata"), false); assert_eq!( flat_v2_bundle_json.contains("MockExperimentalMethodParams"), false diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index 5a9215f2ee..6bd44562e0 100644 --- a/codex-rs/app-server-protocol/src/protocol/common.rs +++ 
b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -120,6 +120,41 @@ macro_rules! client_request_definitions { } } + /// Typed response from the server to the client. + #[derive(Serialize, Deserialize, Debug, Clone)] + #[serde(tag = "method", rename_all = "camelCase")] + pub enum ClientResponse { + $( + $(#[doc = $variant_doc])* + $(#[serde(rename = $wire)])? + $variant { + #[serde(rename = "id")] + request_id: RequestId, + response: $response, + }, + )* + } + + impl ClientResponse { + pub fn id(&self) -> &RequestId { + match self { + $(Self::$variant { request_id, .. } => request_id,)* + } + } + + pub fn method(&self) -> String { + serde_json::to_value(self) + .ok() + .and_then(|value| { + value + .get("method") + .and_then(serde_json::Value::as_str) + .map(str::to_owned) + }) + .unwrap_or_else(|| "".to_string()) + } + } + impl crate::experimental_api::ExperimentalApi for ClientRequest { fn experimental_reason(&self) -> Option<&'static str> { match self { @@ -1265,6 +1300,84 @@ mod tests { Ok(()) } + #[test] + fn serialize_client_response() -> Result<()> { + let response = ClientResponse::ThreadStart { + request_id: RequestId::Integer(7), + response: v2::ThreadStartResponse { + thread: v2::Thread { + id: "67e55044-10b1-426f-9247-bb680e5fe0c8".to_string(), + preview: "first prompt".to_string(), + ephemeral: true, + model_provider: "openai".to_string(), + created_at: 1, + updated_at: 2, + status: v2::ThreadStatus::Idle, + path: None, + cwd: PathBuf::from("/tmp"), + cli_version: "0.0.0".to_string(), + source: v2::SessionSource::Exec, + agent_nickname: None, + agent_role: None, + git_info: None, + name: None, + turns: Vec::new(), + }, + model: "gpt-5".to_string(), + model_provider: "openai".to_string(), + service_tier: None, + cwd: PathBuf::from("/tmp"), + approval_policy: v2::AskForApproval::OnFailure, + approvals_reviewer: v2::ApprovalsReviewer::User, + sandbox: v2::SandboxPolicy::DangerFullAccess, + reasoning_effort: None, + }, + }; + + 
assert_eq!(response.id(), &RequestId::Integer(7)); + assert_eq!(response.method(), "thread/start"); + assert_eq!( + json!({ + "method": "thread/start", + "id": 7, + "response": { + "thread": { + "id": "67e55044-10b1-426f-9247-bb680e5fe0c8", + "preview": "first prompt", + "ephemeral": true, + "modelProvider": "openai", + "createdAt": 1, + "updatedAt": 2, + "status": { + "type": "idle" + }, + "path": null, + "cwd": "/tmp", + "cliVersion": "0.0.0", + "source": "exec", + "agentNickname": null, + "agentRole": null, + "gitInfo": null, + "name": null, + "turns": [] + }, + "model": "gpt-5", + "modelProvider": "openai", + "serviceTier": null, + "cwd": "/tmp", + "approvalPolicy": "on-failure", + "approvalsReviewer": "user", + "sandbox": { + "type": "dangerFullAccess" + }, + "reasoningEffort": null + } + }), + serde_json::to_value(&response)?, + ); + Ok(()) + } + #[test] fn serialize_config_requirements_read() -> Result<()> { let request = ClientRequest::ConfigRequirementsRead { @@ -1703,9 +1816,7 @@ mod tests { read: Some(vec![absolute_path("/tmp/allowed")]), write: None, }), - macos: None, }), - skill_metadata: None, proposed_execpolicy_amendment: None, proposed_network_policy_amendments: None, available_decisions: None, @@ -1716,31 +1827,4 @@ mod tests { Some("item/commandExecution/requestApproval.additionalPermissions") ); } - - #[test] - fn command_execution_request_approval_skill_metadata_is_marked_experimental() { - let params = v2::CommandExecutionRequestApprovalParams { - thread_id: "thr_123".to_string(), - turn_id: "turn_123".to_string(), - item_id: "call_123".to_string(), - approval_id: None, - reason: None, - network_approval_context: None, - command: Some("cat file".to_string()), - cwd: None, - command_actions: None, - additional_permissions: None, - skill_metadata: Some(v2::CommandExecutionRequestApprovalSkillMetadata { - path_to_skills_md: PathBuf::from("/tmp/SKILLS.md"), - }), - proposed_execpolicy_amendment: None, - proposed_network_policy_amendments: None, - 
available_decisions: None, - }; - let reason = crate::experimental_api::ExperimentalApi::experimental_reason(¶ms); - assert_eq!( - reason, - Some("item/commandExecution/requestApproval.skillMetadata") - ); - } } diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs index c699d32fdf..f666df8a5d 100644 --- a/codex-rs/app-server-protocol/src/protocol/v2.rs +++ b/codex-rs/app-server-protocol/src/protocol/v2.rs @@ -7,7 +7,6 @@ use crate::protocol::common::AuthMode; use codex_experimental_api_macros::ExperimentalApi; use codex_protocol::account::PlanType; use codex_protocol::approvals::ElicitationRequest as CoreElicitationRequest; -use codex_protocol::approvals::ExecApprovalRequestSkillMetadata as CoreExecApprovalRequestSkillMetadata; use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment; use codex_protocol::approvals::NetworkApprovalContext as CoreNetworkApprovalContext; use codex_protocol::approvals::NetworkApprovalProtocol as CoreNetworkApprovalProtocol; @@ -33,10 +32,6 @@ use codex_protocol::mcp::Tool as McpTool; use codex_protocol::memory_citation::MemoryCitation as CoreMemoryCitation; use codex_protocol::memory_citation::MemoryCitationEntry as CoreMemoryCitationEntry; use codex_protocol::models::FileSystemPermissions as CoreFileSystemPermissions; -use codex_protocol::models::MacOsAutomationPermission as CoreMacOsAutomationPermission; -use codex_protocol::models::MacOsContactsPermission as CoreMacOsContactsPermission; -use codex_protocol::models::MacOsPreferencesPermission as CoreMacOsPreferencesPermission; -use codex_protocol::models::MacOsSeatbeltProfileExtensions as CoreMacOsSeatbeltProfileExtensions; use codex_protocol::models::MessagePhase; use codex_protocol::models::NetworkPermissions as CoreNetworkPermissions; use codex_protocol::models::PermissionProfile as CorePermissionProfile; @@ -378,7 +373,7 @@ v2_enum_from_core!( v2_enum_from_core!( pub enum HookEventName from 
CoreHookEventName { - PreToolUse, SessionStart, UserPromptSubmit, Stop + PreToolUse, PostToolUse, SessionStart, UserPromptSubmit, Stop } ); @@ -1087,47 +1082,6 @@ impl From for CoreFileSystemPermissions { } } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] -#[serde(rename_all = "camelCase")] -#[ts(export_to = "v2/")] -pub struct AdditionalMacOsPermissions { - pub preferences: CoreMacOsPreferencesPermission, - pub automations: CoreMacOsAutomationPermission, - pub launch_services: bool, - pub accessibility: bool, - pub calendar: bool, - pub reminders: bool, - pub contacts: CoreMacOsContactsPermission, -} - -impl From for AdditionalMacOsPermissions { - fn from(value: CoreMacOsSeatbeltProfileExtensions) -> Self { - Self { - preferences: value.macos_preferences, - automations: value.macos_automation, - launch_services: value.macos_launch_services, - accessibility: value.macos_accessibility, - calendar: value.macos_calendar, - reminders: value.macos_reminders, - contacts: value.macos_contacts, - } - } -} - -impl From for CoreMacOsSeatbeltProfileExtensions { - fn from(value: AdditionalMacOsPermissions) -> Self { - Self { - macos_preferences: value.preferences, - macos_automation: value.automations, - macos_launch_services: value.launch_services, - macos_accessibility: value.accessibility, - macos_calendar: value.calendar, - macos_reminders: value.reminders, - macos_contacts: value.contacts, - } - } -} - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -1184,7 +1138,6 @@ impl From for CoreRequestPermissionProfile { pub struct AdditionalPermissionProfile { pub network: Option, pub file_system: Option, - pub macos: Option, } impl From for AdditionalPermissionProfile { @@ -1192,7 +1145,6 @@ impl From for AdditionalPermissionProfile { Self { network: value.network.map(AdditionalNetworkPermissions::from), file_system: 
value.file_system.map(AdditionalFileSystemPermissions::from), - macos: value.macos.map(AdditionalMacOsPermissions::from), } } } @@ -1202,7 +1154,6 @@ impl From for CorePermissionProfile { Self { network: value.network.map(CoreNetworkPermissions::from), file_system: value.file_system.map(CoreFileSystemPermissions::from), - macos: value.macos.map(CoreMacOsSeatbeltProfileExtensions::from), } } } @@ -1224,7 +1175,6 @@ impl From for CorePermissionProfile { Self { network: value.network.map(CoreNetworkPermissions::from), file_system: value.file_system.map(CoreFileSystemPermissions::from), - macos: None, } } } @@ -3520,14 +3470,6 @@ impl From for SkillMetadata { } } -impl From for CommandExecutionRequestApprovalSkillMetadata { - fn from(value: CoreExecApprovalRequestSkillMetadata) -> Self { - Self { - path_to_skills_md: value.path_to_skills_md, - } - } -} - impl From for SkillInterface { fn from(value: CoreSkillInterface) -> Self { Self { @@ -5245,11 +5187,6 @@ pub struct CommandExecutionRequestApprovalParams { #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional = nullable)] pub additional_permissions: Option, - /// Optional skill metadata when the approval was triggered by a skill script. - #[experimental("item/commandExecution/requestApproval.skillMetadata")] - #[serde(default, skip_serializing_if = "Option::is_none")] - #[ts(optional = nullable)] - pub skill_metadata: Option, /// Optional proposed execpolicy amendment to allow similar commands without prompting. #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional = nullable)] @@ -5271,17 +5208,9 @@ impl CommandExecutionRequestApprovalParams { // We need a generic outbound compatibility design for stripping or // otherwise handling experimental server->client payloads. 
self.additional_permissions = None; - self.skill_metadata = None; } } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] -#[serde(rename_all = "camelCase")] -#[ts(export_to = "v2/")] -pub struct CommandExecutionRequestApprovalSkillMetadata { - pub path_to_skills_md: PathBuf, -} - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -6102,10 +6031,8 @@ mod tests { "fileSystem": { "read": ["relative/path"], "write": null - }, - "macos": null + } }, - "skillMetadata": null, "proposedExecpolicyAmendment": null, "proposedNetworkPolicyAmendments": null, "availableDecisions": null @@ -6118,121 +6045,6 @@ mod tests { ); } - #[test] - fn command_execution_request_approval_accepts_macos_automation_bundle_ids_object() { - let params = serde_json::from_value::(json!({ - "threadId": "thr_123", - "turnId": "turn_123", - "itemId": "call_123", - "command": "cat file", - "cwd": "/tmp", - "commandActions": null, - "reason": null, - "networkApprovalContext": null, - "additionalPermissions": { - "network": null, - "fileSystem": null, - "macos": { - "preferences": "read_only", - "automations": { - "bundle_ids": ["com.apple.Notes"] - }, - "launchServices": false, - "accessibility": false, - "calendar": false, - "reminders": false, - "contacts": "read_only" - } - }, - "skillMetadata": null, - "proposedExecpolicyAmendment": null, - "proposedNetworkPolicyAmendments": null, - "availableDecisions": null - })) - .expect("bundle_ids object should deserialize"); - - assert_eq!( - params - .additional_permissions - .and_then(|permissions| permissions.macos) - .map(|macos| (macos.automations, macos.launch_services, macos.contacts)), - Some(( - CoreMacOsAutomationPermission::BundleIds(vec!["com.apple.Notes".to_string(),]), - false, - CoreMacOsContactsPermission::ReadOnly, - )) - ); - } - - #[test] - fn command_execution_request_approval_accepts_macos_reminders_permission() { - let params = 
serde_json::from_value::(json!({ - "threadId": "thr_123", - "turnId": "turn_123", - "itemId": "call_123", - "command": "cat file", - "cwd": "/tmp", - "commandActions": null, - "reason": null, - "networkApprovalContext": null, - "additionalPermissions": { - "network": null, - "fileSystem": null, - "macos": { - "preferences": "read_only", - "automations": "none", - "launchServices": false, - "accessibility": false, - "calendar": false, - "reminders": true, - "contacts": "none" - } - }, - "skillMetadata": null, - "proposedExecpolicyAmendment": null, - "proposedNetworkPolicyAmendments": null, - "availableDecisions": null - })) - .expect("reminders permission should deserialize"); - - assert_eq!( - params - .additional_permissions - .and_then(|permissions| permissions.macos) - .map(|macos| macos.reminders), - Some(true) - ); - } - - #[test] - fn command_execution_request_approval_accepts_skill_metadata() { - let params = serde_json::from_value::(json!({ - "threadId": "thr_123", - "turnId": "turn_123", - "itemId": "call_123", - "command": "cat file", - "cwd": "/tmp", - "commandActions": null, - "reason": null, - "networkApprovalContext": null, - "additionalPermissions": null, - "skillMetadata": { - "pathToSkillsMd": "/tmp/SKILLS.md" - }, - "proposedExecpolicyAmendment": null, - "proposedNetworkPolicyAmendments": null, - "availableDecisions": null - })) - .expect("skill metadata should deserialize"); - - assert_eq!( - params.skill_metadata, - Some(CommandExecutionRequestApprovalSkillMetadata { - path_to_skills_md: PathBuf::from("/tmp/SKILLS.md"), - }) - ); - } - #[test] fn permissions_request_approval_uses_request_permission_profile() { let read_only_path = if cfg!(windows) { @@ -6390,7 +6202,6 @@ mod tests { .expect("path must be absolute"), ]), }), - macos: None, } ); } diff --git a/codex-rs/app-server-test-client/src/lib.rs b/codex-rs/app-server-test-client/src/lib.rs index 05e193d484..ccbd681587 100644 --- a/codex-rs/app-server-test-client/src/lib.rs +++ 
b/codex-rs/app-server-test-client/src/lib.rs @@ -1923,7 +1923,6 @@ impl CodexClient { cwd, command_actions, additional_permissions, - skill_metadata, proposed_execpolicy_amendment, proposed_network_policy_amendments, available_decisions, @@ -1958,9 +1957,6 @@ impl CodexClient { if let Some(additional_permissions) = additional_permissions.as_ref() { println!("< additional permissions: {additional_permissions:?}"); } - if let Some(skill_metadata) = skill_metadata.as_ref() { - println!("< skill metadata: {skill_metadata:?}"); - } if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() { println!("< proposed execpolicy amendment: {execpolicy_amendment:?}"); } diff --git a/codex-rs/app-server/Cargo.toml b/codex-rs/app-server/Cargo.toml index f54bb7ad03..ee12e87acb 100644 --- a/codex-rs/app-server/Cargo.toml +++ b/codex-rs/app-server/Cargo.toml @@ -49,6 +49,7 @@ codex-feedback = { workspace = true } codex-rmcp-client = { workspace = true } codex-sandboxing = { workspace = true } codex-state = { workspace = true } +codex-tools = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-json-to-toml = { workspace = true } chrono = { workspace = true } diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index a1856af46d..cabea022a1 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -1012,9 +1012,14 @@ Order of messages: `turnId` is best-effort. When the elicitation is correlated with an active turn, the request includes that turn id; otherwise it is `null`. +For MCP tool approval elicitations, form request `meta` includes +`codex_approval_kind: "mcp_tool_call"` and may include `persist: "session"`, +`persist: "always"`, or `persist: ["session", "always"]` to advertise whether +the client can offer session-scoped and/or persistent approval choices. 
+ ### Permission requests -The built-in `request_permissions` tool sends an `item/permissions/requestApproval` JSON-RPC request to the client with the requested permission profile. This v2 payload mirrors the standalone tool's narrower permission shape, so it can request network access and additional filesystem access but does not include the broader `macos` branch used by command-execution `additionalPermissions`. +The built-in `request_permissions` tool sends an `item/permissions/requestApproval` JSON-RPC request to the client with the requested permission profile. This v2 payload mirrors the command-execution `additionalPermissions` shape: it can request network access and additional filesystem access. ```json { @@ -1050,7 +1055,7 @@ The client responds with `result.permissions`, which should be the granted subse } ``` -Only the granted subset matters on the wire. Any permissions omitted from `result.permissions` are treated as denied, including omitted nested keys inside `result.permissions.macos`, so a sparse response like `{ "permissions": { "macos": { "accessibility": true } } }` grants only accessibility. Any permissions not present in the original request are ignored by the server. +Only the granted subset matters on the wire. Any permissions omitted from `result.permissions` are treated as denied. Any permissions not present in the original request are ignored by the server. Within the same turn, granted permissions are sticky: later shell-like tool calls can automatically reuse the granted subset without reissuing a separate permission request. 
diff --git a/codex-rs/app-server/src/bespoke_event_handling.rs b/codex-rs/app-server/src/bespoke_event_handling.rs index 15484d3d5f..9a850c0b1e 100644 --- a/codex-rs/app-server/src/bespoke_event_handling.rs +++ b/codex-rs/app-server/src/bespoke_event_handling.rs @@ -26,7 +26,6 @@ use codex_app_server_protocol::CommandExecutionApprovalDecision; use codex_app_server_protocol::CommandExecutionOutputDeltaNotification; use codex_app_server_protocol::CommandExecutionRequestApprovalParams; use codex_app_server_protocol::CommandExecutionRequestApprovalResponse; -use codex_app_server_protocol::CommandExecutionRequestApprovalSkillMetadata; use codex_app_server_protocol::CommandExecutionSource; use codex_app_server_protocol::CommandExecutionStatus; use codex_app_server_protocol::ContextCompactedNotification; @@ -608,7 +607,6 @@ pub(crate) async fn apply_bespoke_event_handling( proposed_execpolicy_amendment, proposed_network_policy_amendments, additional_permissions, - skill_metadata, parsed_cmd, .. 
} = ev; @@ -680,8 +678,6 @@ pub(crate) async fn apply_bespoke_event_handling( }); let additional_permissions = additional_permissions.map(V2AdditionalPermissionProfile::from); - let skill_metadata = - skill_metadata.map(CommandExecutionRequestApprovalSkillMetadata::from); let params = CommandExecutionRequestApprovalParams { thread_id: conversation_id.to_string(), @@ -694,7 +690,6 @@ pub(crate) async fn apply_bespoke_event_handling( cwd, command_actions, additional_permissions, - skill_metadata, proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2, proposed_network_policy_amendments: proposed_network_policy_amendments_v2, available_decisions: Some(available_decisions), diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 252a5ef7b5..cbbd9e919a 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -545,6 +545,7 @@ impl CodexMessageProcessor { data: None, })?; apply_runtime_feature_enablement(&mut config, &self.current_runtime_feature_enablement()); + config.codex_self_exe = self.arg0_paths.codex_self_exe.clone(); config.codex_linux_sandbox_exe = self.arg0_paths.codex_linux_sandbox_exe.clone(); config.main_execve_wrapper_exe = self.arg0_paths.main_execve_wrapper_exe.clone(); Ok(config) @@ -1411,7 +1412,12 @@ impl CodexMessageProcessor { requires_openai_auth: Some(false), } } else { - match self.auth_manager.auth().await { + let auth = if do_refresh { + self.auth_manager.auth_cached() + } else { + self.auth_manager.auth().await + }; + match auth { Some(auth) => { let permanent_refresh_failure = self.auth_manager.refresh_failure_for_auth(&auth).is_some(); @@ -7948,7 +7954,7 @@ fn validate_dynamic_tools(tools: &[ApiDynamicToolSpec]) -> Result<(), String> { return Err(format!("duplicate dynamic tool name: {name}")); } - if let Err(err) = codex_core::parse_tool_input_schema(&tool.input_schema) { + if let Err(err) = 
codex_tools::parse_tool_input_schema(&tool.input_schema) { return Err(format!( "dynamic tool input schema is not supported for {name}: {err}" )); diff --git a/codex-rs/app-server/src/config_api.rs b/codex-rs/app-server/src/config_api.rs index 2be0bb859c..7f6acc54d1 100644 --- a/codex-rs/app-server/src/config_api.rs +++ b/codex-rs/app-server/src/config_api.rs @@ -130,7 +130,7 @@ impl ConfigApi { .unwrap_or_default() } - async fn load_latest_config( + pub(crate) async fn load_latest_config( &self, fallback_cwd: Option, ) -> Result { diff --git a/codex-rs/app-server/src/in_process.rs b/codex-rs/app-server/src/in_process.rs index 79f32c9083..0405c72259 100644 --- a/codex-rs/app-server/src/in_process.rs +++ b/codex-rs/app-server/src/in_process.rs @@ -77,6 +77,7 @@ use codex_arg0::Arg0DispatchPaths; use codex_core::config::Config; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; +use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; use codex_protocol::protocol::SessionSource; use tokio::sync::mpsc; @@ -383,6 +384,7 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { outgoing: Arc::clone(&processor_outgoing), arg0_paths: args.arg0_paths, config: args.config, + environment_manager: Arc::new(EnvironmentManager::from_env()), cli_overrides: args.cli_overrides, loader_overrides: args.loader_overrides, cloud_requirements: args.cloud_requirements, diff --git a/codex-rs/app-server/src/lib.rs b/codex-rs/app-server/src/lib.rs index 247270a26c..e85a8f1290 100644 --- a/codex-rs/app-server/src/lib.rs +++ b/codex-rs/app-server/src/lib.rs @@ -40,6 +40,7 @@ use codex_core::ExecPolicyError; use codex_core::check_execpolicy_for_warnings; use codex_core::config_loader::ConfigLoadError; use codex_core::config_loader::TextRange as CoreTextRange; +use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; use codex_protocol::protocol::SessionSource; use 
codex_state::log_db; @@ -355,44 +356,13 @@ pub async fn run_main_with_transport( session_source: SessionSource, auth: AppServerWebsocketAuthSettings, ) -> IoResult<()> { + let environment_manager = Arc::new(EnvironmentManager::from_env()); let (transport_event_tx, mut transport_event_rx) = mpsc::channel::(CHANNEL_CAPACITY); let (outgoing_tx, mut outgoing_rx) = mpsc::channel::(CHANNEL_CAPACITY); let (outbound_control_tx, mut outbound_control_rx) = mpsc::channel::(CHANNEL_CAPACITY); - enum TransportRuntime { - Stdio, - WebSocket { - accept_handle: JoinHandle<()>, - shutdown_token: CancellationToken, - }, - } - - let mut stdio_handles = Vec::>::new(); - let transport_runtime = match transport { - AppServerTransport::Stdio => { - start_stdio_connection(transport_event_tx.clone(), &mut stdio_handles).await?; - TransportRuntime::Stdio - } - AppServerTransport::WebSocket { bind_address } => { - let shutdown_token = CancellationToken::new(); - let accept_handle = start_websocket_acceptor( - bind_address, - transport_event_tx.clone(), - shutdown_token.clone(), - policy_from_settings(&auth)?, - ) - .await?; - TransportRuntime::WebSocket { - accept_handle, - shutdown_token, - } - } - }; - let single_client_mode = matches!(&transport_runtime, TransportRuntime::Stdio); - let shutdown_when_no_connections = single_client_mode; - let graceful_signal_restart_enabled = !single_client_mode; // Parse CLI overrides once and derive the base Config eagerly so later // components do not need to work with raw TOML values. 
let cli_kv_overrides = cli_config_overrides.parse_overrides().map_err(|e| { @@ -556,6 +526,30 @@ pub async fn run_main_with_transport( } } + let transport_shutdown_token = CancellationToken::new(); + let mut transport_accept_handles = Vec::>::new(); + + let single_client_mode = matches!(&transport, AppServerTransport::Stdio); + let shutdown_when_no_connections = single_client_mode; + let graceful_signal_restart_enabled = !single_client_mode; + + match transport { + AppServerTransport::Stdio => { + start_stdio_connection(transport_event_tx.clone(), &mut transport_accept_handles) + .await?; + } + AppServerTransport::WebSocket { bind_address } => { + let accept_handle = start_websocket_acceptor( + bind_address, + transport_event_tx.clone(), + transport_shutdown_token.clone(), + policy_from_settings(&auth)?, + ) + .await?; + transport_accept_handles.push(accept_handle); + } + } + let outbound_handle = tokio::spawn(async move { let mut outbound_connections = HashMap::::new(); loop { @@ -620,6 +614,7 @@ pub async fn run_main_with_transport( outgoing: outgoing_message_sender, arg0_paths, config: Arc::new(config), + environment_manager, cli_overrides, loader_overrides, cloud_requirements: cloud_requirements.clone(), @@ -632,10 +627,7 @@ pub async fn run_main_with_transport( let mut thread_created_rx = processor.thread_created_receiver(); let mut running_turn_count_rx = processor.subscribe_running_assistant_turn_count(); let mut connections = HashMap::::new(); - let websocket_accept_shutdown = match &transport_runtime { - TransportRuntime::WebSocket { shutdown_token, .. 
} => Some(shutdown_token.clone()), - TransportRuntime::Stdio => None, - }; + let transport_shutdown_token = transport_shutdown_token.clone(); async move { let mut listen_for_threads = true; let mut shutdown_state = ShutdownState::default(); @@ -648,9 +640,7 @@ pub async fn run_main_with_transport( shutdown_state.update(running_turn_count, connections.len()), ShutdownAction::Finish ) { - if let Some(shutdown_token) = &websocket_accept_shutdown { - shutdown_token.cancel(); - } + transport_shutdown_token.cancel(); let _ = outbound_control_tx .send(OutboundControlEvent::DisconnectAll) .await; @@ -844,16 +834,8 @@ pub async fn run_main_with_transport( let _ = processor_handle.await; let _ = outbound_handle.await; - if let TransportRuntime::WebSocket { - accept_handle, - shutdown_token, - } = transport_runtime - { - shutdown_token.cancel(); - let _ = accept_handle.await; - } - - for handle in stdio_handles { + transport_shutdown_token.cancel(); + for handle in transport_accept_handles { let _ = handle.await; } diff --git a/codex-rs/app-server/src/message_processor.rs b/codex-rs/app-server/src/message_processor.rs index 6c062d9e93..61dea0f7d9 100644 --- a/codex-rs/app-server/src/message_processor.rs +++ b/codex-rs/app-server/src/message_processor.rs @@ -20,6 +20,7 @@ use crate::outgoing_message::OutgoingMessageSender; use crate::outgoing_message::RequestContext; use crate::transport::AppServerTransport; use async_trait::async_trait; +use codex_app_server_protocol::AppListUpdatedNotification; use codex_app_server_protocol::ChatgptAuthTokensRefreshParams; use codex_app_server_protocol::ChatgptAuthTokensRefreshReason; use codex_app_server_protocol::ChatgptAuthTokensRefreshResponse; @@ -53,6 +54,7 @@ use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::ServerRequestPayload; use codex_app_server_protocol::experimental_required_message; use codex_arg0::Arg0DispatchPaths; +use codex_chatgpt::connectors; use codex_core::AnalyticsEventsClient; use 
codex_core::AuthManager; use codex_core::ThreadManager; @@ -66,6 +68,7 @@ use codex_core::default_client::get_codex_user_agent; use codex_core::default_client::set_default_client_residency_requirement; use codex_core::default_client::set_default_originator; use codex_core::models_manager::collaboration_mode_presets::CollaborationModesConfig; +use codex_exec_server::EnvironmentManager; use codex_features::Feature; use codex_feedback::CodexFeedback; use codex_login::auth::ExternalAuthRefreshContext; @@ -177,6 +180,7 @@ pub(crate) struct MessageProcessorArgs { pub(crate) outgoing: Arc, pub(crate) arg0_paths: Arg0DispatchPaths, pub(crate) config: Arc, + pub(crate) environment_manager: Arc, pub(crate) cli_overrides: Vec<(String, TomlValue)>, pub(crate) loader_overrides: LoaderOverrides, pub(crate) cloud_requirements: CloudRequirementsLoader, @@ -195,6 +199,7 @@ impl MessageProcessor { outgoing, arg0_paths, config, + environment_manager, cli_overrides, loader_overrides, cloud_requirements, @@ -218,6 +223,7 @@ impl MessageProcessor { .features .enabled(Feature::DefaultModeRequestUserInput), }, + environment_manager, )); auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id.clone()); auth_manager.set_external_auth_refresher(Arc::new(ExternalAuthRefreshBridge { @@ -882,13 +888,87 @@ impl MessageProcessor { request_id: ConnectionRequestId, params: ExperimentalFeatureEnablementSetParams, ) { - self.handle_config_mutation_result( - request_id, - self.config_api - .set_experimental_feature_enablement(params) - .await, - ) - .await; + let should_refresh_apps_list = params.enablement.get("apps").copied() == Some(true); + match self + .config_api + .set_experimental_feature_enablement(params) + .await + { + Ok(response) => { + self.codex_message_processor.clear_plugin_related_caches(); + self.codex_message_processor + .maybe_start_plugin_startup_tasks_for_latest_config() + .await; + self.outgoing.send_response(request_id, response).await; + if 
should_refresh_apps_list { + self.refresh_apps_list_after_experimental_feature_enablement_set() + .await; + } + } + Err(error) => self.outgoing.send_error(request_id, error).await, + } + } + + async fn refresh_apps_list_after_experimental_feature_enablement_set(&self) { + let config = match self + .config_api + .load_latest_config(/*fallback_cwd*/ None) + .await + { + Ok(config) => config, + Err(error) => { + tracing::warn!( + "failed to load config for apps list refresh after experimental feature enablement: {}", + error.message + ); + return; + } + }; + if !config.features.apps_enabled(Some(&self.auth_manager)).await { + return; + } + + let outgoing = Arc::clone(&self.outgoing); + tokio::spawn(async move { + let (all_connectors_result, accessible_connectors_result) = tokio::join!( + connectors::list_all_connectors_with_options(&config, /*force_refetch*/ true), + connectors::list_accessible_connectors_from_mcp_tools_with_options( + &config, /*force_refetch*/ true, + ), + ); + let all_connectors = match all_connectors_result { + Ok(connectors) => connectors, + Err(err) => { + tracing::warn!( + "failed to force-refresh directory apps after experimental feature enablement: {err:#}" + ); + return; + } + }; + let accessible_connectors = match accessible_connectors_result { + Ok(connectors) => connectors, + Err(err) => { + tracing::warn!( + "failed to force-refresh accessible apps after experimental feature enablement: {err:#}" + ); + return; + } + }; + + let data = connectors::with_app_enabled_state( + connectors::merge_connectors_with_accessible( + all_connectors, + accessible_connectors, + /*all_connectors_loaded*/ true, + ), + &config, + ); + outgoing + .send_server_notification(ServerNotification::AppListUpdated( + AppListUpdatedNotification { data }, + )) + .await; + }); } async fn handle_config_mutation_result( diff --git a/codex-rs/app-server/src/message_processor/tracing_tests.rs b/codex-rs/app-server/src/message_processor/tracing_tests.rs index 
0b4ae47b34..bf048f2a98 100644 --- a/codex-rs/app-server/src/message_processor/tracing_tests.rs +++ b/codex-rs/app-server/src/message_processor/tracing_tests.rs @@ -24,6 +24,7 @@ use codex_core::config::Config; use codex_core::config::ConfigBuilder; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; +use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::W3cTraceContext; @@ -236,6 +237,7 @@ fn build_test_processor( outgoing, arg0_paths: Arg0DispatchPaths::default(), config, + environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), cli_overrides: Vec::new(), loader_overrides: LoaderOverrides::default(), cloud_requirements: CloudRequirementsLoader::default(), diff --git a/codex-rs/app-server/src/transport.rs b/codex-rs/app-server/src/transport/mod.rs similarity index 72% rename from codex-rs/app-server/src/transport.rs rename to codex-rs/app-server/src/transport/mod.rs index 020e441da5..c0653b903b 100644 --- a/codex-rs/app-server/src/transport.rs +++ b/codex-rs/app-server/src/transport/mod.rs @@ -1,8 +1,5 @@ pub(crate) mod auth; -use self::auth::WebsocketAuthPolicy; -use self::auth::authorize_upgrade; -use self::auth::should_warn_about_unauthenticated_non_loopback_listener; use crate::error_code::OVERLOADED_ERROR_CODE; use crate::message_processor::ConnectionSessionState; use crate::outgoing_message::ConnectionId; @@ -10,53 +7,20 @@ use crate::outgoing_message::OutgoingEnvelope; use crate::outgoing_message::OutgoingError; use crate::outgoing_message::OutgoingMessage; use crate::outgoing_message::QueuedOutgoingMessage; -use axum::Router; -use axum::body::Body; -use axum::extract::ConnectInfo; -use axum::extract::State; -use axum::extract::ws::Message as WebSocketMessage; -use axum::extract::ws::WebSocket; -use axum::extract::ws::WebSocketUpgrade; -use axum::http::HeaderMap; -use 
axum::http::Request; -use axum::http::StatusCode; -use axum::http::header::ORIGIN; -use axum::middleware; -use axum::middleware::Next; -use axum::response::IntoResponse; -use axum::response::Response; -use axum::routing::any; -use axum::routing::get; use codex_app_server_protocol::JSONRPCErrorError; use codex_app_server_protocol::JSONRPCMessage; use codex_app_server_protocol::ServerRequest; -use futures::SinkExt; -use futures::StreamExt; -use owo_colors::OwoColorize; -use owo_colors::Stream; -use owo_colors::Style; use std::collections::HashMap; use std::collections::HashSet; -use std::io::ErrorKind; -use std::io::Result as IoResult; use std::net::SocketAddr; use std::str::FromStr; use std::sync::Arc; use std::sync::RwLock; use std::sync::atomic::AtomicBool; -use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; -use tokio::io::AsyncBufReadExt; -use tokio::io::AsyncWriteExt; -use tokio::io::BufReader; -use tokio::io::{self}; -use tokio::net::TcpListener; use tokio::sync::mpsc; -use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; -use tracing::debug; use tracing::error; -use tracing::info; use tracing::warn; /// Size of the bounded channels used to communicate between tasks. The value @@ -64,85 +28,11 @@ use tracing::warn; /// plenty for an interactive CLI. 
pub(crate) const CHANNEL_CAPACITY: usize = 128; -fn colorize(text: &str, style: Style) -> String { - text.if_supports_color(Stream::Stderr, |value| value.style(style)) - .to_string() -} +mod stdio; +mod websocket; -#[allow(clippy::print_stderr)] -fn print_websocket_startup_banner(addr: SocketAddr) { - let title = colorize("codex app-server (WebSockets)", Style::new().bold().cyan()); - let listening_label = colorize("listening on:", Style::new().dimmed()); - let listen_url = colorize(&format!("ws://{addr}"), Style::new().green()); - let ready_label = colorize("readyz:", Style::new().dimmed()); - let ready_url = colorize(&format!("http://{addr}/readyz"), Style::new().green()); - let health_label = colorize("healthz:", Style::new().dimmed()); - let health_url = colorize(&format!("http://{addr}/healthz"), Style::new().green()); - let note_label = colorize("note:", Style::new().dimmed()); - eprintln!("{title}"); - eprintln!(" {listening_label} {listen_url}"); - eprintln!(" {ready_label} {ready_url}"); - eprintln!(" {health_label} {health_url}"); - if addr.ip().is_loopback() { - eprintln!( - " {note_label} binds localhost only (use SSH port-forwarding for remote access)" - ); - } else { - eprintln!( - " {note_label} websocket auth is opt-in in this build; configure `--ws-auth ...` before real remote use" - ); - } -} - -#[derive(Clone)] -struct WebSocketListenerState { - transport_event_tx: mpsc::Sender, - connection_counter: Arc, - auth_policy: Arc, -} - -async fn health_check_handler() -> StatusCode { - StatusCode::OK -} - -async fn reject_requests_with_origin_header( - request: Request, - next: Next, -) -> Result { - if request.headers().contains_key(ORIGIN) { - warn!( - method = %request.method(), - uri = %request.uri(), - "rejecting websocket listener request with Origin header" - ); - Err(StatusCode::FORBIDDEN) - } else { - Ok(next.run(request).await) - } -} - -async fn websocket_upgrade_handler( - websocket: WebSocketUpgrade, - ConnectInfo(peer_addr): ConnectInfo, 
- State(state): State, - headers: HeaderMap, -) -> impl IntoResponse { - if let Err(err) = authorize_upgrade(&headers, state.auth_policy.as_ref()) { - warn!( - %peer_addr, - message = err.message(), - "rejecting websocket client during upgrade" - ); - return (err.status_code(), err.message()).into_response(); - } - let connection_id = ConnectionId(state.connection_counter.fetch_add(1, Ordering::Relaxed)); - info!(%peer_addr, "websocket client connected"); - websocket - .on_upgrade(move |stream| async move { - run_websocket_connection(connection_id, stream, state.transport_event_tx).await; - }) - .into_response() -} +pub(crate) use stdio::start_stdio_connection; +pub(crate) use websocket::start_websocket_acceptor; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum AppServerTransport { @@ -276,262 +166,6 @@ impl OutboundConnectionState { } } -pub(crate) async fn start_stdio_connection( - transport_event_tx: mpsc::Sender, - stdio_handles: &mut Vec>, -) -> IoResult<()> { - let connection_id = ConnectionId(0); - let (writer_tx, mut writer_rx) = mpsc::channel::(CHANNEL_CAPACITY); - let writer_tx_for_reader = writer_tx.clone(); - transport_event_tx - .send(TransportEvent::ConnectionOpened { - connection_id, - writer: writer_tx, - disconnect_sender: None, - }) - .await - .map_err(|_| std::io::Error::new(ErrorKind::BrokenPipe, "processor unavailable"))?; - - let transport_event_tx_for_reader = transport_event_tx.clone(); - stdio_handles.push(tokio::spawn(async move { - let stdin = io::stdin(); - let reader = BufReader::new(stdin); - let mut lines = reader.lines(); - - loop { - match lines.next_line().await { - Ok(Some(line)) => { - if !forward_incoming_message( - &transport_event_tx_for_reader, - &writer_tx_for_reader, - connection_id, - &line, - ) - .await - { - break; - } - } - Ok(None) => break, - Err(err) => { - error!("Failed reading stdin: {err}"); - break; - } - } - } - - let _ = transport_event_tx_for_reader - .send(TransportEvent::ConnectionClosed { 
connection_id }) - .await; - debug!("stdin reader finished (EOF)"); - })); - - stdio_handles.push(tokio::spawn(async move { - let mut stdout = io::stdout(); - while let Some(queued_message) = writer_rx.recv().await { - let Some(mut json) = serialize_outgoing_message(queued_message.message) else { - continue; - }; - json.push('\n'); - if let Err(err) = stdout.write_all(json.as_bytes()).await { - error!("Failed to write to stdout: {err}"); - break; - } - if let Some(write_complete_tx) = queued_message.write_complete_tx { - let _ = write_complete_tx.send(()); - } - } - info!("stdout writer exited (channel closed)"); - })); - - Ok(()) -} - -pub(crate) async fn start_websocket_acceptor( - bind_address: SocketAddr, - transport_event_tx: mpsc::Sender, - shutdown_token: CancellationToken, - auth_policy: WebsocketAuthPolicy, -) -> IoResult> { - if should_warn_about_unauthenticated_non_loopback_listener(bind_address, &auth_policy) { - warn!( - %bind_address, - "starting non-loopback websocket listener without auth; websocket auth is opt-in for now and will become the default in a future release" - ); - } - let listener = TcpListener::bind(bind_address).await?; - let local_addr = listener.local_addr()?; - print_websocket_startup_banner(local_addr); - info!("app-server websocket listening on ws://{local_addr}"); - - let router = Router::new() - .route("/readyz", get(health_check_handler)) - .route("/healthz", get(health_check_handler)) - .fallback(any(websocket_upgrade_handler)) - .layer(middleware::from_fn(reject_requests_with_origin_header)) - .with_state(WebSocketListenerState { - transport_event_tx, - connection_counter: Arc::new(AtomicU64::new(1)), - auth_policy: Arc::new(auth_policy), - }); - let server = axum::serve( - listener, - router.into_make_service_with_connect_info::(), - ) - .with_graceful_shutdown(async move { - shutdown_token.cancelled().await; - }); - Ok(tokio::spawn(async move { - if let Err(err) = server.await { - error!("websocket acceptor failed: 
{err}"); - } - info!("websocket acceptor shutting down"); - })) -} - -async fn run_websocket_connection( - connection_id: ConnectionId, - websocket_stream: WebSocket, - transport_event_tx: mpsc::Sender, -) { - let (writer_tx, writer_rx) = mpsc::channel::(CHANNEL_CAPACITY); - let writer_tx_for_reader = writer_tx.clone(); - let disconnect_token = CancellationToken::new(); - if transport_event_tx - .send(TransportEvent::ConnectionOpened { - connection_id, - writer: writer_tx, - disconnect_sender: Some(disconnect_token.clone()), - }) - .await - .is_err() - { - return; - } - - let (websocket_writer, websocket_reader) = websocket_stream.split(); - let (writer_control_tx, writer_control_rx) = - mpsc::channel::(CHANNEL_CAPACITY); - let mut outbound_task = tokio::spawn(run_websocket_outbound_loop( - websocket_writer, - writer_rx, - writer_control_rx, - disconnect_token.clone(), - )); - let mut inbound_task = tokio::spawn(run_websocket_inbound_loop( - websocket_reader, - transport_event_tx.clone(), - writer_tx_for_reader, - writer_control_tx, - connection_id, - disconnect_token.clone(), - )); - - tokio::select! { - _ = &mut outbound_task => { - disconnect_token.cancel(); - inbound_task.abort(); - } - _ = &mut inbound_task => { - disconnect_token.cancel(); - outbound_task.abort(); - } - } - - let _ = transport_event_tx - .send(TransportEvent::ConnectionClosed { connection_id }) - .await; -} - -async fn run_websocket_outbound_loop( - mut websocket_writer: futures::stream::SplitSink, - mut writer_rx: mpsc::Receiver, - mut writer_control_rx: mpsc::Receiver, - disconnect_token: CancellationToken, -) { - loop { - tokio::select! 
{ - _ = disconnect_token.cancelled() => { - break; - } - message = writer_control_rx.recv() => { - let Some(message) = message else { - break; - }; - if websocket_writer.send(message).await.is_err() { - break; - } - } - queued_message = writer_rx.recv() => { - let Some(queued_message) = queued_message else { - break; - }; - let Some(json) = serialize_outgoing_message(queued_message.message) else { - continue; - }; - if websocket_writer.send(WebSocketMessage::Text(json.into())).await.is_err() { - break; - } - if let Some(write_complete_tx) = queued_message.write_complete_tx { - let _ = write_complete_tx.send(()); - } - } - } - } -} - -async fn run_websocket_inbound_loop( - mut websocket_reader: futures::stream::SplitStream, - transport_event_tx: mpsc::Sender, - writer_tx_for_reader: mpsc::Sender, - writer_control_tx: mpsc::Sender, - connection_id: ConnectionId, - disconnect_token: CancellationToken, -) { - loop { - tokio::select! { - _ = disconnect_token.cancelled() => { - break; - } - incoming_message = websocket_reader.next() => { - match incoming_message { - Some(Ok(WebSocketMessage::Text(text))) => { - if !forward_incoming_message( - &transport_event_tx, - &writer_tx_for_reader, - connection_id, - text.as_ref(), - ) - .await - { - break; - } - } - Some(Ok(WebSocketMessage::Ping(payload))) => { - match writer_control_tx.try_send(WebSocketMessage::Pong(payload)) { - Ok(()) => {} - Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => break, - Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { - warn!("websocket control queue full while replying to ping; closing connection"); - break; - } - } - } - Some(Ok(WebSocketMessage::Pong(_))) => {} - Some(Ok(WebSocketMessage::Close(_))) | None => break, - Some(Ok(WebSocketMessage::Binary(_))) => { - warn!("dropping unsupported binary websocket message"); - } - Some(Err(err)) => { - warn!("websocket receive error: {err}"); - break; - } - } - } - } - } -} - async fn forward_incoming_message( transport_event_tx: 
&mpsc::Sender, writer: &mpsc::Sender, @@ -745,7 +379,6 @@ pub(crate) async fn route_outgoing_envelope( mod tests { use super::*; use crate::error_code::OVERLOADED_ERROR_CODE; - use codex_app_server_protocol::CommandExecutionRequestApprovalSkillMetadata; use codex_app_server_protocol::ConfigWarningNotification; use codex_app_server_protocol::ServerNotification; use codex_utils_absolute_path::AbsolutePathBuf; @@ -1127,7 +760,7 @@ mod tests { } #[tokio::test] - async fn command_execution_request_approval_strips_experimental_fields_without_capability() { + async fn command_execution_request_approval_strips_additional_permissions_without_capability() { let connection_id = ConnectionId(8); let (writer_tx, mut writer_rx) = mpsc::channel(1); @@ -1168,12 +801,8 @@ mod tests { write: None, }, ), - macos: None, }, ), - skill_metadata: Some(CommandExecutionRequestApprovalSkillMetadata { - path_to_skills_md: PathBuf::from("/tmp/SKILLS.md"), - }), proposed_execpolicy_amendment: None, proposed_network_policy_amendments: None, available_decisions: None, @@ -1190,11 +819,10 @@ mod tests { .expect("request should be delivered to the connection"); let json = serde_json::to_value(message.message).expect("request should serialize"); assert_eq!(json["params"].get("additionalPermissions"), None); - assert_eq!(json["params"].get("skillMetadata"), None); } #[tokio::test] - async fn command_execution_request_approval_keeps_experimental_fields_with_capability() { + async fn command_execution_request_approval_keeps_additional_permissions_with_capability() { let connection_id = ConnectionId(9); let (writer_tx, mut writer_rx) = mpsc::channel(1); @@ -1235,12 +863,8 @@ mod tests { write: None, }, ), - macos: None, }, ), - skill_metadata: Some(CommandExecutionRequestApprovalSkillMetadata { - path_to_skills_md: PathBuf::from("/tmp/SKILLS.md"), - }), proposed_execpolicy_amendment: None, proposed_network_policy_amendments: None, available_decisions: None, @@ -1263,15 +887,8 @@ mod tests { "network": 
null, "fileSystem": { "read": [allowed_path], - "write": null, + "write": null, }, - "macos": null, - }) - ); - assert_eq!( - json["params"]["skillMetadata"], - json!({ - "pathToSkillsMd": "/tmp/SKILLS.md", }) ); } diff --git a/codex-rs/app-server/src/transport/stdio.rs b/codex-rs/app-server/src/transport/stdio.rs new file mode 100644 index 0000000000..4f2bf26745 --- /dev/null +++ b/codex-rs/app-server/src/transport/stdio.rs @@ -0,0 +1,88 @@ +use super::CHANNEL_CAPACITY; +use super::TransportEvent; +use super::forward_incoming_message; +use super::serialize_outgoing_message; +use crate::outgoing_message::ConnectionId; +use crate::outgoing_message::QueuedOutgoingMessage; +use std::io::ErrorKind; +use std::io::Result as IoResult; +use tokio::io; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; +use tracing::debug; +use tracing::error; +use tracing::info; + +pub(crate) async fn start_stdio_connection( + transport_event_tx: mpsc::Sender, + stdio_handles: &mut Vec>, +) -> IoResult<()> { + let connection_id = ConnectionId(0); + let (writer_tx, mut writer_rx) = mpsc::channel::(CHANNEL_CAPACITY); + let writer_tx_for_reader = writer_tx.clone(); + transport_event_tx + .send(TransportEvent::ConnectionOpened { + connection_id, + writer: writer_tx, + disconnect_sender: None, + }) + .await + .map_err(|_| std::io::Error::new(ErrorKind::BrokenPipe, "processor unavailable"))?; + + let transport_event_tx_for_reader = transport_event_tx.clone(); + stdio_handles.push(tokio::spawn(async move { + let stdin = io::stdin(); + let reader = BufReader::new(stdin); + let mut lines = reader.lines(); + + loop { + match lines.next_line().await { + Ok(Some(line)) => { + if !forward_incoming_message( + &transport_event_tx_for_reader, + &writer_tx_for_reader, + connection_id, + &line, + ) + .await + { + break; + } + } + Ok(None) => break, + Err(err) => { + error!("Failed reading stdin: {err}"); + break; + 
} + } + } + + let _ = transport_event_tx_for_reader + .send(TransportEvent::ConnectionClosed { connection_id }) + .await; + debug!("stdin reader finished (EOF)"); + })); + + stdio_handles.push(tokio::spawn(async move { + let mut stdout = io::stdout(); + while let Some(queued_message) = writer_rx.recv().await { + let Some(mut json) = serialize_outgoing_message(queued_message.message) else { + continue; + }; + json.push('\n'); + if let Err(err) = stdout.write_all(json.as_bytes()).await { + error!("Failed to write to stdout: {err}"); + break; + } + if let Some(write_complete_tx) = queued_message.write_complete_tx { + let _ = write_complete_tx.send(()); + } + } + info!("stdout writer exited (channel closed)"); + })); + + Ok(()) +} diff --git a/codex-rs/app-server/src/transport/websocket.rs b/codex-rs/app-server/src/transport/websocket.rs new file mode 100644 index 0000000000..05dfe24b05 --- /dev/null +++ b/codex-rs/app-server/src/transport/websocket.rs @@ -0,0 +1,308 @@ +use super::CHANNEL_CAPACITY; +use super::TransportEvent; +use super::auth::WebsocketAuthPolicy; +use super::auth::authorize_upgrade; +use super::auth::should_warn_about_unauthenticated_non_loopback_listener; +use super::forward_incoming_message; +use super::serialize_outgoing_message; +use crate::outgoing_message::ConnectionId; +use crate::outgoing_message::QueuedOutgoingMessage; +use axum::Router; +use axum::body::Body; +use axum::extract::ConnectInfo; +use axum::extract::State; +use axum::extract::ws::Message as WebSocketMessage; +use axum::extract::ws::WebSocket; +use axum::extract::ws::WebSocketUpgrade; +use axum::http::HeaderMap; +use axum::http::Request; +use axum::http::StatusCode; +use axum::http::header::ORIGIN; +use axum::middleware; +use axum::middleware::Next; +use axum::response::IntoResponse; +use axum::response::Response; +use axum::routing::any; +use axum::routing::get; +use futures::SinkExt; +use futures::StreamExt; +use owo_colors::OwoColorize; +use owo_colors::Stream; +use 
owo_colors::Style; +use std::io::Result as IoResult; +use std::net::SocketAddr; +use std::sync::Arc; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; +use tokio::net::TcpListener; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use tracing::error; +use tracing::info; +use tracing::warn; + +fn colorize(text: &str, style: Style) -> String { + text.if_supports_color(Stream::Stderr, |value| value.style(style)) + .to_string() +} + +#[allow(clippy::print_stderr)] +fn print_websocket_startup_banner(addr: SocketAddr) { + let title = colorize("codex app-server (WebSockets)", Style::new().bold().cyan()); + let listening_label = colorize("listening on:", Style::new().dimmed()); + let listen_url = colorize(&format!("ws://{addr}"), Style::new().green()); + let ready_label = colorize("readyz:", Style::new().dimmed()); + let ready_url = colorize(&format!("http://{addr}/readyz"), Style::new().green()); + let health_label = colorize("healthz:", Style::new().dimmed()); + let health_url = colorize(&format!("http://{addr}/healthz"), Style::new().green()); + let note_label = colorize("note:", Style::new().dimmed()); + eprintln!("{title}"); + eprintln!(" {listening_label} {listen_url}"); + eprintln!(" {ready_label} {ready_url}"); + eprintln!(" {health_label} {health_url}"); + if addr.ip().is_loopback() { + eprintln!( + " {note_label} binds localhost only (use SSH port-forwarding for remote access)" + ); + } else { + eprintln!( + " {note_label} websocket auth is opt-in in this build; configure `--ws-auth ...` before real remote use" + ); + } +} + +#[derive(Clone)] +struct WebSocketListenerState { + transport_event_tx: mpsc::Sender, + connection_counter: Arc, + auth_policy: Arc, +} + +async fn health_check_handler() -> StatusCode { + StatusCode::OK +} + +async fn reject_requests_with_origin_header( + request: Request, + next: Next, +) -> Result { + if request.headers().contains_key(ORIGIN) { + warn!( + method = 
%request.method(), + uri = %request.uri(), + "rejecting websocket listener request with Origin header" + ); + Err(StatusCode::FORBIDDEN) + } else { + Ok(next.run(request).await) + } +} + +async fn websocket_upgrade_handler( + websocket: WebSocketUpgrade, + ConnectInfo(peer_addr): ConnectInfo, + State(state): State, + headers: HeaderMap, +) -> impl IntoResponse { + if let Err(err) = authorize_upgrade(&headers, state.auth_policy.as_ref()) { + warn!( + %peer_addr, + message = err.message(), + "rejecting websocket client during upgrade" + ); + return (err.status_code(), err.message()).into_response(); + } + let connection_id = ConnectionId(state.connection_counter.fetch_add(1, Ordering::Relaxed)); + info!(%peer_addr, "websocket client connected"); + websocket + .on_upgrade(move |stream| async move { + run_websocket_connection(connection_id, stream, state.transport_event_tx).await; + }) + .into_response() +} + +pub(crate) async fn start_websocket_acceptor( + bind_address: SocketAddr, + transport_event_tx: mpsc::Sender, + shutdown_token: CancellationToken, + auth_policy: WebsocketAuthPolicy, +) -> IoResult> { + if should_warn_about_unauthenticated_non_loopback_listener(bind_address, &auth_policy) { + warn!( + %bind_address, + "starting non-loopback websocket listener without auth; websocket auth is opt-in for now and will become the default in a future release" + ); + } + let listener = TcpListener::bind(bind_address).await?; + let local_addr = listener.local_addr()?; + print_websocket_startup_banner(local_addr); + info!("app-server websocket listening on ws://{local_addr}"); + + let router = Router::new() + .route("/readyz", get(health_check_handler)) + .route("/healthz", get(health_check_handler)) + .fallback(any(websocket_upgrade_handler)) + .layer(middleware::from_fn(reject_requests_with_origin_header)) + .with_state(WebSocketListenerState { + transport_event_tx, + connection_counter: Arc::new(AtomicU64::new(1)), + auth_policy: Arc::new(auth_policy), + }); + let 
server = axum::serve( + listener, + router.into_make_service_with_connect_info::(), + ) + .with_graceful_shutdown(async move { + shutdown_token.cancelled().await; + }); + Ok(tokio::spawn(async move { + if let Err(err) = server.await { + error!("websocket acceptor failed: {err}"); + } + info!("websocket acceptor shutting down"); + })) +} + +async fn run_websocket_connection( + connection_id: ConnectionId, + websocket_stream: WebSocket, + transport_event_tx: mpsc::Sender, +) { + let (writer_tx, writer_rx) = mpsc::channel::(CHANNEL_CAPACITY); + let writer_tx_for_reader = writer_tx.clone(); + let disconnect_token = CancellationToken::new(); + if transport_event_tx + .send(TransportEvent::ConnectionOpened { + connection_id, + writer: writer_tx, + disconnect_sender: Some(disconnect_token.clone()), + }) + .await + .is_err() + { + return; + } + + let (websocket_writer, websocket_reader) = websocket_stream.split(); + let (writer_control_tx, writer_control_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let mut outbound_task = tokio::spawn(run_websocket_outbound_loop( + websocket_writer, + writer_rx, + writer_control_rx, + disconnect_token.clone(), + )); + let mut inbound_task = tokio::spawn(run_websocket_inbound_loop( + websocket_reader, + transport_event_tx.clone(), + writer_tx_for_reader, + writer_control_tx, + connection_id, + disconnect_token.clone(), + )); + + tokio::select! { + _ = &mut outbound_task => { + disconnect_token.cancel(); + inbound_task.abort(); + } + _ = &mut inbound_task => { + disconnect_token.cancel(); + outbound_task.abort(); + } + } + + let _ = transport_event_tx + .send(TransportEvent::ConnectionClosed { connection_id }) + .await; +} + +async fn run_websocket_outbound_loop( + mut websocket_writer: futures::stream::SplitSink, + mut writer_rx: mpsc::Receiver, + mut writer_control_rx: mpsc::Receiver, + disconnect_token: CancellationToken, +) { + loop { + tokio::select! 
{ + _ = disconnect_token.cancelled() => { + break; + } + message = writer_control_rx.recv() => { + let Some(message) = message else { + break; + }; + if websocket_writer.send(message).await.is_err() { + break; + } + } + queued_message = writer_rx.recv() => { + let Some(queued_message) = queued_message else { + break; + }; + let Some(json) = serialize_outgoing_message(queued_message.message) else { + continue; + }; + if websocket_writer.send(WebSocketMessage::Text(json.into())).await.is_err() { + break; + } + if let Some(write_complete_tx) = queued_message.write_complete_tx { + let _ = write_complete_tx.send(()); + } + } + } + } +} + +async fn run_websocket_inbound_loop( + mut websocket_reader: futures::stream::SplitStream, + transport_event_tx: mpsc::Sender, + writer_tx_for_reader: mpsc::Sender, + writer_control_tx: mpsc::Sender, + connection_id: ConnectionId, + disconnect_token: CancellationToken, +) { + loop { + tokio::select! { + _ = disconnect_token.cancelled() => { + break; + } + incoming_message = websocket_reader.next() => { + match incoming_message { + Some(Ok(WebSocketMessage::Text(text))) => { + if !forward_incoming_message( + &transport_event_tx, + &writer_tx_for_reader, + connection_id, + text.as_ref(), + ) + .await + { + break; + } + } + Some(Ok(WebSocketMessage::Ping(payload))) => { + match writer_control_tx.try_send(WebSocketMessage::Pong(payload)) { + Ok(()) => {} + Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => break, + Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { + warn!("websocket control queue full while replying to ping; closing connection"); + break; + } + } + } + Some(Ok(WebSocketMessage::Pong(_))) => {} + Some(Ok(WebSocketMessage::Close(_))) | None => break, + Some(Ok(WebSocketMessage::Binary(_))) => { + warn!("dropping unsupported binary websocket message"); + } + Some(Err(err)) => { + warn!("websocket receive error: {err}"); + break; + } + } + } + } + } +} diff --git a/codex-rs/app-server/tests/common/Cargo.toml 
b/codex-rs/app-server/tests/common/Cargo.toml index 851ba9556d..4eef03e969 100644 --- a/codex-rs/app-server/tests/common/Cargo.toml +++ b/codex-rs/app-server/tests/common/Cargo.toml @@ -14,6 +14,7 @@ chrono = { workspace = true } codex-app-server-protocol = { workspace = true } codex-core = { workspace = true } codex-features = { workspace = true } +codex-login = { workspace = true } codex-protocol = { workspace = true } codex-utils-cargo-bin = { workspace = true } serde = { workspace = true } diff --git a/codex-rs/app-server/tests/common/auth_fixtures.rs b/codex-rs/app-server/tests/common/auth_fixtures.rs index c52109ac5b..d814dece46 100644 --- a/codex-rs/app-server/tests/common/auth_fixtures.rs +++ b/codex-rs/app-server/tests/common/auth_fixtures.rs @@ -10,8 +10,8 @@ use codex_app_server_protocol::AuthMode; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::auth::AuthDotJson; use codex_core::auth::save_auth; -use codex_core::token_data::TokenData; -use codex_core::token_data::parse_chatgpt_jwt_claims; +use codex_login::token_data::TokenData; +use codex_login::token_data::parse_chatgpt_jwt_claims; use serde_json::json; /// Builder for writing a fake ChatGPT auth.json in tests. 
diff --git a/codex-rs/app-server/tests/suite/v2/app_list.rs b/codex-rs/app-server/tests/suite/v2/app_list.rs index a19cdad857..23ffa80a67 100644 --- a/codex-rs/app-server/tests/suite/v2/app_list.rs +++ b/codex-rs/app-server/tests/suite/v2/app_list.rs @@ -1,4 +1,5 @@ use std::borrow::Cow; +use std::collections::BTreeMap; use std::collections::HashMap; use std::sync::Arc; use std::sync::Mutex as StdMutex; @@ -27,6 +28,7 @@ use codex_app_server_protocol::AppScreenshot; use codex_app_server_protocol::AppsListParams; use codex_app_server_protocol::AppsListResponse; use codex_app_server_protocol::AuthMode; +use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; @@ -1201,6 +1203,108 @@ async fn list_apps_force_refetch_patches_updates_from_cached_snapshots() -> Resu Ok(()) } +#[tokio::test] +async fn experimental_feature_enablement_set_refreshes_apps_list_when_apps_turn_on() -> Result<()> { + let initial_connectors = vec![AppInfo { + id: "alpha".to_string(), + name: "Alpha".to_string(), + description: Some("Alpha v1".to_string()), + logo_url: None, + logo_url_dark: None, + distribution_channel: None, + branding: None, + app_metadata: None, + labels: None, + install_url: None, + is_accessible: false, + is_enabled: true, + plugin_display_names: Vec::new(), + }]; + let (server_url, server_handle, server_control) = start_apps_server_with_delays_and_control( + initial_connectors, + Vec::new(), + Duration::ZERO, + Duration::ZERO, + ) + .await?; + + let codex_home = TempDir::new()?; + write_connectors_config(codex_home.path(), &server_url)?; + write_chatgpt_auth( + codex_home.path(), + ChatGptAuthFixture::new("chatgpt-token") + .account_id("account-123") + .chatgpt_user_id("user-enable-refresh") + .chatgpt_account_id("account-123"), + AuthCredentialsStoreMode::File, + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; 
+ timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; + + let disable_request = mcp + .send_experimental_feature_enablement_set_request(ExperimentalFeatureEnablementSetParams { + enablement: BTreeMap::from([("apps".to_string(), false)]), + }) + .await?; + let _disable_response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(disable_request)), + ) + .await??; + + server_control.set_connectors(vec![AppInfo { + id: "alpha".to_string(), + name: "Alpha".to_string(), + description: Some("Alpha v2".to_string()), + logo_url: None, + logo_url_dark: None, + distribution_channel: None, + branding: None, + app_metadata: None, + labels: None, + install_url: None, + is_accessible: false, + is_enabled: true, + plugin_display_names: Vec::new(), + }]); + server_control.set_tools(vec![connector_tool("alpha", "Alpha App")?]); + + let enable_request = mcp + .send_experimental_feature_enablement_set_request(ExperimentalFeatureEnablementSetParams { + enablement: BTreeMap::from([("apps".to_string(), true)]), + }) + .await?; + let _enable_response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(enable_request)), + ) + .await??; + + let update = read_app_list_updated_notification(&mut mcp).await?; + assert_eq!( + update.data, + vec![AppInfo { + id: "alpha".to_string(), + name: "Alpha".to_string(), + description: Some("Alpha v2".to_string()), + logo_url: None, + logo_url_dark: None, + distribution_channel: None, + branding: None, + app_metadata: None, + labels: None, + install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()), + is_accessible: true, + is_enabled: true, + plugin_display_names: Vec::new(), + }] + ); + + server_handle.abort(); + Ok(()) +} + async fn read_app_list_updated_notification( mcp: &mut McpProcess, ) -> Result { diff --git a/codex-rs/arg0/Cargo.toml b/codex-rs/arg0/Cargo.toml index abe7277d94..cd409fedd8 100644 --- a/codex-rs/arg0/Cargo.toml 
+++ b/codex-rs/arg0/Cargo.toml @@ -15,6 +15,7 @@ workspace = true anyhow = { workspace = true } codex-apply-patch = { workspace = true } codex-linux-sandbox = { workspace = true } +codex-sandboxing = { workspace = true } codex-shell-escalation = { workspace = true } codex-utils-home-dir = { workspace = true } dotenvy = { workspace = true } diff --git a/codex-rs/arg0/src/lib.rs b/codex-rs/arg0/src/lib.rs index e210783a35..efad6c2481 100644 --- a/codex-rs/arg0/src/lib.rs +++ b/codex-rs/arg0/src/lib.rs @@ -4,12 +4,12 @@ use std::path::Path; use std::path::PathBuf; use codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1; +use codex_sandboxing::landlock::CODEX_LINUX_SANDBOX_ARG0; use codex_utils_home_dir::find_codex_home; #[cfg(unix)] use std::os::unix::fs::symlink; use tempfile::TempDir; -const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox"; const APPLY_PATCH_ARG0: &str = "apply_patch"; const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch"; #[cfg(unix)] @@ -19,6 +19,12 @@ const TOKIO_WORKER_STACK_SIZE_BYTES: usize = 16 * 1024 * 1024; #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct Arg0DispatchPaths { + /// Stable path to the current Codex executable for child re-execs. + /// + /// Prefer this over [`std::env::current_exe()`] in code that may run under + /// a test harness, where `current_exe()` can point at the harness binary + /// instead of the real Codex CLI. + pub codex_self_exe: Option, pub codex_linux_sandbox_exe: Option, pub main_execve_wrapper_exe: Option, } @@ -79,7 +85,7 @@ pub fn arg0_dispatch() -> Option { } } - if exe_name == LINUX_SANDBOX_ARG0 { + if exe_name == CODEX_LINUX_SANDBOX_ARG0 { // Safety: [`run_main`] never returns. codex_linux_sandbox::run_main(); } else if exe_name == APPLY_PATCH_ARG0 || exe_name == MISSPELLED_APPLY_PATCH_ARG0 { @@ -133,8 +139,10 @@ pub fn arg0_dispatch() -> Option { /// /// 1. Load `.env` values from `~/.codex/.env` before creating any threads. /// 2. Construct a Tokio multi-thread runtime. -/// 3. 
Derive the path to the current executable (so children can re-invoke the -/// sandbox) when running on Linux. +/// 3. Capture the current executable path and derive the +/// `codex-linux-sandbox` helper path (falling back to the current +/// executable if needed) so children can re-invoke the sandbox when running +/// on Linux. /// 4. Execute the provided async `main_fn` inside that runtime, forwarding any /// error. Note that `main_fn` receives [`Arg0DispatchPaths`], which /// contains the helper executable paths needed to construct @@ -150,7 +158,7 @@ where // Retain the TempDir so it exists for the lifetime of the invocation of // this executable. Admittedly, we could invoke `keep()` on it, but it // would be nice to avoid leaving temporary directories behind, if possible. - let path_entry = arg0_dispatch(); + let path_entry_guard = arg0_dispatch(); // Regular invocation – create a Tokio runtime and execute the provided // async entry-point. @@ -158,16 +166,13 @@ where runtime.block_on(async move { let current_exe = std::env::current_exe().ok(); let paths = Arg0DispatchPaths { + codex_self_exe: current_exe.clone(), codex_linux_sandbox_exe: if cfg!(target_os = "linux") { - current_exe.or_else(|| { - path_entry - .as_ref() - .and_then(|path_entry| path_entry.paths().codex_linux_sandbox_exe.clone()) - }) + linux_sandbox_exe_path(path_entry_guard.as_ref(), current_exe) } else { None }, - main_execve_wrapper_exe: path_entry + main_execve_wrapper_exe: path_entry_guard .as_ref() .and_then(|path_entry| path_entry.paths().main_execve_wrapper_exe.clone()), }; @@ -176,6 +181,18 @@ where }) } +fn linux_sandbox_exe_path( + path_entry_guard: Option<&Arg0PathEntryGuard>, + current_exe: Option, +) -> Option { + // Prefer the `codex-linux-sandbox` alias when available so callers can + // re-exec through a path whose basename still triggers arg0 dispatch on + // bubblewrap builds that do not support `--argv0`. 
+ path_entry_guard + .and_then(|path_entry| path_entry.paths().codex_linux_sandbox_exe.clone()) + .or(current_exe) +} + fn build_runtime() -> anyhow::Result { let mut builder = tokio::runtime::Builder::new_multi_thread(); builder.enable_all(); @@ -276,7 +293,7 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result std::io::Result std::io::Result> { #[cfg(test)] mod tests { + use super::Arg0DispatchPaths; + use super::Arg0PathEntryGuard; use super::LOCK_FILENAME; use super::janitor_cleanup; + use super::linux_sandbox_exe_path; use std::fs; use std::fs::File; use std::path::Path; + use std::path::PathBuf; + use tempfile::TempDir; fn create_lock(dir: &Path) -> std::io::Result { let lock_path = dir.join(LOCK_FILENAME); @@ -413,6 +436,28 @@ mod tests { .open(lock_path) } + #[test] + fn linux_sandbox_exe_path_prefers_codex_linux_sandbox_alias() -> std::io::Result<()> { + let temp_dir = TempDir::new()?; + let lock_file = create_lock(temp_dir.path())?; + let alias_path = temp_dir.path().join("codex-linux-sandbox"); + let path_entry = Arg0PathEntryGuard::new( + temp_dir, + lock_file, + Arg0DispatchPaths { + codex_self_exe: Some(PathBuf::from("/usr/bin/codex")), + codex_linux_sandbox_exe: Some(alias_path.clone()), + main_execve_wrapper_exe: None, + }, + ); + + assert_eq!( + linux_sandbox_exe_path(Some(&path_entry), Some(PathBuf::from("/usr/bin/codex"))), + Some(alias_path), + ); + Ok(()) + } + #[test] fn janitor_skips_dirs_without_lock_file() -> std::io::Result<()> { let root = tempfile::tempdir()?; diff --git a/codex-rs/artifacts/Cargo.toml b/codex-rs/artifacts/Cargo.toml deleted file mode 100644 index 0c5bbfc25b..0000000000 --- a/codex-rs/artifacts/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "codex-artifacts" -version.workspace = true -edition.workspace = true -license.workspace = true - -[dependencies] -codex-package-manager = { workspace = true } -reqwest = { workspace = true } -serde = { workspace = true, features = ["derive"] } -serde_json 
= { workspace = true } -tempfile = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["fs", "io-util", "process", "time"] } -url = { workspace = true } -which = { workspace = true } - -[lints] -workspace = true - -[dev-dependencies] -flate2 = { workspace = true } -pretty_assertions = { workspace = true } -sha2 = { workspace = true } -tar = { workspace = true } -tokio = { workspace = true, features = ["fs", "io-util", "macros", "process", "rt", "rt-multi-thread", "time"] } -wiremock = { workspace = true } -zip = { workspace = true } diff --git a/codex-rs/artifacts/README.md b/codex-rs/artifacts/README.md deleted file mode 100644 index dae83f3e8e..0000000000 --- a/codex-rs/artifacts/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# codex-artifacts - -Runtime and process-management helpers for Codex artifact generation. - -This crate has two main responsibilities: - -- locating, validating, and optionally downloading the pinned artifact runtime -- spawning the artifact build or render command against that runtime - -## Module layout - -- `src/client.rs` - Runs build and render commands once a runtime has been resolved. -- `src/runtime/manager.rs` - Defines the release locator and the package-manager-backed runtime installer. -- `src/runtime/installed.rs` - Loads an extracted runtime from disk and validates its manifest and entrypoints. -- `src/runtime/js_runtime.rs` - Chooses the JavaScript executable to use for artifact execution. -- `src/runtime/manifest.rs` - Manifest types for release metadata and extracted runtimes. -- `src/runtime/error.rs` - Public runtime-loading and installation errors. -- `src/tests.rs` - Crate-level tests that exercise the public API and integration seams. - -## Public API - -- `ArtifactRuntimeManager` - Resolves or installs a runtime package into `~/.codex/packages/artifacts/...`. 
-- `load_cached_runtime` - Reads a previously installed runtime from a caller-provided cache root without attempting a download. -- `is_js_runtime_available` - Checks whether artifact execution is possible with either a cached runtime or a host JS runtime. -- `ArtifactsClient` - Executes artifact build or render requests using either a managed or preinstalled runtime. diff --git a/codex-rs/artifacts/src/client.rs b/codex-rs/artifacts/src/client.rs deleted file mode 100644 index d0a10ed129..0000000000 --- a/codex-rs/artifacts/src/client.rs +++ /dev/null @@ -1,229 +0,0 @@ -use crate::ArtifactRuntimeError; -use crate::ArtifactRuntimeManager; -use crate::InstalledArtifactRuntime; -use std::collections::BTreeMap; -use std::path::PathBuf; -use std::process::Stdio; -use std::time::Duration; -use tempfile::TempDir; -use thiserror::Error; -use tokio::fs; -use tokio::io::AsyncReadExt; -use tokio::process::Command; -use tokio::time::timeout; -use url::Url; - -const DEFAULT_EXECUTION_TIMEOUT: Duration = Duration::from_secs(30); - -/// Executes artifact build commands against a resolved runtime. -#[derive(Clone, Debug)] -pub struct ArtifactsClient { - runtime_source: RuntimeSource, -} - -#[derive(Clone, Debug)] -#[allow(clippy::large_enum_variant)] -enum RuntimeSource { - Managed(ArtifactRuntimeManager), - Installed(InstalledArtifactRuntime), -} - -impl ArtifactsClient { - /// Creates a client that lazily resolves or downloads the runtime on demand. - pub fn from_runtime_manager(runtime_manager: ArtifactRuntimeManager) -> Self { - Self { - runtime_source: RuntimeSource::Managed(runtime_manager), - } - } - - /// Creates a client pinned to an already loaded runtime. - pub fn from_installed_runtime(runtime: InstalledArtifactRuntime) -> Self { - Self { - runtime_source: RuntimeSource::Installed(runtime), - } - } - - /// Executes artifact-building JavaScript against the configured runtime. 
- pub async fn execute_build( - &self, - request: ArtifactBuildRequest, - ) -> Result { - let runtime = self.resolve_runtime().await?; - let js_runtime = runtime.resolve_js_runtime()?; - let staging_dir = TempDir::new().map_err(|source| ArtifactsError::Io { - context: "failed to create build staging directory".to_string(), - source, - })?; - let script_path = staging_dir.path().join("artifact-build.mjs"); - let build_entrypoint_url = - Url::from_file_path(runtime.build_js_path()).map_err(|()| ArtifactsError::Io { - context: format!( - "failed to convert artifact build entrypoint to a file URL: {}", - runtime.build_js_path().display() - ), - source: std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "invalid artifact build entrypoint path", - ), - })?; - let wrapped_script = build_wrapped_script(&build_entrypoint_url, &request.source); - fs::write(&script_path, wrapped_script) - .await - .map_err(|source| ArtifactsError::Io { - context: format!("failed to write {}", script_path.display()), - source, - })?; - - let mut command = Command::new(js_runtime.executable_path()); - command.arg(&script_path).current_dir(&request.cwd); - command.stdout(Stdio::piped()).stderr(Stdio::piped()); - if js_runtime.requires_electron_run_as_node() { - command.env("ELECTRON_RUN_AS_NODE", "1"); - } - for (key, value) in &request.env { - command.env(key, value); - } - - run_command( - command, - request.timeout.unwrap_or(DEFAULT_EXECUTION_TIMEOUT), - ) - .await - } - - async fn resolve_runtime(&self) -> Result { - match &self.runtime_source { - RuntimeSource::Installed(runtime) => Ok(runtime.clone()), - RuntimeSource::Managed(manager) => manager.ensure_installed().await.map_err(Into::into), - } - } -} - -/// Request payload for the artifact build command. 
-#[derive(Clone, Debug, Default)] -pub struct ArtifactBuildRequest { - pub source: String, - pub cwd: PathBuf, - pub timeout: Option, - pub env: BTreeMap, -} - -/// Captured stdout, stderr, and exit status from an artifact subprocess. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ArtifactCommandOutput { - pub exit_code: Option, - pub stdout: String, - pub stderr: String, -} - -impl ArtifactCommandOutput { - /// Returns whether the subprocess exited successfully. - pub fn success(&self) -> bool { - self.exit_code == Some(0) - } -} - -/// Errors raised while spawning or awaiting artifact subprocesses. -#[derive(Debug, Error)] -pub enum ArtifactsError { - #[error(transparent)] - Runtime(#[from] ArtifactRuntimeError), - #[error("{context}")] - Io { - context: String, - #[source] - source: std::io::Error, - }, - #[error("artifact command timed out after {timeout:?}")] - TimedOut { timeout: Duration }, -} - -fn build_wrapped_script(build_entrypoint_url: &Url, source: &str) -> String { - let mut wrapped = String::new(); - wrapped.push_str("const artifactTool = await import("); - wrapped.push_str( - &serde_json::to_string(build_entrypoint_url.as_str()).unwrap_or_else(|error| { - panic!("artifact build entrypoint URL must serialize: {error}") - }), - ); - wrapped.push_str(");\n"); - wrapped.push_str( - r#"globalThis.artifactTool = artifactTool; -for (const [name, value] of Object.entries(artifactTool)) { - if (name === "default" || Object.prototype.hasOwnProperty.call(globalThis, name)) { - continue; - } - globalThis[name] = value; -} -"#, - ); - wrapped.push_str(source); - wrapped.push('\n'); - wrapped -} - -async fn run_command( - mut command: Command, - execution_timeout: Duration, -) -> Result { - let mut child = command.spawn().map_err(|source| ArtifactsError::Io { - context: "failed to spawn artifact command".to_string(), - source, - })?; - let mut stdout = child.stdout.take().ok_or_else(|| ArtifactsError::Io { - context: "artifact command stdout was not 
captured".to_string(), - source: std::io::Error::other("missing stdout pipe"), - })?; - let mut stderr = child.stderr.take().ok_or_else(|| ArtifactsError::Io { - context: "artifact command stderr was not captured".to_string(), - source: std::io::Error::other("missing stderr pipe"), - })?; - let stdout_task = tokio::spawn(async move { - let mut bytes = Vec::new(); - stdout.read_to_end(&mut bytes).await.map(|_| bytes) - }); - let stderr_task = tokio::spawn(async move { - let mut bytes = Vec::new(); - stderr.read_to_end(&mut bytes).await.map(|_| bytes) - }); - - let status = match timeout(execution_timeout, child.wait()).await { - Ok(result) => result.map_err(|source| ArtifactsError::Io { - context: "failed while waiting for artifact command".to_string(), - source, - })?, - Err(_) => { - let _ = child.kill().await; - let _ = child.wait().await; - return Err(ArtifactsError::TimedOut { - timeout: execution_timeout, - }); - } - }; - let stdout_bytes = stdout_task - .await - .map_err(|source| ArtifactsError::Io { - context: "failed to join stdout reader".to_string(), - source: std::io::Error::other(source.to_string()), - })? - .map_err(|source| ArtifactsError::Io { - context: "failed to read artifact command stdout".to_string(), - source, - })?; - let stderr_bytes = stderr_task - .await - .map_err(|source| ArtifactsError::Io { - context: "failed to join stderr reader".to_string(), - source: std::io::Error::other(source.to_string()), - })? 
- .map_err(|source| ArtifactsError::Io { - context: "failed to read artifact command stderr".to_string(), - source, - })?; - - Ok(ArtifactCommandOutput { - exit_code: status.code(), - stdout: String::from_utf8_lossy(&stdout_bytes).into_owned(), - stderr: String::from_utf8_lossy(&stderr_bytes).into_owned(), - }) -} diff --git a/codex-rs/artifacts/src/lib.rs b/codex-rs/artifacts/src/lib.rs deleted file mode 100644 index 812c3db853..0000000000 --- a/codex-rs/artifacts/src/lib.rs +++ /dev/null @@ -1,24 +0,0 @@ -mod client; -mod runtime; -#[cfg(all(test, not(windows)))] -mod tests; - -pub use client::ArtifactBuildRequest; -pub use client::ArtifactCommandOutput; -pub use client::ArtifactsClient; -pub use client::ArtifactsError; -pub use runtime::ArtifactRuntimeError; -pub use runtime::ArtifactRuntimeManager; -pub use runtime::ArtifactRuntimeManagerConfig; -pub use runtime::ArtifactRuntimePlatform; -pub use runtime::ArtifactRuntimeReleaseLocator; -pub use runtime::DEFAULT_CACHE_ROOT_RELATIVE; -pub use runtime::DEFAULT_RELEASE_BASE_URL; -pub use runtime::DEFAULT_RELEASE_TAG_PREFIX; -pub use runtime::InstalledArtifactRuntime; -pub use runtime::JsRuntime; -pub use runtime::JsRuntimeKind; -pub use runtime::ReleaseManifest; -pub use runtime::can_manage_artifact_runtime; -pub use runtime::is_js_runtime_available; -pub use runtime::load_cached_runtime; diff --git a/codex-rs/artifacts/src/runtime/error.rs b/codex-rs/artifacts/src/runtime/error.rs deleted file mode 100644 index 9a7090d468..0000000000 --- a/codex-rs/artifacts/src/runtime/error.rs +++ /dev/null @@ -1,28 +0,0 @@ -use codex_package_manager::PackageManagerError; -use std::path::PathBuf; -use thiserror::Error; - -/// Errors raised while locating, validating, or installing an artifact runtime. 
-#[derive(Debug, Error)] -pub enum ArtifactRuntimeError { - #[error(transparent)] - PackageManager(#[from] PackageManagerError), - #[error("{context}")] - Io { - context: String, - #[source] - source: std::io::Error, - }, - #[error("invalid package metadata at {path}")] - InvalidPackageMetadata { - path: PathBuf, - #[source] - source: serde_json::Error, - }, - #[error("runtime path `{0}` is invalid")] - InvalidRuntimePath(String), - #[error( - "no compatible JavaScript runtime found for artifact runtime at {root_dir}; install Node or the Codex desktop app" - )] - MissingJsRuntime { root_dir: PathBuf }, -} diff --git a/codex-rs/artifacts/src/runtime/installed.rs b/codex-rs/artifacts/src/runtime/installed.rs deleted file mode 100644 index 76e3c6d132..0000000000 --- a/codex-rs/artifacts/src/runtime/installed.rs +++ /dev/null @@ -1,283 +0,0 @@ -use super::ArtifactRuntimeError; -use super::ArtifactRuntimePlatform; -use super::JsRuntime; -use super::codex_app_runtime_candidates; -use super::resolve_js_runtime_from_candidates; -use super::system_electron_runtime; -use super::system_node_runtime; -use std::collections::BTreeMap; -use std::path::Component; -use std::path::Path; -use std::path::PathBuf; - -const ARTIFACT_TOOL_PACKAGE_NAME: &str = "@oai/artifact-tool"; - -/// Loads a previously installed runtime from a caller-provided cache root. 
-pub fn load_cached_runtime( - cache_root: &Path, - runtime_version: &str, -) -> Result { - let platform = ArtifactRuntimePlatform::detect_current()?; - let install_dir = cached_runtime_install_dir(cache_root, runtime_version, platform); - if !install_dir.exists() { - return Err(ArtifactRuntimeError::Io { - context: format!( - "artifact runtime {runtime_version} is not installed at {}", - install_dir.display() - ), - source: std::io::Error::new(std::io::ErrorKind::NotFound, "missing artifact runtime"), - }); - } - - InstalledArtifactRuntime::load(install_dir, platform) -} - -/// A validated runtime installation extracted into the local package cache. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct InstalledArtifactRuntime { - root_dir: PathBuf, - runtime_version: String, - platform: ArtifactRuntimePlatform, - build_js_path: PathBuf, -} - -impl InstalledArtifactRuntime { - /// Creates an installed-runtime value from prevalidated paths. - pub fn new( - root_dir: PathBuf, - runtime_version: String, - platform: ArtifactRuntimePlatform, - build_js_path: PathBuf, - ) -> Self { - Self { - root_dir, - runtime_version, - platform, - build_js_path, - } - } - - /// Loads and validates an extracted runtime directory. - pub fn load( - root_dir: PathBuf, - platform: ArtifactRuntimePlatform, - ) -> Result { - let package_metadata = load_package_metadata(&root_dir)?; - let build_js_path = - resolve_relative_runtime_path(&root_dir, &package_metadata.build_js_relative_path)?; - verify_required_runtime_path(&build_js_path)?; - - Ok(Self::new( - root_dir, - package_metadata.version, - platform, - build_js_path, - )) - } - - /// Returns the extracted runtime root directory. - pub fn root_dir(&self) -> &Path { - &self.root_dir - } - - /// Returns the runtime version recorded in `package.json`. - pub fn runtime_version(&self) -> &str { - &self.runtime_version - } - - /// Returns the platform this runtime was installed for. 
- pub fn platform(&self) -> ArtifactRuntimePlatform { - self.platform - } - - /// Returns the artifact build entrypoint path. - pub fn build_js_path(&self) -> &Path { - &self.build_js_path - } - - /// Resolves the best executable to use for artifact commands. - /// - /// Preference order is a machine Node install, then Electron from the - /// machine or a Codex desktop app bundle. - pub fn resolve_js_runtime(&self) -> Result { - resolve_js_runtime_from_candidates( - system_node_runtime(), - system_electron_runtime(), - codex_app_runtime_candidates(), - ) - .ok_or_else(|| ArtifactRuntimeError::MissingJsRuntime { - root_dir: self.root_dir.clone(), - }) - } -} - -pub(crate) fn cached_runtime_install_dir( - cache_root: &Path, - runtime_version: &str, - platform: ArtifactRuntimePlatform, -) -> PathBuf { - cache_root.join(runtime_version).join(platform.as_str()) -} - -pub(crate) fn default_cached_runtime_root(codex_home: &Path) -> PathBuf { - codex_home.join(super::DEFAULT_CACHE_ROOT_RELATIVE) -} - -fn resolve_relative_runtime_path( - root_dir: &Path, - relative_path: &str, -) -> Result { - let relative = Path::new(relative_path); - if relative.as_os_str().is_empty() || relative.is_absolute() { - return Err(ArtifactRuntimeError::InvalidRuntimePath( - relative_path.to_string(), - )); - } - if relative.components().any(|component| { - matches!( - component, - Component::ParentDir | Component::Prefix(_) | Component::RootDir - ) - }) { - return Err(ArtifactRuntimeError::InvalidRuntimePath( - relative_path.to_string(), - )); - } - Ok(root_dir.join(relative)) -} - -fn verify_required_runtime_path(path: &Path) -> Result<(), ArtifactRuntimeError> { - if path.is_file() { - return Ok(()); - } - - Err(ArtifactRuntimeError::Io { - context: format!("required runtime file is missing: {}", path.display()), - source: std::io::Error::new(std::io::ErrorKind::NotFound, "missing runtime file"), - }) -} - -pub(crate) fn detect_runtime_root(extraction_root: &Path) -> Result { - if 
is_runtime_root(extraction_root) { - return Ok(extraction_root.to_path_buf()); - } - - let mut directory_candidates = Vec::new(); - for entry in std::fs::read_dir(extraction_root).map_err(|source| ArtifactRuntimeError::Io { - context: format!("failed to read {}", extraction_root.display()), - source, - })? { - let entry = entry.map_err(|source| ArtifactRuntimeError::Io { - context: format!("failed to read entry in {}", extraction_root.display()), - source, - })?; - let path = entry.path(); - if path.is_dir() { - directory_candidates.push(path); - } - } - - if directory_candidates.len() == 1 { - let candidate = &directory_candidates[0]; - if is_runtime_root(candidate) { - return Ok(candidate.clone()); - } - } - - Err(ArtifactRuntimeError::Io { - context: format!( - "failed to detect artifact runtime root under {}", - extraction_root.display() - ), - source: std::io::Error::new( - std::io::ErrorKind::NotFound, - "missing artifact runtime root", - ), - }) -} - -fn is_runtime_root(root_dir: &Path) -> bool { - let Ok(package_metadata) = load_package_metadata(root_dir) else { - return false; - }; - let Ok(build_js_path) = - resolve_relative_runtime_path(root_dir, &package_metadata.build_js_relative_path) - else { - return false; - }; - - build_js_path.is_file() -} - -struct PackageMetadata { - version: String, - build_js_relative_path: String, -} - -fn load_package_metadata(root_dir: &Path) -> Result { - #[derive(serde::Deserialize)] - struct PackageJson { - name: String, - version: String, - exports: PackageExports, - } - - #[derive(serde::Deserialize)] - #[serde(untagged)] - enum PackageExports { - Main(String), - Map(BTreeMap), - } - - impl PackageExports { - fn build_entrypoint(&self) -> Option<&str> { - match self { - Self::Main(path) => Some(path), - Self::Map(exports) => exports.get(".").map(String::as_str), - } - } - } - - let package_json_path = root_dir.join("package.json"); - let package_json_bytes = - std::fs::read(&package_json_path).map_err(|source| 
ArtifactRuntimeError::Io { - context: format!("failed to read {}", package_json_path.display()), - source, - })?; - let package_json = - serde_json::from_slice::(&package_json_bytes).map_err(|source| { - ArtifactRuntimeError::InvalidPackageMetadata { - path: package_json_path.clone(), - source, - } - })?; - - if package_json.name != ARTIFACT_TOOL_PACKAGE_NAME { - return Err(ArtifactRuntimeError::Io { - context: format!( - "unsupported artifact runtime package at {}; expected name `{ARTIFACT_TOOL_PACKAGE_NAME}`, got `{}`", - package_json_path.display(), - package_json.name - ), - source: std::io::Error::new( - std::io::ErrorKind::InvalidData, - "unsupported package name", - ), - }); - } - - let Some(build_js_relative_path) = package_json.exports.build_entrypoint() else { - return Err(ArtifactRuntimeError::Io { - context: format!( - "unsupported artifact runtime package at {}; expected `exports[\".\"]` to point at the JS entrypoint", - package_json_path.display() - ), - source: std::io::Error::new(std::io::ErrorKind::InvalidData, "missing package export"), - }); - }; - - Ok(PackageMetadata { - version: package_json.version, - build_js_relative_path: build_js_relative_path.trim_start_matches("./").to_string(), - }) -} diff --git a/codex-rs/artifacts/src/runtime/js_runtime.rs b/codex-rs/artifacts/src/runtime/js_runtime.rs deleted file mode 100644 index 228747e473..0000000000 --- a/codex-rs/artifacts/src/runtime/js_runtime.rs +++ /dev/null @@ -1,171 +0,0 @@ -use crate::ArtifactRuntimePlatform; -use crate::runtime::default_cached_runtime_root; -use crate::runtime::load_cached_runtime; -use std::path::Path; -use std::path::PathBuf; -use which::which; - -const CODEX_APP_PRODUCT_NAMES: [&str; 6] = [ - "Codex", - "Codex (Dev)", - "Codex (Agent)", - "Codex (Nightly)", - "Codex (Alpha)", - "Codex (Beta)", -]; - -/// The JavaScript runtime used to execute the artifact tool. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub enum JsRuntimeKind { - Node, - Electron, -} - -/// A discovered JavaScript executable and the way it should be invoked. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct JsRuntime { - executable_path: PathBuf, - kind: JsRuntimeKind, -} - -impl JsRuntime { - pub(crate) fn node(executable_path: PathBuf) -> Self { - Self { - executable_path, - kind: JsRuntimeKind::Node, - } - } - - pub(crate) fn electron(executable_path: PathBuf) -> Self { - Self { - executable_path, - kind: JsRuntimeKind::Electron, - } - } - - /// Returns the executable to spawn for artifact commands. - pub fn executable_path(&self) -> &Path { - &self.executable_path - } - - /// Returns whether the command must set `ELECTRON_RUN_AS_NODE=1`. - pub fn requires_electron_run_as_node(&self) -> bool { - self.kind == JsRuntimeKind::Electron - } -} - -/// Returns `true` when artifact execution can find both runtime assets and a JS executable. -pub fn is_js_runtime_available(codex_home: &Path, runtime_version: &str) -> bool { - load_cached_runtime(&default_cached_runtime_root(codex_home), runtime_version) - .ok() - .and_then(|runtime| runtime.resolve_js_runtime().ok()) - .or_else(resolve_machine_js_runtime) - .is_some() -} - -/// Returns `true` when this machine can use the managed artifact runtime flow. -/// -/// This is a platform capability check, not a cache or binary availability check. -/// Callers that rely on `ArtifactRuntimeManager::ensure_installed()` should use this -/// to decide whether the feature can be exposed on the current machine. 
-pub fn can_manage_artifact_runtime() -> bool { - ArtifactRuntimePlatform::detect_current().is_ok() -} - -pub(crate) fn resolve_machine_js_runtime() -> Option { - resolve_js_runtime_from_candidates( - system_node_runtime(), - system_electron_runtime(), - codex_app_runtime_candidates(), - ) -} - -pub(crate) fn resolve_js_runtime_from_candidates( - node_runtime: Option, - electron_runtime: Option, - codex_app_candidates: Vec, -) -> Option { - node_runtime.or(electron_runtime).or_else(|| { - codex_app_candidates - .into_iter() - .find_map(|candidate| electron_runtime_from_path(&candidate)) - }) -} - -pub(crate) fn system_node_runtime() -> Option { - which("node") - .ok() - .and_then(|path| node_runtime_from_path(&path)) -} - -pub(crate) fn system_electron_runtime() -> Option { - which("electron") - .ok() - .and_then(|path| electron_runtime_from_path(&path)) -} - -pub(crate) fn node_runtime_from_path(path: &Path) -> Option { - path.is_file().then(|| JsRuntime::node(path.to_path_buf())) -} - -pub(crate) fn electron_runtime_from_path(path: &Path) -> Option { - path.is_file() - .then(|| JsRuntime::electron(path.to_path_buf())) -} - -pub(crate) fn codex_app_runtime_candidates() -> Vec { - match std::env::consts::OS { - "macos" => { - let mut roots = vec![PathBuf::from("/Applications")]; - if let Some(home) = std::env::var_os("HOME") { - roots.push(PathBuf::from(home).join("Applications")); - } - - roots - .into_iter() - .flat_map(|root| { - CODEX_APP_PRODUCT_NAMES - .into_iter() - .map(move |product_name| { - root.join(format!("{product_name}.app")) - .join("Contents") - .join("MacOS") - .join(product_name) - }) - }) - .collect() - } - "windows" => { - let mut roots = Vec::new(); - if let Some(local_app_data) = std::env::var_os("LOCALAPPDATA") { - roots.push(PathBuf::from(local_app_data).join("Programs")); - } - if let Some(program_files) = std::env::var_os("ProgramFiles") { - roots.push(PathBuf::from(program_files)); - } - if let Some(program_files_x86) = 
std::env::var_os("ProgramFiles(x86)") { - roots.push(PathBuf::from(program_files_x86)); - } - - roots - .into_iter() - .flat_map(|root| { - CODEX_APP_PRODUCT_NAMES - .into_iter() - .map(move |product_name| { - root.join(product_name).join(format!("{product_name}.exe")) - }) - }) - .collect() - } - "linux" => [PathBuf::from("/opt"), PathBuf::from("/usr/lib")] - .into_iter() - .flat_map(|root| { - CODEX_APP_PRODUCT_NAMES - .into_iter() - .map(move |product_name| root.join(product_name).join(product_name)) - }) - .collect(), - _ => Vec::new(), - } -} diff --git a/codex-rs/artifacts/src/runtime/manager.rs b/codex-rs/artifacts/src/runtime/manager.rs deleted file mode 100644 index b0a1c60ef3..0000000000 --- a/codex-rs/artifacts/src/runtime/manager.rs +++ /dev/null @@ -1,255 +0,0 @@ -use super::ArtifactRuntimeError; -use super::ArtifactRuntimePlatform; -use super::InstalledArtifactRuntime; -use super::ReleaseManifest; -use super::detect_runtime_root; -use codex_package_manager::ManagedPackage; -use codex_package_manager::PackageManager; -use codex_package_manager::PackageManagerConfig; -use codex_package_manager::PackageManagerError; -use codex_package_manager::PackageReleaseArchive; -use reqwest::Client; -use std::path::Path; -use std::path::PathBuf; -use url::Url; - -/// Release tag prefix used for artifact runtime assets. -pub const DEFAULT_RELEASE_TAG_PREFIX: &str = "artifact-runtime-v"; - -/// Relative cache root for installed artifact runtimes under `codex_home`. -pub const DEFAULT_CACHE_ROOT_RELATIVE: &str = "packages/artifacts"; - -/// Base URL used by default when downloading runtime assets from GitHub releases. -pub const DEFAULT_RELEASE_BASE_URL: &str = "https://github.com/openai/codex/releases/download/"; - -/// Describes where a particular artifact runtime release can be downloaded from. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ArtifactRuntimeReleaseLocator { - base_url: Url, - runtime_version: String, - release_tag_prefix: String, -} - -impl ArtifactRuntimeReleaseLocator { - /// Creates a locator for a runtime version under a release base URL. - pub fn new(base_url: Url, runtime_version: impl Into) -> Self { - Self { - base_url, - runtime_version: runtime_version.into(), - release_tag_prefix: DEFAULT_RELEASE_TAG_PREFIX.to_string(), - } - } - - /// Overrides the release-tag prefix used when constructing asset names. - pub fn with_tag_prefix(mut self, release_tag_prefix: impl Into) -> Self { - self.release_tag_prefix = release_tag_prefix.into(); - self - } - - /// Returns the release asset base URL. - pub fn base_url(&self) -> &Url { - &self.base_url - } - - /// Returns the expected runtime version. - pub fn runtime_version(&self) -> &str { - &self.runtime_version - } - - /// Returns the full release tag for the runtime version. - pub fn release_tag(&self) -> String { - format!("{}{}", self.release_tag_prefix, self.runtime_version) - } - - /// Returns the expected manifest filename for the release. - pub fn manifest_file_name(&self) -> String { - format!("{}-manifest.json", self.release_tag()) - } - - /// Returns the manifest URL for this runtime release. - pub fn manifest_url(&self) -> Result { - self.base_url - .join(&format!( - "{}/{}", - self.release_tag(), - self.manifest_file_name() - )) - .map_err(PackageManagerError::InvalidBaseUrl) - } - - /// Returns the default GitHub-release locator for a runtime version. - pub fn default(runtime_version: impl Into) -> Self { - Self::new( - Url::parse(DEFAULT_RELEASE_BASE_URL).unwrap_or_else(|error| { - panic!("hard-coded artifact runtime release base URL must be valid: {error}") - }), - runtime_version, - ) - } -} - -/// Configuration for resolving artifact runtimes under a Codex home directory. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ArtifactRuntimeManagerConfig { - package_manager: PackageManagerConfig, - release: ArtifactRuntimeReleaseLocator, -} - -impl ArtifactRuntimeManagerConfig { - /// Creates a runtime-manager config from a Codex home and explicit release locator. - pub fn new(codex_home: PathBuf, release: ArtifactRuntimeReleaseLocator) -> Self { - Self { - package_manager: PackageManagerConfig::new( - codex_home, - ArtifactRuntimePackage::new(release.clone()), - ), - release, - } - } - - /// Creates a runtime-manager config that downloads from the default GitHub release location. - pub fn with_default_release(codex_home: PathBuf, runtime_version: impl Into) -> Self { - Self::new( - codex_home, - ArtifactRuntimeReleaseLocator::default(runtime_version), - ) - } - - /// Overrides the runtime cache root. - pub fn with_cache_root(mut self, cache_root: PathBuf) -> Self { - self.package_manager = self.package_manager.with_cache_root(cache_root); - self - } - - /// Returns the runtime cache root. - pub fn cache_root(&self) -> PathBuf { - self.package_manager.cache_root() - } - - /// Returns the release locator used by this config. - pub fn release(&self) -> &ArtifactRuntimeReleaseLocator { - &self.release - } -} - -/// Package-manager-backed artifact runtime resolver and installer. -#[derive(Clone, Debug)] -pub struct ArtifactRuntimeManager { - package_manager: PackageManager, - config: ArtifactRuntimeManagerConfig, -} - -impl ArtifactRuntimeManager { - /// Creates a runtime manager using the default `reqwest` client. - pub fn new(config: ArtifactRuntimeManagerConfig) -> Self { - let package_manager = PackageManager::new(config.package_manager.clone()); - Self { - package_manager, - config, - } - } - - /// Creates a runtime manager with a caller-provided HTTP client. 
- pub fn with_client(config: ArtifactRuntimeManagerConfig, client: Client) -> Self { - let package_manager = PackageManager::with_client(config.package_manager.clone(), client); - Self { - package_manager, - config, - } - } - - /// Returns the manager configuration. - pub fn config(&self) -> &ArtifactRuntimeManagerConfig { - &self.config - } - - /// Returns the installed runtime if it is already present and valid. - pub async fn resolve_cached( - &self, - ) -> Result, ArtifactRuntimeError> { - self.package_manager.resolve_cached().await - } - - /// Returns the installed runtime, downloading and caching it if necessary. - pub async fn ensure_installed(&self) -> Result { - self.package_manager.ensure_installed().await - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -struct ArtifactRuntimePackage { - release: ArtifactRuntimeReleaseLocator, -} - -impl ArtifactRuntimePackage { - fn new(release: ArtifactRuntimeReleaseLocator) -> Self { - Self { release } - } -} - -impl ManagedPackage for ArtifactRuntimePackage { - type Error = ArtifactRuntimeError; - type Installed = InstalledArtifactRuntime; - type ReleaseManifest = ReleaseManifest; - - fn default_cache_root_relative(&self) -> &str { - DEFAULT_CACHE_ROOT_RELATIVE - } - - fn version(&self) -> &str { - self.release.runtime_version() - } - - fn manifest_url(&self) -> Result { - self.release.manifest_url() - } - - fn archive_url(&self, archive: &PackageReleaseArchive) -> Result { - self.release - .base_url() - .join(&format!( - "{}/{}", - self.release.release_tag(), - archive.archive - )) - .map_err(PackageManagerError::InvalidBaseUrl) - } - - fn release_version<'a>(&self, manifest: &'a Self::ReleaseManifest) -> &'a str { - &manifest.runtime_version - } - - fn platform_archive( - &self, - manifest: &Self::ReleaseManifest, - platform: ArtifactRuntimePlatform, - ) -> Result { - manifest - .platforms - .get(platform.as_str()) - .cloned() - .ok_or_else(|| { - 
PackageManagerError::MissingPlatform(platform.as_str().to_string()).into() - }) - } - - fn install_dir(&self, cache_root: &Path, platform: ArtifactRuntimePlatform) -> PathBuf { - cache_root.join(self.version()).join(platform.as_str()) - } - - fn installed_version<'a>(&self, package: &'a Self::Installed) -> &'a str { - package.runtime_version() - } - - fn load_installed( - &self, - root_dir: PathBuf, - platform: ArtifactRuntimePlatform, - ) -> Result { - InstalledArtifactRuntime::load(root_dir, platform) - } - - fn detect_extracted_root(&self, extraction_root: &Path) -> Result { - detect_runtime_root(extraction_root) - } -} diff --git a/codex-rs/artifacts/src/runtime/manifest.rs b/codex-rs/artifacts/src/runtime/manifest.rs deleted file mode 100644 index ad02afa898..0000000000 --- a/codex-rs/artifacts/src/runtime/manifest.rs +++ /dev/null @@ -1,15 +0,0 @@ -use codex_package_manager::PackageReleaseArchive; -use serde::Deserialize; -use serde::Serialize; -use std::collections::BTreeMap; - -/// Release metadata published alongside the packaged artifact runtime. 
-#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -pub struct ReleaseManifest { - pub schema_version: u32, - pub runtime_version: String, - pub release_tag: String, - #[serde(default)] - pub node_version: Option, - pub platforms: BTreeMap, -} diff --git a/codex-rs/artifacts/src/runtime/mod.rs b/codex-rs/artifacts/src/runtime/mod.rs deleted file mode 100644 index 41fd1a48fc..0000000000 --- a/codex-rs/artifacts/src/runtime/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -mod error; -mod installed; -mod js_runtime; -mod manager; -mod manifest; - -pub use codex_package_manager::PackagePlatform as ArtifactRuntimePlatform; -pub use error::ArtifactRuntimeError; -pub use installed::InstalledArtifactRuntime; -pub use installed::load_cached_runtime; -pub use js_runtime::JsRuntime; -pub use js_runtime::JsRuntimeKind; -pub use js_runtime::can_manage_artifact_runtime; -pub use js_runtime::is_js_runtime_available; -pub use manager::ArtifactRuntimeManager; -pub use manager::ArtifactRuntimeManagerConfig; -pub use manager::ArtifactRuntimeReleaseLocator; -pub use manager::DEFAULT_CACHE_ROOT_RELATIVE; -pub use manager::DEFAULT_RELEASE_BASE_URL; -pub use manager::DEFAULT_RELEASE_TAG_PREFIX; -pub use manifest::ReleaseManifest; - -pub(crate) use installed::default_cached_runtime_root; -pub(crate) use installed::detect_runtime_root; -pub(crate) use js_runtime::codex_app_runtime_candidates; -pub(crate) use js_runtime::resolve_js_runtime_from_candidates; -pub(crate) use js_runtime::system_electron_runtime; -pub(crate) use js_runtime::system_node_runtime; diff --git a/codex-rs/artifacts/src/tests.rs b/codex-rs/artifacts/src/tests.rs deleted file mode 100644 index 3db8a0bcc2..0000000000 --- a/codex-rs/artifacts/src/tests.rs +++ /dev/null @@ -1,453 +0,0 @@ -use crate::ArtifactBuildRequest; -use crate::ArtifactCommandOutput; -use crate::ArtifactRuntimeManager; -use crate::ArtifactRuntimeManagerConfig; -use crate::ArtifactRuntimePlatform; -use crate::ArtifactRuntimeReleaseLocator; -use 
crate::ArtifactsClient; -use crate::DEFAULT_CACHE_ROOT_RELATIVE; -use crate::ReleaseManifest; -use crate::load_cached_runtime; -use codex_package_manager::ArchiveFormat; -use codex_package_manager::PackageReleaseArchive; -use flate2::Compression; -use flate2::write::GzEncoder; -use pretty_assertions::assert_eq; -use sha2::Digest; -use sha2::Sha256; -use std::collections::BTreeMap; -use std::fs; -use std::io::Cursor; -use std::io::Write; -use std::path::Path; -use std::time::Duration; -use tar::Builder as TarBuilder; -use tempfile::TempDir; -use wiremock::Mock; -use wiremock::MockServer; -use wiremock::ResponseTemplate; -use wiremock::matchers::method; -use wiremock::matchers::path; -use zip::ZipWriter; -use zip::write::SimpleFileOptions; - -#[test] -fn release_locator_builds_manifest_url() { - let locator = ArtifactRuntimeReleaseLocator::new( - url::Url::parse("https://example.test/releases/").unwrap_or_else(|error| panic!("{error}")), - "0.1.0", - ); - let url = locator - .manifest_url() - .unwrap_or_else(|error| panic!("{error}")); - assert_eq!( - url.as_str(), - "https://example.test/releases/artifact-runtime-v0.1.0/artifact-runtime-v0.1.0-manifest.json" - ); -} - -#[test] -fn default_release_locator_uses_openai_codex_github_releases() { - let locator = ArtifactRuntimeReleaseLocator::default("0.1.0"); - let url = locator - .manifest_url() - .unwrap_or_else(|error| panic!("{error}")); - - assert_eq!( - url.as_str(), - "https://github.com/openai/codex/releases/download/artifact-runtime-v0.1.0/artifact-runtime-v0.1.0-manifest.json" - ); -} - -#[test] -fn load_cached_runtime_reads_installed_runtime() { - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let runtime_version = "2.5.6"; - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let install_dir = codex_home - .path() - .join(DEFAULT_CACHE_ROOT_RELATIVE) - .join(runtime_version) - .join(platform.as_str()); - 
write_installed_runtime(&install_dir, runtime_version); - - let runtime = load_cached_runtime( - &codex_home.path().join(DEFAULT_CACHE_ROOT_RELATIVE), - runtime_version, - ) - .unwrap_or_else(|error| panic!("{error}")); - - assert_eq!(runtime.runtime_version(), runtime_version); - assert_eq!(runtime.platform(), platform); - assert!( - runtime - .build_js_path() - .ends_with(Path::new("dist/artifact_tool.mjs")) - ); -} - -#[test] -fn load_cached_runtime_requires_build_entrypoint() { - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let runtime_version = "2.5.6"; - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let install_dir = codex_home - .path() - .join(DEFAULT_CACHE_ROOT_RELATIVE) - .join(runtime_version) - .join(platform.as_str()); - write_installed_runtime(&install_dir, runtime_version); - fs::remove_file(install_dir.join("dist/artifact_tool.mjs")) - .unwrap_or_else(|error| panic!("{error}")); - - let error = load_cached_runtime( - &codex_home.path().join(DEFAULT_CACHE_ROOT_RELATIVE), - runtime_version, - ) - .unwrap_err(); - - assert_eq!( - error.to_string(), - format!( - "required runtime file is missing: {}", - install_dir.join("dist/artifact_tool.mjs").display() - ) - ); -} - -#[tokio::test] -async fn ensure_installed_downloads_and_extracts_zip_runtime() { - let server = MockServer::start().await; - let runtime_version = "2.5.6"; - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let archive_name = format!( - "artifact-runtime-v{runtime_version}-{}.zip", - platform.as_str() - ); - let archive_bytes = build_zip_archive(runtime_version); - let archive_sha = format!("{:x}", Sha256::digest(&archive_bytes)); - let manifest = ReleaseManifest { - schema_version: 1, - runtime_version: runtime_version.to_string(), - release_tag: format!("artifact-runtime-v{runtime_version}"), - node_version: None, - platforms: 
BTreeMap::from([( - platform.as_str().to_string(), - PackageReleaseArchive { - archive: archive_name.clone(), - sha256: archive_sha, - format: ArchiveFormat::Zip, - size_bytes: Some(archive_bytes.len() as u64), - }, - )]), - }; - Mock::given(method("GET")) - .and(path(format!( - "/artifact-runtime-v{runtime_version}/artifact-runtime-v{runtime_version}-manifest.json" - ))) - .respond_with(ResponseTemplate::new(200).set_body_json(&manifest)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path(format!( - "/artifact-runtime-v{runtime_version}/{archive_name}" - ))) - .respond_with(ResponseTemplate::new(200).set_body_bytes(archive_bytes)) - .mount(&server) - .await; - - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let locator = ArtifactRuntimeReleaseLocator::new( - url::Url::parse(&format!("{}/", server.uri())).unwrap_or_else(|error| panic!("{error}")), - runtime_version, - ); - let manager = ArtifactRuntimeManager::new(ArtifactRuntimeManagerConfig::new( - codex_home.path().to_path_buf(), - locator, - )); - - let runtime = manager - .ensure_installed() - .await - .unwrap_or_else(|error| panic!("{error}")); - - assert_eq!(runtime.runtime_version(), runtime_version); - assert_eq!(runtime.platform(), platform); - assert!( - runtime - .build_js_path() - .ends_with(Path::new("dist/artifact_tool.mjs")) - ); -} - -#[test] -fn load_cached_runtime_requires_package_export() { - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let runtime_version = "2.5.6"; - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let install_dir = codex_home - .path() - .join(DEFAULT_CACHE_ROOT_RELATIVE) - .join(runtime_version) - .join(platform.as_str()); - write_installed_runtime(&install_dir, runtime_version); - fs::write( - install_dir.join("package.json"), - serde_json::json!({ - "name": "@oai/artifact-tool", - "version": runtime_version, - "type": "module", - }) - 
.to_string(), - ) - .unwrap_or_else(|error| panic!("{error}")); - - let error = load_cached_runtime( - &codex_home.path().join(DEFAULT_CACHE_ROOT_RELATIVE), - runtime_version, - ) - .unwrap_err(); - - assert_eq!( - error.to_string(), - format!( - "invalid package metadata at {}", - install_dir.join("package.json").display() - ) - ); -} - -#[tokio::test] -async fn ensure_installed_downloads_and_extracts_tar_gz_runtime() { - let server = MockServer::start().await; - let runtime_version = "2.5.6"; - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let archive_name = format!( - "artifact-runtime-v{runtime_version}-{}.tar.gz", - platform.as_str() - ); - let archive_bytes = build_tar_gz_archive(runtime_version); - let archive_sha = format!("{:x}", Sha256::digest(&archive_bytes)); - let manifest = ReleaseManifest { - schema_version: 1, - runtime_version: runtime_version.to_string(), - release_tag: format!("artifact-runtime-v{runtime_version}"), - node_version: None, - platforms: BTreeMap::from([( - platform.as_str().to_string(), - PackageReleaseArchive { - archive: archive_name.clone(), - sha256: archive_sha, - format: ArchiveFormat::TarGz, - size_bytes: Some(archive_bytes.len() as u64), - }, - )]), - }; - Mock::given(method("GET")) - .and(path(format!( - "/artifact-runtime-v{runtime_version}/artifact-runtime-v{runtime_version}-manifest.json" - ))) - .respond_with(ResponseTemplate::new(200).set_body_json(&manifest)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path(format!( - "/artifact-runtime-v{runtime_version}/{archive_name}" - ))) - .respond_with(ResponseTemplate::new(200).set_body_bytes(archive_bytes)) - .mount(&server) - .await; - - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let locator = ArtifactRuntimeReleaseLocator::new( - url::Url::parse(&format!("{}/", server.uri())).unwrap_or_else(|error| panic!("{error}")), - runtime_version, - ); - let manager = 
ArtifactRuntimeManager::new(ArtifactRuntimeManagerConfig::new( - codex_home.path().to_path_buf(), - locator, - )); - - let runtime = manager - .ensure_installed() - .await - .unwrap_or_else(|error| panic!("{error}")); - - assert_eq!(runtime.runtime_version(), runtime_version); - assert_eq!(runtime.platform(), platform); - assert!( - runtime - .build_js_path() - .ends_with(Path::new("dist/artifact_tool.mjs")) - ); -} - -#[test] -fn load_cached_runtime_uses_custom_cache_root() { - let codex_home = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let runtime_version = "2.5.6"; - let custom_cache_root = codex_home.path().join("runtime-cache"); - let platform = - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")); - let install_dir = custom_cache_root - .join(runtime_version) - .join(platform.as_str()); - write_installed_runtime(&install_dir, runtime_version); - - let config = ArtifactRuntimeManagerConfig::with_default_release( - codex_home.path().to_path_buf(), - runtime_version, - ) - .with_cache_root(custom_cache_root); - - let runtime = load_cached_runtime(&config.cache_root(), runtime_version) - .unwrap_or_else(|error| panic!("{error}")); - - assert_eq!(runtime.runtime_version(), runtime_version); - assert_eq!(runtime.platform(), platform); -} - -#[tokio::test] -#[cfg(unix)] -async fn artifacts_client_execute_build_writes_wrapped_script_and_env() { - let temp = TempDir::new().unwrap_or_else(|error| panic!("{error}")); - let runtime_root = temp.path().join("runtime"); - write_installed_runtime(&runtime_root, "2.5.6"); - let runtime = crate::InstalledArtifactRuntime::load( - runtime_root, - ArtifactRuntimePlatform::detect_current().unwrap_or_else(|error| panic!("{error}")), - ) - .unwrap_or_else(|error| panic!("{error}")); - let client = ArtifactsClient::from_installed_runtime(runtime); - - let output = client - .execute_build(ArtifactBuildRequest { - source: concat!( - "console.log(typeof artifacts);\n", - 
"console.log(typeof codexArtifacts);\n", - "console.log(artifactTool.ok);\n", - "console.log(ok);\n", - "console.error('stderr-ok');\n", - "console.log('stdout-ok');\n" - ) - .to_string(), - cwd: temp.path().to_path_buf(), - timeout: Some(Duration::from_secs(5)), - env: BTreeMap::new(), - }) - .await - .unwrap_or_else(|error| panic!("{error}")); - - assert_success(&output); - assert_eq!(output.stderr.trim(), "stderr-ok"); - assert_eq!( - output.stdout.lines().collect::>(), - vec!["undefined", "undefined", "true", "true", "stdout-ok"] - ); -} - -fn assert_success(output: &ArtifactCommandOutput) { - assert!(output.success()); - assert_eq!(output.exit_code, Some(0)); -} - -fn write_installed_runtime(install_dir: &Path, runtime_version: &str) { - fs::create_dir_all(install_dir.join("dist")).unwrap_or_else(|error| panic!("{error}")); - fs::write( - install_dir.join("package.json"), - serde_json::json!({ - "name": "@oai/artifact-tool", - "version": runtime_version, - "type": "module", - "exports": { - ".": "./dist/artifact_tool.mjs", - } - }) - .to_string(), - ) - .unwrap_or_else(|error| panic!("{error}")); - fs::write( - install_dir.join("dist/artifact_tool.mjs"), - "export const ok = true;\n", - ) - .unwrap_or_else(|error| panic!("{error}")); -} - -fn build_zip_archive(runtime_version: &str) -> Vec { - let mut bytes = Cursor::new(Vec::new()); - { - let mut zip = ZipWriter::new(&mut bytes); - let options = SimpleFileOptions::default(); - let package_json = serde_json::json!({ - "name": "@oai/artifact-tool", - "version": runtime_version, - "type": "module", - "exports": { - ".": "./dist/artifact_tool.mjs", - } - }) - .to_string() - .into_bytes(); - zip.start_file("artifact-runtime/package.json", options) - .unwrap_or_else(|error| panic!("{error}")); - zip.write_all(&package_json) - .unwrap_or_else(|error| panic!("{error}")); - zip.start_file("artifact-runtime/dist/artifact_tool.mjs", options) - .unwrap_or_else(|error| panic!("{error}")); - zip.write_all(b"export const ok 
= true;\n") - .unwrap_or_else(|error| panic!("{error}")); - zip.finish().unwrap_or_else(|error| panic!("{error}")); - } - bytes.into_inner() -} - -fn build_tar_gz_archive(runtime_version: &str) -> Vec { - let mut bytes = Vec::new(); - { - let encoder = GzEncoder::new(&mut bytes, Compression::default()); - let mut archive = TarBuilder::new(encoder); - - let package_json = serde_json::json!({ - "name": "@oai/artifact-tool", - "version": runtime_version, - "type": "module", - "exports": { - ".": "./dist/artifact_tool.mjs", - } - }) - .to_string() - .into_bytes(); - let mut package_header = tar::Header::new_gnu(); - package_header.set_mode(0o644); - package_header.set_size(package_json.len() as u64); - package_header.set_cksum(); - archive - .append_data( - &mut package_header, - "package/package.json", - package_json.as_slice(), - ) - .unwrap_or_else(|error| panic!("{error}")); - - let build_js = b"export const ok = true;\n"; - let mut build_header = tar::Header::new_gnu(); - build_header.set_mode(0o644); - build_header.set_size(build_js.len() as u64); - build_header.set_cksum(); - archive - .append_data( - &mut build_header, - "package/dist/artifact_tool.mjs", - &build_js[..], - ) - .unwrap_or_else(|error| panic!("{error}")); - - archive.finish().unwrap_or_else(|error| panic!("{error}")); - } - bytes -} diff --git a/codex-rs/chatgpt/Cargo.toml b/codex-rs/chatgpt/Cargo.toml index cd14a67009..84c793b536 100644 --- a/codex-rs/chatgpt/Cargo.toml +++ b/codex-rs/chatgpt/Cargo.toml @@ -12,6 +12,7 @@ anyhow = { workspace = true } clap = { workspace = true, features = ["derive"] } codex-connectors = { workspace = true } codex-core = { workspace = true } +codex-login = { workspace = true } codex-utils-cli = { workspace = true } codex-utils-cargo-bin = { workspace = true } serde = { workspace = true, features = ["derive"] } diff --git a/codex-rs/chatgpt/src/chatgpt_token.rs b/codex-rs/chatgpt/src/chatgpt_token.rs index 93aaef7ed2..ceca132e9b 100644 --- 
a/codex-rs/chatgpt/src/chatgpt_token.rs +++ b/codex-rs/chatgpt/src/chatgpt_token.rs @@ -1,10 +1,10 @@ use codex_core::AuthManager; +use codex_login::token_data::TokenData; use std::path::Path; use std::sync::LazyLock; use std::sync::RwLock; use codex_core::auth::AuthCredentialsStoreMode; -use codex_core::token_data::TokenData; static CHATGPT_TOKEN: LazyLock>> = LazyLock::new(|| RwLock::new(None)); diff --git a/codex-rs/chatgpt/src/connectors.rs b/codex-rs/chatgpt/src/connectors.rs index 4dcf8886c7..3c2b2fe4ef 100644 --- a/codex-rs/chatgpt/src/connectors.rs +++ b/codex-rs/chatgpt/src/connectors.rs @@ -1,6 +1,6 @@ use codex_core::AuthManager; use codex_core::config::Config; -use codex_core::token_data::TokenData; +use codex_login::token_data::TokenData; use std::collections::HashSet; use std::time::Duration; diff --git a/codex-rs/cli/src/debug_sandbox.rs b/codex-rs/cli/src/debug_sandbox.rs index d519bd5f67..f64c45c73f 100644 --- a/codex-rs/cli/src/debug_sandbox.rs +++ b/codex-rs/cli/src/debug_sandbox.rs @@ -18,7 +18,7 @@ use codex_protocol::config_types::SandboxMode; use codex_protocol::permissions::NetworkSandboxPolicy; use codex_sandboxing::landlock::create_linux_sandbox_command_args_for_policies; #[cfg(target_os = "macos")] -use codex_sandboxing::seatbelt::create_seatbelt_command_args_for_policies_with_extensions; +use codex_sandboxing::seatbelt::create_seatbelt_command_args_for_policies; use codex_utils_cli::CliConfigOverrides; use tokio::process::Child; use tokio::process::Command as TokioCommand; @@ -164,14 +164,17 @@ async fn run_command_under_sandbox( let res = tokio::task::spawn_blocking(move || { if use_elevated { run_windows_sandbox_capture_elevated( - policy_str.as_str(), - &sandbox_cwd, - base_dir.as_path(), - command_vec, - &cwd_clone, - env_map, - /*timeout_ms*/ None, - config.permissions.windows_sandbox_private_desktop, + codex_windows_sandbox::ElevatedSandboxCaptureRequest { + policy_json_or_preset: policy_str.as_str(), + sandbox_policy_cwd: 
&sandbox_cwd, + codex_home: base_dir.as_path(), + command: command_vec, + cwd: &cwd_clone, + env_map, + timeout_ms: None, + use_private_desktop: config.permissions.windows_sandbox_private_desktop, + proxy_enforced: false, + }, ) } else { run_windows_sandbox_capture( @@ -246,14 +249,13 @@ async fn run_command_under_sandbox( let mut child = match sandbox_type { #[cfg(target_os = "macos")] SandboxType::Seatbelt => { - let args = create_seatbelt_command_args_for_policies_with_extensions( + let args = create_seatbelt_command_args_for_policies( command, &config.permissions.file_system_sandbox_policy, config.permissions.network_sandbox_policy, sandbox_policy_cwd.as_path(), /*enforce_managed_network*/ false, network.as_ref(), - /*extensions*/ None, ); let network_policy = config.permissions.network_sandbox_policy; spawn_debug_sandbox_child( diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 5e0d405eb3..12a531d35d 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -530,6 +530,11 @@ struct InteractiveRemoteOptions { /// Accepted forms: `ws://host:port` or `wss://host:port`. #[arg(long = "remote", value_name = "ADDR")] remote: Option, + + /// Name of the environment variable containing the bearer token to send to + /// a remote app server websocket. 
+ #[arg(long = "remote-auth-token-env", value_name = "ENV_VAR")] + remote_auth_token_env: Option, } impl FeatureToggles { @@ -607,6 +612,7 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { let toggle_overrides = feature_toggles.to_overrides()?; root_config_overrides.raw_overrides.extend(toggle_overrides); let root_remote = remote.remote; + let root_remote_auth_token_env = remote.remote_auth_token_env; match subcommand { None => { @@ -614,12 +620,21 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { &mut interactive.config_overrides, root_config_overrides.clone(), ); - let exit_info = - run_interactive_tui(interactive, root_remote.clone(), arg0_paths.clone()).await?; + let exit_info = run_interactive_tui( + interactive, + root_remote.clone(), + root_remote_auth_token_env.clone(), + arg0_paths.clone(), + ) + .await?; handle_app_exit(exit_info)?; } Some(Subcommand::Exec(mut exec_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "exec")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "exec", + )?; prepend_config_flags( &mut exec_cli.config_overrides, root_config_overrides.clone(), @@ -627,7 +642,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { codex_exec::run_main(exec_cli, arg0_paths.clone()).await?; } Some(Subcommand::Review(review_args)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "review")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "review", + )?; let mut exec_cli = ExecCli::try_parse_from(["codex", "exec"])?; exec_cli.command = Some(ExecCommand::Review(review_args)); prepend_config_flags( @@ -637,11 +656,19 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { codex_exec::run_main(exec_cli, arg0_paths.clone()).await?; } Some(Subcommand::McpServer) => { - 
reject_remote_mode_for_subcommand(root_remote.as_deref(), "mcp-server")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "mcp-server", + )?; codex_mcp_server::run_main(arg0_paths.clone(), root_config_overrides).await?; } Some(Subcommand::Mcp(mut mcp_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "mcp")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "mcp", + )?; // Propagate any root-level config overrides (e.g. `-c key=value`). prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone()); mcp_cli.run().await?; @@ -653,9 +680,13 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { analytics_default_enabled, auth, } = app_server_cli; + reject_remote_mode_for_app_server_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + subcommand.as_ref(), + )?; match subcommand { None => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "app-server")?; let transport = listen; let auth = auth.try_into_settings()?; codex_app_server::run_main_with_transport( @@ -670,10 +701,6 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { .await?; } Some(AppServerSubcommand::GenerateTs(gen_cli)) => { - reject_remote_mode_for_subcommand( - root_remote.as_deref(), - "app-server generate-ts", - )?; let options = codex_app_server_protocol::GenerateTsOptions { experimental_api: gen_cli.experimental, ..Default::default() @@ -685,10 +712,6 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { )?; } Some(AppServerSubcommand::GenerateJsonSchema(gen_cli)) => { - reject_remote_mode_for_subcommand( - root_remote.as_deref(), - "app-server generate-json-schema", - )?; codex_app_server_protocol::generate_json_with_experimental( &gen_cli.out_dir, gen_cli.experimental, @@ -701,7 +724,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> 
anyhow::Result<()> { } #[cfg(target_os = "macos")] Some(Subcommand::App(app_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "app")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "app", + )?; app_cmd::run_app(app_cli).await?; } Some(Subcommand::Resume(ResumeCommand { @@ -724,6 +751,9 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { let exit_info = run_interactive_tui( interactive, remote.remote.or(root_remote.clone()), + remote + .remote_auth_token_env + .or(root_remote_auth_token_env.clone()), arg0_paths.clone(), ) .await?; @@ -747,13 +777,20 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { let exit_info = run_interactive_tui( interactive, remote.remote.or(root_remote.clone()), + remote + .remote_auth_token_env + .or(root_remote_auth_token_env.clone()), arg0_paths.clone(), ) .await?; handle_app_exit(exit_info)?; } Some(Subcommand::Login(mut login_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "login")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "login", + )?; prepend_config_flags( &mut login_cli.config_overrides, root_config_overrides.clone(), @@ -785,7 +822,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { } } Some(Subcommand::Logout(mut logout_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "logout")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "logout", + )?; prepend_config_flags( &mut logout_cli.config_overrides, root_config_overrides.clone(), @@ -793,11 +834,19 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { run_logout(logout_cli.config_overrides).await; } Some(Subcommand::Completion(completion_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "completion")?; + 
reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "completion", + )?; print_completion(completion_cli); } Some(Subcommand::Cloud(mut cloud_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "cloud")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "cloud", + )?; prepend_config_flags( &mut cloud_cli.config_overrides, root_config_overrides.clone(), @@ -807,7 +856,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { } Some(Subcommand::Sandbox(sandbox_args)) => match sandbox_args.cmd { SandboxCommand::Macos(mut seatbelt_cli) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "sandbox macos")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "sandbox macos", + )?; prepend_config_flags( &mut seatbelt_cli.config_overrides, root_config_overrides.clone(), @@ -819,7 +872,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { .await?; } SandboxCommand::Linux(mut landlock_cli) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "sandbox linux")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "sandbox linux", + )?; prepend_config_flags( &mut landlock_cli.config_overrides, root_config_overrides.clone(), @@ -831,7 +888,11 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { .await?; } SandboxCommand::Windows(mut windows_cli) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "sandbox windows")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "sandbox windows", + )?; prepend_config_flags( &mut windows_cli.config_overrides, root_config_overrides.clone(), @@ -845,22 +906,38 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { }, 
Some(Subcommand::Debug(DebugCommand { subcommand })) => match subcommand { DebugSubcommand::AppServer(cmd) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "debug app-server")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "debug app-server", + )?; run_debug_app_server_command(cmd).await?; } DebugSubcommand::ClearMemories => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "debug clear-memories")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "debug clear-memories", + )?; run_debug_clear_memories_command(&root_config_overrides, &interactive).await?; } }, Some(Subcommand::Execpolicy(ExecpolicyCommand { sub })) => match sub { ExecpolicySubcommand::Check(cmd) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "execpolicy check")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "execpolicy check", + )?; run_execpolicycheck(cmd)? 
} }, Some(Subcommand::Apply(mut apply_cli)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "apply")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "apply", + )?; prepend_config_flags( &mut apply_cli.config_overrides, root_config_overrides.clone(), @@ -868,19 +945,31 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { run_apply_command(apply_cli, /*cwd*/ None).await?; } Some(Subcommand::ResponsesApiProxy(args)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "responses-api-proxy")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "responses-api-proxy", + )?; tokio::task::spawn_blocking(move || codex_responses_api_proxy::run_main(args)) .await??; } Some(Subcommand::StdioToUds(cmd)) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "stdio-to-uds")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "stdio-to-uds", + )?; let socket_path = cmd.socket_path; tokio::task::spawn_blocking(move || codex_stdio_to_uds::run(socket_path.as_path())) .await??; } Some(Subcommand::Features(FeaturesCli { sub })) => match sub { FeaturesSubcommand::List => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "features list")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "features list", + )?; // Respect root-level `-c` overrides plus top-level flags like `--profile`. 
let mut cli_kv_overrides = root_config_overrides .parse_overrides() @@ -923,11 +1012,19 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { } } FeaturesSubcommand::Enable(FeatureSetArgs { feature }) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "features enable")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "features enable", + )?; enable_feature_in_config(&interactive, &feature).await?; } FeaturesSubcommand::Disable(FeatureSetArgs { feature }) => { - reject_remote_mode_for_subcommand(root_remote.as_deref(), "features disable")?; + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "features disable", + )?; disable_feature_in_config(&interactive, &feature).await?; } }, @@ -1046,18 +1143,64 @@ fn prepend_config_flags( .splice(0..0, cli_config_overrides.raw_overrides); } -fn reject_remote_mode_for_subcommand(remote: Option<&str>, subcommand: &str) -> anyhow::Result<()> { +fn reject_remote_mode_for_subcommand( + remote: Option<&str>, + remote_auth_token_env: Option<&str>, + subcommand: &str, +) -> anyhow::Result<()> { if let Some(remote) = remote { anyhow::bail!( "`--remote {remote}` is only supported for interactive TUI commands, not `codex {subcommand}`" ); } + if remote_auth_token_env.is_some() { + anyhow::bail!( + "`--remote-auth-token-env` is only supported for interactive TUI commands, not `codex {subcommand}`" + ); + } Ok(()) } +fn reject_remote_mode_for_app_server_subcommand( + remote: Option<&str>, + remote_auth_token_env: Option<&str>, + subcommand: Option<&AppServerSubcommand>, +) -> anyhow::Result<()> { + let subcommand_name = match subcommand { + None => "app-server", + Some(AppServerSubcommand::GenerateTs(_)) => "app-server generate-ts", + Some(AppServerSubcommand::GenerateJsonSchema(_)) => "app-server generate-json-schema", + Some(AppServerSubcommand::GenerateInternalJsonSchema(_)) => { + 
"app-server generate-internal-json-schema" + } + }; + reject_remote_mode_for_subcommand(remote, remote_auth_token_env, subcommand_name) +} + +fn read_remote_auth_token_from_env_var_with( + env_var_name: &str, + get_var: F, +) -> anyhow::Result +where + F: FnOnce(&str) -> Result, +{ + let auth_token = get_var(env_var_name) + .map_err(|_| anyhow::anyhow!("environment variable `{env_var_name}` is not set"))?; + let auth_token = auth_token.trim().to_string(); + if auth_token.is_empty() { + anyhow::bail!("environment variable `{env_var_name}` is empty"); + } + Ok(auth_token) +} + +fn read_remote_auth_token_from_env_var(env_var_name: &str) -> anyhow::Result { + read_remote_auth_token_from_env_var_with(env_var_name, |name| std::env::var(name)) +} + async fn run_interactive_tui( mut interactive: TuiCli, remote: Option, + remote_auth_token_env: Option, arg0_paths: Arg0DispatchPaths, ) -> std::io::Result { if let Some(prompt) = interactive.prompt.take() { @@ -1089,17 +1232,28 @@ async fn run_interactive_tui( .map(codex_tui_app_server::normalize_remote_addr) .transpose() .map_err(std::io::Error::other)?; + if remote_auth_token_env.is_some() && normalized_remote.is_none() { + return Ok(AppExitInfo::fatal( + "`--remote-auth-token-env` requires `--remote`.", + )); + } if normalized_remote.is_some() && !use_app_server_tui { return Ok(AppExitInfo::fatal( "`--remote` requires the `tui_app_server` feature flag to be enabled.", )); } if use_app_server_tui { + let remote_auth_token = remote_auth_token_env + .as_deref() + .map(read_remote_auth_token_from_env_var) + .transpose() + .map_err(std::io::Error::other)?; codex_tui_app_server::run_main( into_app_server_tui_cli(interactive), arg0_paths, codex_core::config_loader::LoaderOverrides::default(), normalized_remote, + remote_auth_token, ) .await .map(into_legacy_app_exit_info) @@ -1661,6 +1815,22 @@ mod tests { assert_eq!(cli.remote.remote.as_deref(), Some("ws://127.0.0.1:4500")); } + #[test] + fn 
remote_auth_token_env_flag_parses_for_interactive_root() { + let cli = MultitoolCli::try_parse_from([ + "codex", + "--remote-auth-token-env", + "CODEX_REMOTE_AUTH_TOKEN", + "--remote", + "ws://127.0.0.1:4500", + ]) + .expect("parse"); + assert_eq!( + cli.remote.remote_auth_token_env.as_deref(), + Some("CODEX_REMOTE_AUTH_TOKEN") + ); + } + #[test] fn remote_flag_parses_for_resume_subcommand() { let cli = @@ -1676,7 +1846,7 @@ mod tests { #[test] fn reject_remote_mode_for_non_interactive_subcommands() { - let err = reject_remote_mode_for_subcommand(Some("127.0.0.1:4500"), "exec") + let err = reject_remote_mode_for_subcommand(Some("127.0.0.1:4500"), None, "exec") .expect_err("non-interactive subcommands should reject --remote"); assert!( err.to_string() @@ -1684,6 +1854,59 @@ mod tests { ); } + #[test] + fn reject_remote_auth_token_env_for_non_interactive_subcommands() { + let err = reject_remote_mode_for_subcommand(None, Some("CODEX_REMOTE_AUTH_TOKEN"), "exec") + .expect_err("non-interactive subcommands should reject --remote-auth-token-env"); + assert!( + err.to_string() + .contains("only supported for interactive TUI commands") + ); + } + + #[test] + fn reject_remote_auth_token_env_for_app_server_generate_internal_json_schema() { + let subcommand = + AppServerSubcommand::GenerateInternalJsonSchema(GenerateInternalJsonSchemaCommand { + out_dir: PathBuf::from("/tmp/out"), + }); + let err = reject_remote_mode_for_app_server_subcommand( + None, + Some("CODEX_REMOTE_AUTH_TOKEN"), + Some(&subcommand), + ) + .expect_err("non-interactive app-server subcommands should reject --remote-auth-token-env"); + assert!(err.to_string().contains("generate-internal-json-schema")); + } + + #[test] + fn read_remote_auth_token_from_env_var_reports_missing_values() { + let err = read_remote_auth_token_from_env_var_with("CODEX_REMOTE_AUTH_TOKEN", |_| { + Err(std::env::VarError::NotPresent) + }) + .expect_err("missing env vars should be rejected"); + assert!(err.to_string().contains("is not 
set")); + } + + #[test] + fn read_remote_auth_token_from_env_var_trims_values() { + let auth_token = + read_remote_auth_token_from_env_var_with("CODEX_REMOTE_AUTH_TOKEN", |_| { + Ok(" bearer-token ".to_string()) + }) + .expect("env var should parse"); + assert_eq!(auth_token, "bearer-token"); + } + + #[test] + fn read_remote_auth_token_from_env_var_rejects_empty_values() { + let err = read_remote_auth_token_from_env_var_with("CODEX_REMOTE_AUTH_TOKEN", |_| { + Ok(" \n\t ".to_string()) + }) + .expect_err("empty env vars should be rejected"); + assert!(err.to_string().contains("is empty")); + } + #[test] fn app_server_listen_websocket_url_parses() { let app_server = app_server_from_args( diff --git a/codex-rs/cli/src/mcp_cmd.rs b/codex-rs/cli/src/mcp_cmd.rs index 30a911cb63..52707f1da9 100644 --- a/codex-rs/cli/src/mcp_cmd.rs +++ b/codex-rs/cli/src/mcp_cmd.rs @@ -306,6 +306,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }; servers.insert(name.clone(), new_entry); diff --git a/codex-rs/cloud-requirements/src/lib.rs b/codex-rs/cloud-requirements/src/lib.rs index fb0f62a342..12e62d880f 100644 --- a/codex-rs/cloud-requirements/src/lib.rs +++ b/codex-rs/cloud-requirements/src/lib.rs @@ -1153,6 +1153,34 @@ mod tests { ); } + #[tokio::test] + async fn fetch_cloud_requirements_allows_hc_plan_as_enterprise() { + let codex_home = tempdir().expect("tempdir"); + let service = CloudRequirementsService::new( + auth_manager_with_plan("hc"), + Arc::new(StaticFetcher { + contents: Some("allowed_approval_policies = [\"never\"]".to_string()), + }), + codex_home.path().to_path_buf(), + CLOUD_REQUIREMENTS_TIMEOUT, + ); + assert_eq!( + service.fetch().await, + Ok(Some(ConfigRequirementsToml { + allowed_approval_policies: Some(vec![AskForApproval::Never]), + allowed_sandbox_modes: None, + allowed_web_search_modes: None, + guardian_developer_instructions: None, + 
feature_requirements: None, + mcp_servers: None, + apps: None, + rules: None, + enforce_residency: None, + network: None, + })) + ); + } + #[tokio::test] async fn fetch_cloud_requirements_handles_missing_contents() { let result = parse_for_fetch(None); diff --git a/codex-rs/core-skills/src/injection_tests.rs b/codex-rs/core-skills/src/injection_tests.rs index 8d66a0af57..b8611de4ef 100644 --- a/codex-rs/core-skills/src/injection_tests.rs +++ b/codex-rs/core-skills/src/injection_tests.rs @@ -11,8 +11,6 @@ fn make_skill(name: &str, path: &str) -> SkillMetadata { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: PathBuf::from(path), scope: codex_protocol::protocol::SkillScope::User, } diff --git a/codex-rs/core-skills/src/invocation_utils_tests.rs b/codex-rs/core-skills/src/invocation_utils_tests.rs index 657582b742..6d74dbe9a7 100644 --- a/codex-rs/core-skills/src/invocation_utils_tests.rs +++ b/codex-rs/core-skills/src/invocation_utils_tests.rs @@ -18,8 +18,6 @@ fn test_skill_metadata(skill_doc_path: PathBuf) -> SkillMetadata { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: skill_doc_path, scope: codex_protocol::protocol::SkillScope::User, } diff --git a/codex-rs/core-skills/src/loader.rs b/codex-rs/core-skills/src/loader.rs index b6ef000845..42de9fb288 100644 --- a/codex-rs/core-skills/src/loader.rs +++ b/codex-rs/core-skills/src/loader.rs @@ -2,7 +2,6 @@ use crate::model::SkillDependencies; use crate::model::SkillError; use crate::model::SkillInterface; use crate::model::SkillLoadOutcome; -use crate::model::SkillManagedNetworkOverride; use crate::model::SkillMetadata; use crate::model::SkillPolicy; use crate::model::SkillToolDependency; @@ -13,10 +12,6 @@ use codex_config::ConfigLayerStackOrdering; use codex_config::default_project_root_markers; use codex_config::merge_toml_values; use 
codex_config::project_root_markers_from_config; -use codex_protocol::models::FileSystemPermissions; -use codex_protocol::models::MacOsSeatbeltProfileExtensions; -use codex_protocol::models::NetworkPermissions; -use codex_protocol::models::PermissionProfile; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; use codex_utils_absolute_path::AbsolutePathBufGuard; @@ -59,8 +54,6 @@ struct SkillMetadataFile { dependencies: Option, #[serde(default)] policy: Option, - #[serde(default)] - permissions: Option, } #[derive(Default)] @@ -68,28 +61,6 @@ struct LoadedSkillMetadata { interface: Option, dependencies: Option, policy: Option, - permission_profile: Option, - managed_network_override: Option, -} - -#[derive(Debug, Default, Deserialize, PartialEq, Eq)] -struct SkillPermissionProfile { - #[serde(default)] - network: Option, - #[serde(default)] - file_system: Option, - #[serde(default)] - macos: Option, -} - -#[derive(Debug, Default, Deserialize, PartialEq, Eq)] -struct SkillNetworkPermissions { - #[serde(default)] - enabled: Option, - #[serde(default)] - allowed_domains: Option>, - #[serde(default)] - denied_domains: Option>, } #[derive(Debug, Default, Deserialize)] @@ -551,8 +522,6 @@ fn parse_skill_file(path: &Path, scope: SkillScope) -> Result Result LoadedSkillMetadata { interface, dependencies, policy, - permissions, } = parsed; - let (permission_profile, managed_network_override) = normalize_permissions(permissions); LoadedSkillMetadata { interface: resolve_interface(interface, skill_dir), dependencies: resolve_dependencies(dependencies), policy: resolve_policy(policy), - permission_profile, - managed_network_override, } } -fn normalize_permissions( - permissions: Option, -) -> ( - Option, - Option, -) { - let Some(permissions) = permissions else { - return (None, None); - }; - let managed_network_override = permissions - .network - .as_ref() - .map(|network| SkillManagedNetworkOverride { - allowed_domains: 
network.allowed_domains.clone(), - denied_domains: network.denied_domains.clone(), - }) - .filter(SkillManagedNetworkOverride::has_domain_overrides); - let permission_profile = PermissionProfile { - network: permissions.network.and_then(|network| { - let network = NetworkPermissions { - enabled: network.enabled, - }; - (!network.is_empty()).then_some(network) - }), - file_system: permissions - .file_system - .filter(|file_system| !file_system.is_empty()), - macos: permissions.macos, - }; - - ( - (!permission_profile.is_empty()).then_some(permission_profile), - managed_network_override, - ) -} - fn resolve_interface(interface: Option, skill_dir: &Path) -> Option { let interface = interface?; let interface = SkillInterface { diff --git a/codex-rs/core-skills/src/loader_tests.rs b/codex-rs/core-skills/src/loader_tests.rs index 5875b1a043..ad196df670 100644 --- a/codex-rs/core-skills/src/loader_tests.rs +++ b/codex-rs/core-skills/src/loader_tests.rs @@ -4,12 +4,6 @@ use codex_config::ConfigLayerEntry; use codex_config::ConfigLayerStack; use codex_config::ConfigRequirements; use codex_config::ConfigRequirementsToml; -use codex_protocol::models::FileSystemPermissions; -use codex_protocol::models::MacOsAutomationPermission; -use codex_protocol::models::MacOsContactsPermission; -use codex_protocol::models::MacOsPreferencesPermission; -use codex_protocol::models::MacOsSeatbeltProfileExtensions; -use codex_protocol::models::PermissionProfile; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; use codex_utils_absolute_path::AbsolutePathBuf; @@ -290,8 +284,6 @@ fn loads_skills_from_home_agents_dir_for_user_scope() -> anyhow::Result<()> { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -442,8 +434,6 @@ async fn loads_skill_dependencies_metadata_from_yaml() { ], }), policy: None, - permission_profile: None, - 
managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -499,8 +489,6 @@ interface: }), dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(skill_path.as_path()), scope: SkillScope::User, }] @@ -610,340 +598,6 @@ policy: ); } -#[tokio::test] -async fn loads_skill_permissions_from_yaml() { - let codex_home = tempfile::tempdir().expect("tempdir"); - let skill_path = write_skill(&codex_home, "demo", "permissions-skill", "from yaml"); - let skill_dir = skill_path.parent().expect("skill dir"); - fs::create_dir_all(skill_dir.join("data")).expect("create read path"); - fs::create_dir_all(skill_dir.join("output")).expect("create write path"); - - write_skill_metadata_at( - skill_dir, - r#" -permissions: - network: - enabled: true - file_system: - read: - - "./data" - write: - - "./output" -"#, - ); - - let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); - - assert!( - outcome.errors.is_empty(), - "unexpected errors: {:?}", - outcome.errors - ); - assert_eq!(outcome.skills.len(), 1); - assert_eq!( - outcome.skills[0].permission_profile, - Some(PermissionProfile { - network: Some(NetworkPermissions { - enabled: Some(true), - }), - file_system: Some(FileSystemPermissions { - read: Some(vec![ - AbsolutePathBuf::try_from(normalized(skill_dir.join("data").as_path())) - .expect("absolute data path"), - ]), - write: Some(vec![ - AbsolutePathBuf::try_from(normalized(skill_dir.join("output").as_path())) - .expect("absolute output path"), - ]), - }), - macos: None, - }) - ); - assert_eq!(outcome.skills[0].managed_network_override, None); -} - -#[tokio::test] -async fn empty_skill_permissions_do_not_create_profile() { - let codex_home = tempfile::tempdir().expect("tempdir"); - let skill_path = write_skill(&codex_home, "demo", "permissions-empty", "from yaml"); - let skill_dir = skill_path.parent().expect("skill dir"); - - 
write_skill_metadata_at( - skill_dir, - r#" -permissions: {} -"#, - ); - - let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); - - assert!( - outcome.errors.is_empty(), - "unexpected errors: {:?}", - outcome.errors - ); - assert_eq!(outcome.skills.len(), 1); - assert_eq!(outcome.skills[0].permission_profile, None); -} - -#[test] -fn normalize_permissions_splits_managed_network_overrides() { - let (permission_profile, managed_network_override) = - normalize_permissions(Some(SkillPermissionProfile { - network: Some(SkillNetworkPermissions { - enabled: Some(true), - allowed_domains: Some(vec!["skill.example.com".to_string()]), - denied_domains: Some(vec!["blocked.skill.example.com".to_string()]), - }), - file_system: None, - macos: None, - })); - - assert_eq!( - permission_profile, - Some(PermissionProfile { - network: Some(NetworkPermissions { - enabled: Some(true), - }), - file_system: None, - macos: None, - }) - ); - assert_eq!( - managed_network_override, - Some(SkillManagedNetworkOverride { - allowed_domains: Some(vec!["skill.example.com".to_string()]), - denied_domains: Some(vec!["blocked.skill.example.com".to_string()]), - }) - ); -} - -#[test] -fn normalize_permissions_preserves_network_gate_separately_from_overrides() { - let (permission_profile, managed_network_override) = - normalize_permissions(Some(SkillPermissionProfile { - network: Some(SkillNetworkPermissions { - enabled: Some(false), - allowed_domains: Some(vec!["skill.example.com".to_string()]), - denied_domains: None, - }), - file_system: None, - macos: None, - })); - - assert_eq!( - permission_profile, - Some(PermissionProfile { - network: Some(NetworkPermissions { - enabled: Some(false), - }), - file_system: None, - macos: None, - }) - ); - assert_eq!( - managed_network_override, - Some(SkillManagedNetworkOverride { - allowed_domains: Some(vec!["skill.example.com".to_string()]), - denied_domains: None, - }) - ); -} - -#[test] -fn 
skill_metadata_parses_macos_permissions_yaml() { - let parsed = serde_yaml::from_str::( - r#" -permissions: - macos: - macos_preferences: "read_write" - macos_automation: - - "com.apple.Notes" - macos_launch_services: true - macos_accessibility: true - macos_calendar: true -"#, - ) - .expect("parse skill metadata"); - - assert_eq!( - parsed.permissions, - Some(SkillPermissionProfile { - network: None, - file_system: None, - macos: Some(MacOsSeatbeltProfileExtensions { - macos_preferences: MacOsPreferencesPermission::ReadWrite, - macos_automation: MacOsAutomationPermission::BundleIds(vec![ - "com.apple.Notes".to_string(), - ]), - macos_launch_services: true, - macos_accessibility: true, - macos_calendar: true, - macos_reminders: false, - macos_contacts: MacOsContactsPermission::None, - }), - }) - ); -} - -#[test] -fn skill_metadata_parses_macos_reminders_permission_yaml() { - let parsed = serde_yaml::from_str::( - r#" -permissions: - macos: - macos_reminders: true -"#, - ) - .expect("parse reminders skill metadata"); - - assert_eq!( - parsed.permissions, - Some(SkillPermissionProfile { - network: None, - file_system: None, - macos: Some(MacOsSeatbeltProfileExtensions { - macos_preferences: MacOsPreferencesPermission::ReadOnly, - macos_automation: MacOsAutomationPermission::None, - macos_launch_services: false, - macos_accessibility: false, - macos_calendar: false, - macos_reminders: true, - macos_contacts: MacOsContactsPermission::None, - }), - }) - ); -} - -#[test] -fn skill_metadata_parses_network_domain_overrides_under_permissions() { - let parsed = serde_yaml::from_str::( - r#" -permissions: - network: - enabled: true - allowed_domains: - - "skill.example.com" - denied_domains: - - "blocked.skill.example.com" -"#, - ) - .expect("parse network skill metadata"); - - assert_eq!( - parsed.permissions, - Some(SkillPermissionProfile { - network: Some(SkillNetworkPermissions { - enabled: Some(true), - allowed_domains: Some(vec!["skill.example.com".to_string()]), - 
denied_domains: Some(vec!["blocked.skill.example.com".to_string()]), - }), - file_system: None, - macos: None, - }) - ); -} - -#[cfg(target_os = "macos")] -#[tokio::test] -async fn loads_skill_macos_permissions_from_yaml() { - let codex_home = tempfile::tempdir().expect("tempdir"); - let skill_path = write_skill(&codex_home, "demo", "permissions-macos", "from yaml"); - let skill_dir = skill_path.parent().expect("skill dir"); - - write_skill_metadata_at( - skill_dir, - r#" -permissions: - macos: - macos_preferences: "read_write" - macos_automation: - - "com.apple.Notes" - macos_launch_services: true - macos_accessibility: true - macos_calendar: true -"#, - ); - - let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); - - assert!( - outcome.errors.is_empty(), - "unexpected errors: {:?}", - outcome.errors - ); - assert_eq!(outcome.skills.len(), 1); - assert_eq!( - outcome.skills[0].permission_profile, - Some(PermissionProfile { - macos: Some(MacOsSeatbeltProfileExtensions { - macos_preferences: MacOsPreferencesPermission::ReadWrite, - macos_automation: MacOsAutomationPermission::BundleIds(vec![ - "com.apple.Notes".to_string() - ],), - macos_launch_services: true, - macos_accessibility: true, - macos_calendar: true, - macos_reminders: false, - macos_contacts: MacOsContactsPermission::None, - }), - ..Default::default() - }) - ); -} - -#[cfg(not(target_os = "macos"))] -#[tokio::test] -async fn loads_skill_macos_permissions_from_yaml_non_macos_does_not_create_profile() { - let codex_home = tempfile::tempdir().expect("tempdir"); - let skill_path = write_skill(&codex_home, "demo", "permissions-macos", "from yaml"); - let skill_dir = skill_path.parent().expect("skill dir"); - - write_skill_metadata_at( - skill_dir, - r#" -permissions: - macos: - macos_preferences: "read_write" - macos_automation: - - "com.apple.Notes" - macos_launch_services: true - macos_accessibility: true - macos_calendar: true -"#, - ); - - let cfg = 
make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); - - assert!( - outcome.errors.is_empty(), - "unexpected errors: {:?}", - outcome.errors - ); - assert_eq!(outcome.skills.len(), 1); - assert_eq!( - outcome.skills[0].permission_profile, - Some(PermissionProfile { - macos: Some(MacOsSeatbeltProfileExtensions { - macos_preferences: MacOsPreferencesPermission::ReadWrite, - macos_automation: MacOsAutomationPermission::BundleIds(vec![ - "com.apple.Notes".to_string() - ],), - macos_launch_services: true, - macos_accessibility: true, - macos_calendar: true, - macos_reminders: false, - macos_contacts: MacOsContactsPermission::None, - }), - ..Default::default() - }) - ); -} - #[tokio::test] async fn accepts_icon_paths_under_assets_dir() { let codex_home = tempfile::tempdir().expect("tempdir"); @@ -988,8 +642,6 @@ async fn accepts_icon_paths_under_assets_dir() { }), dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1030,8 +682,6 @@ async fn ignores_invalid_brand_color() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1085,8 +735,6 @@ async fn ignores_default_prompt_over_max_length() { }), dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1128,8 +776,6 @@ async fn drops_interface_when_icons_are_invalid() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1174,8 +820,6 @@ async fn loads_skills_via_symlinked_subdir_for_user_scope() { interface: None, dependencies: None, policy: None, - permission_profile: None, - 
managed_network_override: None, path_to_skills_md: normalized(&shared_skill_path), scope: SkillScope::User, }] @@ -1235,8 +879,6 @@ async fn does_not_loop_on_symlink_cycle_for_user_scope() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1273,8 +915,6 @@ fn loads_skills_via_symlinked_subdir_for_admin_scope() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&shared_skill_path), scope: SkillScope::Admin, }] @@ -1314,8 +954,6 @@ async fn loads_skills_via_symlinked_subdir_for_repo_scope() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&linked_skill_path), scope: SkillScope::Repo, }] @@ -1383,8 +1021,6 @@ async fn respects_max_scan_depth_for_user_scope() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&within_depth_path), scope: SkillScope::User, }] @@ -1412,8 +1048,6 @@ async fn loads_valid_skill() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1446,8 +1080,6 @@ async fn falls_back_to_directory_name_when_skill_name_is_missing() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1489,8 +1121,6 @@ async fn namespaces_plugin_skills_using_plugin_name() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1522,8 +1152,6 @@ async fn 
loads_short_description_from_metadata() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::User, }] @@ -1636,8 +1264,6 @@ async fn loads_skills_from_repo_root() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::Repo, }] @@ -1673,8 +1299,6 @@ async fn loads_skills_from_agents_dir_without_codex_dir() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::Repo, }] @@ -1728,8 +1352,6 @@ async fn loads_skills_from_all_codex_dirs_under_project_root() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&nested_skill_path), scope: SkillScope::Repo, }, @@ -1740,8 +1362,6 @@ async fn loads_skills_from_all_codex_dirs_under_project_root() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&root_skill_path), scope: SkillScope::Repo, }, @@ -1781,8 +1401,6 @@ async fn loads_skills_from_codex_dir_when_not_git_repo() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::Repo, }] @@ -1820,8 +1438,6 @@ async fn deduplicates_by_path_preferring_first_root() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::Repo, }] @@ -1863,8 +1479,6 @@ async fn keeps_duplicate_names_from_repo_and_user() { interface: None, dependencies: None, policy: None, - permission_profile: None, - 
managed_network_override: None, path_to_skills_md: normalized(&repo_skill_path), scope: SkillScope::Repo, }, @@ -1875,8 +1489,6 @@ async fn keeps_duplicate_names_from_repo_and_user() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&user_skill_path), scope: SkillScope::User, }, @@ -1940,8 +1552,6 @@ async fn keeps_duplicate_names_from_nested_codex_dirs() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: first_path, scope: SkillScope::Repo, }, @@ -1952,8 +1562,6 @@ async fn keeps_duplicate_names_from_nested_codex_dirs() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: second_path, scope: SkillScope::Repo, }, @@ -2025,8 +1633,6 @@ async fn loads_skills_when_cwd_is_file_in_repo() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::Repo, }] @@ -2085,8 +1691,6 @@ async fn loads_skills_from_system_cache_when_present() { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: normalized(&skill_path), scope: SkillScope::System, }] diff --git a/codex-rs/core-skills/src/manager_tests.rs b/codex-rs/core-skills/src/manager_tests.rs index 2507fc9823..9e6c60c0e6 100644 --- a/codex-rs/core-skills/src/manager_tests.rs +++ b/codex-rs/core-skills/src/manager_tests.rs @@ -57,8 +57,6 @@ fn test_skill(name: &str, path: PathBuf) -> SkillMetadata { interface: None, dependencies: None, policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: path, scope: SkillScope::User, } diff --git a/codex-rs/core-skills/src/model.rs b/codex-rs/core-skills/src/model.rs index d47904b9c7..319ca4e64e 100644 
--- a/codex-rs/core-skills/src/model.rs +++ b/codex-rs/core-skills/src/model.rs @@ -3,22 +3,8 @@ use std::collections::HashSet; use std::path::PathBuf; use std::sync::Arc; -use codex_protocol::models::PermissionProfile; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; -use serde::Deserialize; - -#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)] -pub struct SkillManagedNetworkOverride { - pub allowed_domains: Option>, - pub denied_domains: Option>, -} - -impl SkillManagedNetworkOverride { - pub fn has_domain_overrides(&self) -> bool { - self.allowed_domains.is_some() || self.denied_domains.is_some() - } -} #[derive(Debug, Clone, PartialEq)] pub struct SkillMetadata { @@ -28,8 +14,6 @@ pub struct SkillMetadata { pub interface: Option, pub dependencies: Option, pub policy: Option, - pub permission_profile: Option, - pub managed_network_override: Option, /// Path to the SKILLS.md file that declares this skill. pub path_to_skills_md: PathBuf, pub scope: SkillScope, diff --git a/codex-rs/core/BUILD.bazel b/codex-rs/core/BUILD.bazel index e8773b42fd..591eb1f28a 100644 --- a/codex-rs/core/BUILD.bazel +++ b/codex-rs/core/BUILD.bazel @@ -32,9 +32,8 @@ codex_rust_crate( "//codex-rs:node-version.txt", ], rustc_env = { - # Askama resolves template paths relative to CARGO_MANIFEST_DIR. In - # Bazel, the Cargo-provided absolute source path points outside the - # sandbox, so keep the manifest root anchored inside the execroot. + # Keep manifest-root path lookups inside the Bazel execroot for code + # that relies on env!("CARGO_MANIFEST_DIR"). 
"CARGO_MANIFEST_DIR": "codex-rs/core", }, integration_compile_data_extra = [ diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 25e44d7631..e40066027d 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -18,10 +18,9 @@ workspace = true [dependencies] anyhow = { workspace = true } -arc-swap = "1.8.2" +arc-swap = { workspace = true } async-channel = { workspace = true } async-trait = { workspace = true } -askama = { workspace = true } base64 = { workspace = true } bm25 = { workspace = true } chardetng = { workspace = true } @@ -46,7 +45,6 @@ codex-hooks = { workspace = true } codex-instructions = { workspace = true } codex-network-proxy = { workspace = true } codex-otel = { workspace = true } -codex-artifacts = { workspace = true } codex-plugin = { workspace = true } codex-protocol = { workspace = true } codex-rollout = { workspace = true } @@ -54,6 +52,7 @@ codex-rmcp-client = { workspace = true } codex-sandboxing = { workspace = true } codex-state = { workspace = true } codex-terminal-detection = { workspace = true } +codex-tools = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-cache = { workspace = true } codex-utils-image = { workspace = true } @@ -66,6 +65,7 @@ codex-utils-readiness = { workspace = true } codex-secrets = { workspace = true } codex-utils-string = { workspace = true } codex-utils-stream-parser = { workspace = true } +codex-utils-template = { workspace = true } codex-windows-sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" } csv = { workspace = true } dirs = { workspace = true } @@ -149,7 +149,6 @@ codex-arg0 = { workspace = true } codex-otel = { workspace = true, features = [ "disable-default-metrics-exporter", ] } -codex-test-macros = { workspace = true } codex-utils-cargo-bin = { workspace = true } core_test_support = { workspace = true } ctor = { workspace = true } diff --git a/codex-rs/core/README.md b/codex-rs/core/README.md index 
c154ef463d..63cd3b6f36 100644 --- a/codex-rs/core/README.md +++ b/codex-rs/core/README.md @@ -17,32 +17,8 @@ pointer file), the resolved `gitdir:` target, and `.codex` read-only. Network access and filesystem read/write roots are controlled by `SandboxPolicy`. Seatbelt consumes the resolved policy and enforces it. -Seatbelt also supports macOS permission-profile extensions layered on top of -`SandboxPolicy`: - -- no extension profile provided: - keeps legacy default preferences read access (`user-preference-read`). -- extension profile provided with no `macos_preferences` grant: - does not add preferences access clauses. -- `macos_preferences = "readonly"`: - enables cfprefs read clauses and `user-preference-read`. -- `macos_preferences = "readwrite"`: - includes readonly clauses plus `user-preference-write` and cfprefs shm write - clauses. -- `macos_automation = true`: - enables broad Apple Events send permissions. -- `macos_automation = ["com.apple.Notes", ...]`: - enables Apple Events send only to listed bundle IDs. -- `macos_launch_services = true`: - enables LaunchServices lookups and open/launch operations. -- `macos_accessibility = true`: - enables `com.apple.axserver` mach lookup. -- `macos_calendar = true`: - enables `com.apple.CalendarAgent` mach lookup. -- `macos_contacts = "read_only"`: - enables Address Book read access and Contacts read services. -- `macos_contacts = "read_write"`: - includes the readonly Contacts clauses plus Address Book writes and keychain/temp helpers required for writes. +Seatbelt also keeps the legacy default preferences read access +(`user-preference-read`) needed for cfprefs-backed macOS behavior. ### Linux @@ -59,12 +35,13 @@ only when the split filesystem policy round-trips through the legacy cases like `/repo = write`, `/repo/a = none`, `/repo/a/b = write`, where the more specific writable child must reopen under a denied parent. 
-The Linux sandbox helper prefers `/usr/bin/bwrap` whenever it is available and -supports the required argv-rewrite flags, and falls back to the vendored -bubblewrap path compiled into the binary otherwise. When `/usr/bin/bwrap` is -missing or too old to support the required flags, Codex also surfaces a startup -warning through its normal notification path instead of printing directly from -the sandbox helper. +The Linux sandbox helper prefers the first `bwrap` found on `PATH` outside the +current working directory whenever it is available. If `bwrap` is present but +too old to support `--argv0`, the helper keeps using system bubblewrap and +switches to a no-`--argv0` compatibility path for the inner re-exec. If +`bwrap` is missing, it falls back to the vendored bubblewrap path compiled into +the binary and Codex surfaces a startup warning through its normal notification +path instead of printing directly from the sandbox helper. ### Windows diff --git a/codex-rs/core/config.schema.json b/codex-rs/core/config.schema.json index 3e2d7ff689..3d091006c9 100644 --- a/codex-rs/core/config.schema.json +++ b/codex-rs/core/config.schema.json @@ -338,9 +338,6 @@ "apps": { "type": "boolean" }, - "artifact": { - "type": "boolean" - }, "child_agents_md": { "type": "boolean" }, @@ -745,6 +742,21 @@ } ] }, + "McpServerToolConfig": { + "additionalProperties": false, + "description": "Per-tool approval settings for a single MCP server tool.", + "properties": { + "approval_mode": { + "allOf": [ + { + "$ref": "#/definitions/AppToolApproval" + } + ], + "description": "Approval mode for this tool." 
+ } + }, + "type": "object" + }, "MemoriesToml": { "additionalProperties": false, "description": "Memories settings loaded from config.toml.", @@ -1313,6 +1325,11 @@ }, "type": "object" }, + "name": { + "default": null, + "description": "Legacy display-name field accepted for backward compatibility.", + "type": "string" + }, "oauth_resource": { "default": null, "type": "string" @@ -1344,6 +1361,13 @@ "format": "double", "type": "number" }, + "tools": { + "additionalProperties": { + "$ref": "#/definitions/McpServerToolConfig" + }, + "default": null, + "type": "object" + }, "url": { "type": "string" } @@ -1914,10 +1938,6 @@ "experimental_compact_prompt_file": { "$ref": "#/definitions/AbsolutePathBuf" }, - "experimental_exec_server_url": { - "description": "Experimental / do not use. Overrides the URL used when connecting to a remote exec server.", - "type": "string" - }, "experimental_realtime_start_instructions": { "description": "Experimental / do not use. Replaces the built-in realtime start instructions inserted into developer messages when realtime becomes active.", "type": "string" @@ -1955,9 +1975,6 @@ "apps": { "type": "boolean" }, - "artifact": { - "type": "boolean" - }, "child_agents_md": { "type": "boolean" }, diff --git a/codex-rs/core/src/agent/control.rs b/codex-rs/core/src/agent/control.rs index a3b9897688..1efd514f62 100644 --- a/codex-rs/core/src/agent/control.rs +++ b/codex-rs/core/src/agent/control.rs @@ -44,6 +44,7 @@ use tracing::warn; const AGENT_NAMES: &str = include_str!("agent_names.txt"); const FORKED_SPAWN_AGENT_OUTPUT_MESSAGE: &str = "You are the newly spawned agent. The prior conversation history was forked from your parent agent. 
Treat the next user message as your new task, and use the forked history only as background context."; +const ROOT_LAST_TASK_MESSAGE: &str = "Main thread"; #[derive(Clone, Debug, Default)] pub(crate) struct SpawnAgentOptions { @@ -650,12 +651,6 @@ impl AgentControl { let agent_path = current_agent_path .resolve(agent_reference) .map_err(CodexErr::UnsupportedOperation)?; - if agent_path.is_root() { - return Err(CodexErr::UnsupportedOperation( - "root is not a spawned agent".to_string(), - )); - } - if let Some(thread_id) = self.state.agent_id_for_path(&agent_path) { return Ok(thread_id); } @@ -737,7 +732,21 @@ impl AgentControl { }) }); - let mut agents = Vec::with_capacity(live_agents.len()); + let root_path = AgentPath::root(); + let mut agents = Vec::with_capacity(live_agents.len().saturating_add(1)); + if resolved_prefix + .as_ref() + .is_none_or(|prefix| agent_matches_prefix(Some(&root_path), prefix)) + && let Some(root_thread_id) = self.state.agent_id_for_path(&root_path) + && let Ok(root_thread) = state.get_thread(root_thread_id).await + { + agents.push(ListedAgent { + agent_name: root_path.to_string(), + agent_status: root_thread.agent_status().await, + last_task_message: Some(ROOT_LAST_TASK_MESSAGE.to_string()), + }); + } + for metadata in live_agents { let Some(thread_id) = metadata.agent_id else { continue; diff --git a/codex-rs/core/src/agent/control_tests.rs b/codex-rs/core/src/agent/control_tests.rs index 10e9821aea..878d7d0991 100644 --- a/codex-rs/core/src/agent/control_tests.rs +++ b/codex-rs/core/src/agent/control_tests.rs @@ -75,6 +75,9 @@ impl AgentControlHarness { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); Self { @@ -811,6 +814,9 @@ async fn spawn_agent_respects_max_threads_limit() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), 
config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); @@ -854,6 +860,9 @@ async fn spawn_agent_releases_slot_after_shutdown() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); @@ -888,6 +897,9 @@ async fn spawn_agent_limit_shared_across_clones() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); let cloned = control.clone(); @@ -924,6 +936,9 @@ async fn resume_agent_respects_max_threads_limit() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); @@ -971,6 +986,9 @@ async fn resume_agent_releases_slot_after_resume_failure() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); @@ -1361,6 +1379,9 @@ async fn resume_thread_subagent_restores_stored_nickname_and_role() { CodexAuth::from_api_key("dummy"), config.model_provider.clone(), config.codex_home.clone(), + std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), ); let control = manager.agent_control(); let harness = AgentControlHarness { diff --git a/codex-rs/core/src/api_bridge.rs b/codex-rs/core/src/api_bridge.rs index 2060b78cf7..e7826f9ac6 100644 --- a/codex-rs/core/src/api_bridge.rs +++ b/codex-rs/core/src/api_bridge.rs @@ -6,6 
+6,7 @@ use codex_api::TransportError; use codex_api::error::ApiError; use codex_api::rate_limits::parse_promo_message; use codex_api::rate_limits::parse_rate_limit_for_limit; +use codex_login::token_data::PlanType; use http::HeaderMap; use serde::Deserialize; use serde_json::Value; @@ -16,7 +17,6 @@ use crate::error::RetryLimitReachedError; use crate::error::UnexpectedResponseError; use crate::error::UsageLimitReachedError; use crate::model_provider_info::ModelProviderInfo; -use crate::token_data::PlanType; pub(crate) fn map_api_error(err: ApiError) -> CodexErr { match err { diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 0c84434590..cd64f65f72 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -53,6 +53,7 @@ use chrono::Utc; use codex_app_server_protocol::McpServerElicitationRequest; use codex_app_server_protocol::McpServerElicitationRequestParams; use codex_exec_server::Environment; +use codex_exec_server::EnvironmentManager; use codex_features::FEATURES; use codex_features::Feature; use codex_features::unstable_features_warning_event; @@ -70,7 +71,6 @@ use codex_otel::current_span_w3c_trace_context; use codex_otel::set_parent_from_w3c_trace_context; use codex_protocol::ThreadId; use codex_protocol::approvals::ElicitationRequestEvent; -use codex_protocol::approvals::ExecApprovalRequestSkillMetadata; use codex_protocol::approvals::ExecPolicyAmendment; use codex_protocol::approvals::NetworkPolicyAmendment; use codex_protocol::approvals::NetworkPolicyRuleAction; @@ -406,6 +406,7 @@ pub(crate) struct CodexSpawnArgs { pub(crate) config: Config, pub(crate) auth_manager: Arc, pub(crate) models_manager: Arc, + pub(crate) environment_manager: Arc, pub(crate) skills_manager: Arc, pub(crate) plugins_manager: Arc, pub(crate) mcp_manager: Arc, @@ -459,6 +460,7 @@ impl Codex { mut config, auth_manager, models_manager, + environment_manager, skills_manager, plugins_manager, mcp_manager, @@ -650,6 +652,7 @@ impl Codex { 
agent_status_tx.clone(), conversation_history, session_source_clone, + environment_manager, skills_manager, plugins_manager, mcp_manager.clone(), @@ -885,6 +888,7 @@ pub(crate) struct TurnContext { pub(crate) features: ManagedFeatures, pub(crate) ghost_snapshot: GhostSnapshotConfig, pub(crate) final_output_json_schema: Option, + pub(crate) codex_self_exe: Option, pub(crate) codex_linux_sandbox_exe: Option, pub(crate) tool_call_gate: Arc, pub(crate) truncation_policy: TruncationPolicy, @@ -993,6 +997,7 @@ impl TurnContext { features, ghost_snapshot: self.ghost_snapshot.clone(), final_output_json_schema: self.final_output_json_schema.clone(), + codex_self_exe: self.codex_self_exe.clone(), codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), truncation_policy, @@ -1452,6 +1457,7 @@ impl Session { features: per_turn_config.features.clone(), ghost_snapshot: per_turn_config.ghost_snapshot.clone(), final_output_json_schema: None, + codex_self_exe: per_turn_config.codex_self_exe.clone(), codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), truncation_policy: model_info.truncation_policy.into(), @@ -1475,6 +1481,7 @@ impl Session { agent_status: watch::Sender, initial_history: InitialHistory, session_source: SessionSource, + environment_manager: Arc, skills_manager: Arc, plugins_manager: Arc, mcp_manager: Arc, @@ -1887,7 +1894,6 @@ impl Session { session_telemetry, models_manager: Arc::clone(&models_manager), tool_approvals: Mutex::new(ApprovalStore::default()), - execve_session_approvals: RwLock::new(HashMap::new()), skills_manager, plugins_manager: Arc::clone(&plugins_manager), mcp_manager: Arc::clone(&mcp_manager), @@ -1909,9 +1915,7 @@ impl Session { code_mode_service: crate::tools::code_mode::CodeModeService::new( config.js_repl_node_path.clone(), ), - environment: Arc::new( - 
Environment::create(config.experimental_exec_server_url.clone()).await?, - ), + environment: environment_manager.current().await?, }; let js_repl = Arc::new(JsReplHandle::with_node_path( config.js_repl_node_path.clone(), @@ -2926,7 +2930,6 @@ impl Session { network_approval_context: Option, proposed_execpolicy_amendment: Option, additional_permissions: Option, - skill_metadata: Option, available_decisions: Option>, ) -> ReviewDecision { // command-level approvals use `call_id`. @@ -2980,7 +2983,6 @@ impl Session { proposed_execpolicy_amendment, proposed_network_policy_amendments, additional_permissions, - skill_metadata, available_decisions: Some(available_decisions), parsed_cmd, }); @@ -5524,6 +5526,7 @@ async fn spawn_review_thread( shell_environment_policy: parent_turn_context.shell_environment_policy.clone(), cwd: parent_turn_context.cwd.clone(), final_output_json_schema: None, + codex_self_exe: parent_turn_context.codex_self_exe.clone(), codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), js_repl: Arc::clone(&sess.js_repl), diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index d87dd070d5..3f1508cca1 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use async_channel::Receiver; use async_channel::Sender; use codex_async_utils::OrCancelExt; +use codex_exec_server::EnvironmentManager; use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; @@ -76,6 +77,9 @@ pub(crate) async fn run_codex_thread_interactive( config, auth_manager, models_manager, + environment_manager: Arc::new(EnvironmentManager::new( + parent_ctx.environment.exec_server_url().map(str::to_owned), + )), skills_manager: Arc::clone(&parent_session.services.skills_manager), plugins_manager: 
Arc::clone(&parent_session.services.plugins_manager), mcp_manager: Arc::clone(&parent_session.services.mcp_manager), @@ -432,7 +436,6 @@ async fn handle_exec_approval( network_approval_context, proposed_execpolicy_amendment, additional_permissions, - skill_metadata, available_decisions, .. } = event; @@ -476,7 +479,6 @@ async fn handle_exec_approval( network_approval_context, proposed_execpolicy_amendment, additional_permissions, - skill_metadata, available_decisions, ), parent_session, diff --git a/codex-rs/core/src/codex_delegate_tests.rs b/codex-rs/core/src/codex_delegate_tests.rs index 8201424d8e..29efd92d71 100644 --- a/codex-rs/core/src/codex_delegate_tests.rs +++ b/codex-rs/core/src/codex_delegate_tests.rs @@ -286,7 +286,6 @@ async fn handle_exec_approval_uses_call_id_for_guardian_review_and_approval_id_f proposed_execpolicy_amendment: None, proposed_network_policy_amendments: None, additional_permissions: None, - skill_metadata: None, available_decisions: Some(vec![ ReviewDecision::Approved, ReviewDecision::Abort, diff --git a/codex-rs/core/src/codex_tests.rs b/codex-rs/core/src/codex_tests.rs index ad0fcda62e..b702b05fcd 100644 --- a/codex-rs/core/src/codex_tests.rs +++ b/codex-rs/core/src/codex_tests.rs @@ -1162,35 +1162,10 @@ async fn fork_startup_context_then_first_turn_diff_snapshot() -> anyhow::Result< }) .await?; wait_for_event(&initial.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await; - // The parent rollout writer drains asynchronously after turn completion. - // Wait until the persisted JSONL includes the source user turn before forking from it. - let mut source_history_persisted = false; - for _ in 0..100 { - let history = RolloutRecorder::get_rollout_history(&rollout_path).await; - source_history_persisted = history.ok().is_some_and(|history| { - history.get_rollout_items().into_iter().any(|item| { - matches!( - item, - RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. 
}) - if role == "user" - && content.iter().any(|content_item| { - matches!( - content_item, - ContentItem::InputText { text } if text == "fork seed" - ) - }) - ) - }) - }); - if source_history_persisted { - break; - } - sleep(StdDuration::from_millis(10)).await; - } - assert!( - source_history_persisted, - "source rollout should contain the completed pre-fork user turn before forking" - ); + // Forking reads the persisted rollout JSONL, so force the completed source turn to disk + // before snapshotting from it. + initial.codex.ensure_rollout_materialized().await; + initial.codex.flush_rollout().await; let mut fork_config = initial.config.clone(); fork_config.permissions.approval_policy = @@ -2551,6 +2526,9 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() { agent_status_tx, InitialHistory::New, SessionSource::Exec, + Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), skills_manager, plugins_manager, mcp_manager, @@ -2644,7 +2622,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone(), true)); let network_approval = Arc::new(NetworkApprovalService::default()); let environment = Arc::new( - codex_exec_server::Environment::create(None) + codex_exec_server::Environment::create(/*exec_server_url*/ None) .await .expect("create environment"), ); @@ -2680,7 +2658,6 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { session_telemetry: session_telemetry.clone(), models_manager: Arc::clone(&models_manager), tool_approvals: Mutex::new(ApprovalStore::default()), - execve_session_approvals: RwLock::new(HashMap::new()), skills_manager, plugins_manager, mcp_manager, @@ -3483,7 +3460,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone(), true)); let network_approval = 
Arc::new(NetworkApprovalService::default()); let environment = Arc::new( - codex_exec_server::Environment::create(None) + codex_exec_server::Environment::create(/*exec_server_url*/ None) .await .expect("create environment"), ); @@ -3519,7 +3496,6 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( session_telemetry: session_telemetry.clone(), models_manager: Arc::clone(&models_manager), tool_approvals: Mutex::new(ApprovalStore::default()), - execve_session_approvals: RwLock::new(HashMap::new()), skills_manager, plugins_manager, mcp_manager, diff --git a/codex-rs/core/src/codex_tests_guardian.rs b/codex-rs/core/src/codex_tests_guardian.rs index 54f7405be2..18eb3d177b 100644 --- a/codex-rs/core/src/codex_tests_guardian.rs +++ b/codex-rs/core/src/codex_tests_guardian.rs @@ -12,6 +12,7 @@ use crate::sandboxing::SandboxPermissions; use crate::tools::context::FunctionToolOutput; use crate::turn_diff_tracker::TurnDiffTracker; use codex_app_server_protocol::ConfigLayerSource; +use codex_exec_server::EnvironmentManager; use codex_execpolicy::Decision; use codex_execpolicy::Evaluation; use codex_execpolicy::RuleMatch; @@ -159,7 +160,6 @@ async fn guardian_allows_shell_additional_permissions_requests_past_policy_valid enabled: Some(true), }), file_system: None, - macos: None, }, "justification": params.justification.clone(), }) @@ -233,7 +233,7 @@ async fn guardian_allows_unified_exec_additional_permissions_requests_past_polic assert_eq!( output, - "missing `additional_permissions`; provide at least one of `network`, `file_system`, or `macos` when using `with_additional_permissions`" + "missing `additional_permissions`; provide at least one of `network` or `file_system` when using `with_additional_permissions`" ); } @@ -437,6 +437,7 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() { config, auth_manager, models_manager, + environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), skills_manager, 
plugins_manager, mcp_manager, diff --git a/codex-rs/core/src/codex_thread.rs b/codex-rs/core/src/codex_thread.rs index 9fa7069118..0635718c9c 100644 --- a/codex-rs/core/src/codex_thread.rs +++ b/codex-rs/core/src/codex_thread.rs @@ -74,6 +74,16 @@ impl CodexThread { self.codex.shutdown_and_wait().await } + #[doc(hidden)] + pub async fn ensure_rollout_materialized(&self) { + self.codex.session.ensure_rollout_materialized().await; + } + + #[doc(hidden)] + pub async fn flush_rollout(&self) { + self.codex.session.flush_rollout().await; + } + pub async fn submit_with_trace( &self, op: Op, diff --git a/codex-rs/core/src/config/config_tests.rs b/codex-rs/core/src/config/config_tests.rs index f67c532341..4d6527ab80 100644 --- a/codex-rs/core/src/config/config_tests.rs +++ b/codex-rs/core/src/config/config_tests.rs @@ -1,10 +1,12 @@ use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; use crate::config::edit::apply_blocking; +use crate::config::types::AppToolApproval; use crate::config::types::ApprovalsReviewer; use crate::config::types::BundledSkillsConfig; use crate::config::types::FeedbackConfigToml; use crate::config::types::HistoryPersistence; +use crate::config::types::McpServerToolConfig; use crate::config::types::McpServerTransportConfig; use crate::config::types::MemoriesConfig; use crate::config::types::MemoriesToml; @@ -57,6 +59,7 @@ fn stdio_mcp(command: &str) -> McpServerConfig { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), } } @@ -77,6 +80,7 @@ fn http_mcp(url: &str) -> McpServerConfig { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), } } @@ -1853,6 +1857,7 @@ async fn replace_mcp_servers_round_trips_entries() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); @@ -1958,6 +1963,76 @@ startup_timeout_ms = 2500 Ok(()) } +#[test] +fn mcp_servers_toml_parses_per_tool_approval_overrides() { + let config = 
toml::from_str::( + r#" +[mcp_servers.docs] +command = "docs-server" +name = "Docs" + +[mcp_servers.docs.tools.search] +approval_mode = "approve" +"#, + ) + .expect("TOML deserialization should succeed"); + let tool = config + .mcp_servers + .get("docs") + .and_then(|server| server.tools.get("search")) + .expect("docs/search tool config exists"); + + assert_eq!( + tool, + &McpServerToolConfig { + approval_mode: Some(AppToolApproval::Approve), + } + ); +} + +#[test] +fn mcp_servers_toml_ignores_unknown_server_fields() { + let config = toml::from_str::( + r#" +[mcp_servers.docs] +command = "docs-server" +trust_level = "trusted" +"#, + ) + .expect("unknown MCP server fields should be ignored"); + + assert_eq!( + config.mcp_servers.get("docs"), + Some(&stdio_mcp("docs-server")) + ); +} + +#[test] +fn mcp_servers_toml_parses_tool_approval_override_for_reserved_name() { + let config = toml::from_str::( + r#" +[mcp_servers.docs] +command = "docs-server" + +[mcp_servers.docs.tools.command] +approval_mode = "approve" +"#, + ) + .expect("TOML deserialization should succeed"); + let tool = config + .mcp_servers + .get("docs") + .and_then(|server| server.tools.get("command")) + .expect("docs/command tool config exists"); + + assert_eq!( + tool, + &McpServerToolConfig { + approval_mode: Some(AppToolApproval::Approve), + } + ); +} + #[tokio::test] async fn load_global_mcp_servers_rejects_inline_bearer_token() -> anyhow::Result<()> { let codex_home = TempDir::new()?; @@ -2009,6 +2084,7 @@ async fn replace_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2081,6 +2157,7 @@ async fn replace_mcp_servers_serializes_env_vars() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2133,6 +2210,7 @@ async fn replace_mcp_servers_serializes_cwd() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: 
None, + tools: HashMap::new(), }, )]); @@ -2183,6 +2261,7 @@ async fn replace_mcp_servers_streamable_http_serializes_bearer_token() -> anyhow disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2249,6 +2328,7 @@ async fn replace_mcp_servers_streamable_http_serializes_custom_headers() -> anyh disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); apply_blocking( @@ -2327,6 +2407,7 @@ async fn replace_mcp_servers_streamable_http_removes_optional_sections() -> anyh disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2358,6 +2439,7 @@ async fn replace_mcp_servers_streamable_http_removes_optional_sections() -> anyh disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); apply_blocking( @@ -2424,6 +2506,7 @@ async fn replace_mcp_servers_streamable_http_isolates_headers_between_servers() disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ), ( @@ -2445,6 +2528,7 @@ async fn replace_mcp_servers_streamable_http_isolates_headers_between_servers() disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ), ]); @@ -2529,6 +2613,7 @@ async fn replace_mcp_servers_serializes_disabled_flag() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2575,6 +2660,7 @@ async fn replace_mcp_servers_serializes_required_flag() -> anyhow::Result<()> { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2621,6 +2707,7 @@ async fn replace_mcp_servers_serializes_tool_filters() -> anyhow::Result<()> { disabled_tools: Some(vec!["blocked".to_string()]), scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -2671,6 +2758,7 @@ async fn replace_mcp_servers_streamable_http_serializes_oauth_resource() -> anyh disabled_tools: None, scopes: None, oauth_resource: 
Some("https://resource.example.com".to_string()), + tools: HashMap::new(), }, )]); @@ -4291,7 +4379,6 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { shell_environment_policy: ShellEnvironmentPolicy::default(), windows_sandbox_mode: None, windows_sandbox_private_desktop: true, - macos_seatbelt_profile_extensions: None, }, approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(None), @@ -4320,6 +4407,7 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { history: History::default(), ephemeral: false, file_opener: UriBasedFileOpener::VsCode, + codex_self_exe: None, codex_linux_sandbox_exe: None, main_execve_wrapper_exe: None, js_repl_node_path: None, @@ -4335,7 +4423,6 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { model_verbosity: None, personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), - experimental_exec_server_url: None, realtime_audio: RealtimeAudioConfig::default(), experimental_realtime_start_instructions: None, experimental_realtime_ws_base_url: None, @@ -4434,7 +4521,6 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { shell_environment_policy: ShellEnvironmentPolicy::default(), windows_sandbox_mode: None, windows_sandbox_private_desktop: true, - macos_seatbelt_profile_extensions: None, }, approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(None), @@ -4463,6 +4549,7 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { history: History::default(), ephemeral: false, file_opener: UriBasedFileOpener::VsCode, + codex_self_exe: None, codex_linux_sandbox_exe: None, main_execve_wrapper_exe: None, js_repl_node_path: None, @@ -4478,7 +4565,6 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { model_verbosity: None, personality: Some(Personality::Pragmatic), chatgpt_base_url: 
"https://chatgpt.com/backend-api/".to_string(), - experimental_exec_server_url: None, realtime_audio: RealtimeAudioConfig::default(), experimental_realtime_start_instructions: None, experimental_realtime_ws_base_url: None, @@ -4575,7 +4661,6 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { shell_environment_policy: ShellEnvironmentPolicy::default(), windows_sandbox_mode: None, windows_sandbox_private_desktop: true, - macos_seatbelt_profile_extensions: None, }, approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(None), @@ -4604,6 +4689,7 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { history: History::default(), ephemeral: false, file_opener: UriBasedFileOpener::VsCode, + codex_self_exe: None, codex_linux_sandbox_exe: None, main_execve_wrapper_exe: None, js_repl_node_path: None, @@ -4619,7 +4705,6 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { model_verbosity: None, personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), - experimental_exec_server_url: None, realtime_audio: RealtimeAudioConfig::default(), experimental_realtime_start_instructions: None, experimental_realtime_ws_base_url: None, @@ -4702,7 +4787,6 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { shell_environment_policy: ShellEnvironmentPolicy::default(), windows_sandbox_mode: None, windows_sandbox_private_desktop: true, - macos_seatbelt_profile_extensions: None, }, approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(None), @@ -4731,6 +4815,7 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { history: History::default(), ephemeral: false, file_opener: UriBasedFileOpener::VsCode, + codex_self_exe: None, codex_linux_sandbox_exe: None, main_execve_wrapper_exe: None, js_repl_node_path: None, @@ -4746,7 +4831,6 @@ fn test_precedence_fixture_with_gpt5_profile() 
-> std::io::Result<()> { model_verbosity: Some(Verbosity::High), personality: Some(Personality::Pragmatic), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), - experimental_exec_server_url: None, realtime_audio: RealtimeAudioConfig::default(), experimental_realtime_start_instructions: None, experimental_realtime_ws_base_url: None, @@ -5604,78 +5688,6 @@ shell_tool = true Ok(()) } -#[cfg(target_os = "linux")] -#[test] -fn system_bwrap_warning_reports_missing_system_bwrap() { - let warning = system_bwrap_warning_for_path(Path::new("/definitely/not/a/bwrap")) - .expect("missing system bwrap should emit a warning"); - - assert!(warning.contains("could not find system bubblewrap")); -} - -#[cfg(target_os = "linux")] -#[test] -fn system_bwrap_warning_reports_too_old_system_bwrap() { - let fake_bwrap = write_fake_bwrap( - r#"#!/bin/sh -if [ "$1" = "--help" ]; then - echo 'usage: bwrap [OPTION...] COMMAND' - exit 0 -fi -exit 1 -"#, - ); - let fake_bwrap_path: &Path = fake_bwrap.as_ref(); - let warning = system_bwrap_warning_for_path(fake_bwrap_path) - .expect("old system bwrap should emit a warning"); - - assert!(warning.contains("too old to support `--argv0`")); -} - -#[cfg(target_os = "linux")] -#[test] -fn system_bwrap_warning_skips_supported_system_bwrap() { - let fake_bwrap = write_fake_bwrap( - r#"#!/bin/sh -if [ "$1" = "--help" ]; then - echo ' --argv0 PROGRAM' - exit 0 -fi -exit 1 -"#, - ); - let fake_bwrap_path: &Path = fake_bwrap.as_ref(); - - assert_eq!(system_bwrap_warning_for_path(fake_bwrap_path), None); -} - -#[cfg(not(target_os = "linux"))] -#[test] -fn system_bwrap_warning_is_disabled_off_linux() { - assert!(system_bwrap_warning().is_none()); -} - -#[cfg(target_os = "linux")] -fn write_fake_bwrap(contents: &str) -> tempfile::TempPath { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::NamedTempFile; - - // Bazel can mount the OS temp directory `noexec`, so prefer the current - // working directory for fake executables 
and fall back to the default temp - // dir outside that environment. - let temp_file = std::env::current_dir() - .ok() - .and_then(|dir| NamedTempFile::new_in(dir).ok()) - .unwrap_or_else(|| NamedTempFile::new().expect("temp file")); - // Linux rejects exec-ing a file that is still open for writing. - let path = temp_file.into_temp_path(); - fs::write(&path, contents).expect("write fake bwrap"); - let permissions = fs::Permissions::from_mode(0o755); - fs::set_permissions(&path, permissions).expect("chmod fake bwrap"); - path -} - #[tokio::test] async fn approvals_reviewer_defaults_to_manual_only_without_guardian_feature() -> std::io::Result<()> { @@ -5964,34 +5976,6 @@ experimental_realtime_start_instructions = "start instructions from config" Ok(()) } -#[test] -fn experimental_exec_server_url_loads_from_config_toml() -> std::io::Result<()> { - let cfg: ConfigToml = toml::from_str( - r#" -experimental_exec_server_url = "http://127.0.0.1:8080" -"#, - ) - .expect("TOML deserialization should succeed"); - - assert_eq!( - cfg.experimental_exec_server_url.as_deref(), - Some("http://127.0.0.1:8080") - ); - - let codex_home = TempDir::new()?; - let config = Config::load_from_base_config_with_overrides( - cfg, - ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; - - assert_eq!( - config.experimental_exec_server_url.as_deref(), - Some("http://127.0.0.1:8080") - ); - Ok(()) -} - #[test] fn experimental_realtime_ws_base_url_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index 3f7e3b1170..370c46ce4c 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -125,7 +125,9 @@ pub fn model_availability_nux_count_edits(shown_count: &HashMap) -> // TODO(jif) move to a dedicated file mod document_helpers { + use crate::config::types::AppToolApproval; use crate::config::types::McpServerConfig; + use 
crate::config::types::McpServerToolConfig; use crate::config::types::McpServerTransportConfig; use toml_edit::Array as TomlArray; use toml_edit::InlineTable; @@ -248,10 +250,32 @@ mod document_helpers { { entry["oauth_resource"] = value(resource.clone()); } + if !config.tools.is_empty() { + let mut tools = new_implicit_table(); + let mut tool_entries: Vec<_> = config.tools.iter().collect(); + tool_entries.sort_by(|(left, _), (right, _)| left.cmp(right)); + for (name, tool_config) in tool_entries { + tools.insert(name, serialize_mcp_server_tool(tool_config)); + } + entry.insert("tools", TomlItem::Table(tools)); + } entry } + fn serialize_mcp_server_tool(config: &McpServerToolConfig) -> TomlItem { + let mut entry = TomlTable::new(); + entry.set_implicit(false); + if let Some(approval_mode) = config.approval_mode { + entry["approval_mode"] = value(match approval_mode { + AppToolApproval::Auto => "auto", + AppToolApproval::Prompt => "prompt", + AppToolApproval::Approve => "approve", + }); + } + TomlItem::Table(entry) + } + pub(super) fn serialize_mcp_server(config: &McpServerConfig) -> TomlItem { TomlItem::Table(serialize_mcp_server_table(config)) } diff --git a/codex-rs/core/src/config/edit_tests.rs b/codex-rs/core/src/config/edit_tests.rs index 632716f00d..d7386c546c 100644 --- a/codex-rs/core/src/config/edit_tests.rs +++ b/codex-rs/core/src/config/edit_tests.rs @@ -1,4 +1,6 @@ use super::*; +use crate::config::types::AppToolApproval; +use crate::config::types::McpServerToolConfig; use crate::config::types::McpServerTransportConfig; use codex_protocol::openai_models::ReasoningEffort; use pretty_assertions::assert_eq; @@ -582,6 +584,7 @@ fn blocking_replace_mcp_servers_round_trips() { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); @@ -607,6 +610,7 @@ fn blocking_replace_mcp_servers_round_trips() { disabled_tools: Some(vec!["forbidden".to_string()]), scopes: None, oauth_resource: 
Some("https://resource.example.com".to_string()), + tools: HashMap::new(), }, ); @@ -643,6 +647,53 @@ B = \"2\" assert_eq!(raw, expected); } +#[test] +fn blocking_replace_mcp_servers_serializes_tool_approval_overrides() { + let tmp = tempdir().expect("tmpdir"); + let codex_home = tmp.path(); + + let mut servers = BTreeMap::new(); + servers.insert( + "docs".to_string(), + McpServerConfig { + transport: McpServerTransportConfig::Stdio { + command: "docs-server".to_string(), + args: Vec::new(), + env: None, + env_vars: Vec::new(), + cwd: None, + }, + enabled: true, + required: false, + disabled_reason: None, + startup_timeout_sec: None, + tool_timeout_sec: None, + enabled_tools: None, + disabled_tools: None, + scopes: None, + oauth_resource: None, + tools: HashMap::from([( + "search".to_string(), + McpServerToolConfig { + approval_mode: Some(AppToolApproval::Approve), + }, + )]), + }, + ); + + apply_blocking(codex_home, None, &[ConfigEdit::ReplaceMcpServers(servers)]).expect("persist"); + + let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); + let expected = "\ +[mcp_servers.docs] +command = \"docs-server\" + +[mcp_servers.docs.tools.search] +approval_mode = \"approve\" +"; + assert_eq!(raw, expected); +} + #[test] fn blocking_replace_mcp_servers_preserves_inline_comments() { let tmp = tempdir().expect("tmpdir"); @@ -676,6 +727,7 @@ foo = { command = "cmd" } disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); @@ -721,6 +773,7 @@ foo = { command = "cmd" } # keep me disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); @@ -765,6 +818,7 @@ foo = { command = "cmd", args = ["--flag"] } # keep me disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); @@ -810,6 +864,7 @@ foo = { command = "cmd" } disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); diff --git a/codex-rs/core/src/config/mod.rs 
b/codex-rs/core/src/config/mod.rs index e799f1b9ba..fa21cbad84 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -78,7 +78,6 @@ use codex_protocol::config_types::WebSearchConfig; use codex_protocol::config_types::WebSearchMode; use codex_protocol::config_types::WebSearchToolConfig; use codex_protocol::config_types::WindowsSandboxLevel; -use codex_protocol::models::MacOsSeatbeltProfileExtensions; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::permissions::FileSystemSandboxPolicy; @@ -96,8 +95,6 @@ use std::collections::HashMap; use std::io::ErrorKind; use std::path::Path; use std::path::PathBuf; -#[cfg(target_os = "linux")] -use std::process::Command; use crate::config::permissions::compile_permission_profile; use crate::config::permissions::get_readable_roots_required_for_codex_runtime; @@ -120,7 +117,7 @@ pub use codex_config::Constrained; pub use codex_config::ConstraintError; pub use codex_config::ConstraintResult; pub use codex_network_proxy::NetworkProxyAuditMetadata; - +pub use codex_sandboxing::system_bwrap_warning; pub use managed_features::ManagedFeatures; pub use network_proxy_spec::NetworkProxySpec; pub use network_proxy_spec::StartedNetworkProxy; @@ -146,55 +143,12 @@ pub(crate) const DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS: Option = None; pub const CONFIG_TOML_FILE: &str = "config.toml"; const OPENAI_BASE_URL_ENV_VAR: &str = "OPENAI_BASE_URL"; -#[cfg(target_os = "linux")] -const SYSTEM_BWRAP_PATH: &str = "/usr/bin/bwrap"; const RESERVED_MODEL_PROVIDER_IDS: [&str; 3] = [ OPENAI_PROVIDER_ID, OLLAMA_OSS_PROVIDER_ID, LMSTUDIO_OSS_PROVIDER_ID, ]; -#[cfg(target_os = "linux")] -pub fn system_bwrap_warning() -> Option { - system_bwrap_warning_for_path(Path::new(SYSTEM_BWRAP_PATH)) -} - -#[cfg(not(target_os = "linux"))] -pub fn system_bwrap_warning() -> Option { - None -} - -#[cfg(target_os = "linux")] -fn 
system_bwrap_warning_for_path(system_bwrap_path: &Path) -> Option { - if !system_bwrap_path.is_file() { - return Some(format!( - "Codex could not find system bubblewrap at {}. Please install bubblewrap with your package manager. Codex will use the vendored bubblewrap in the meantime.", - system_bwrap_path.display() - )); - } - if system_bwrap_supports_argv0(system_bwrap_path) { - return None; - } - - Some(format!( - "Codex found system bubblewrap at {}, but it is too old to support `--argv0`. Please upgrade bubblewrap with your package manager. Codex will use the vendored bubblewrap in the meantime.", - system_bwrap_path.display() - )) -} - -#[cfg(target_os = "linux")] -fn system_bwrap_supports_argv0(system_bwrap_path: &Path) -> bool { - // bubblewrap added `--argv0` in v0.9.0: - // https://github.com/containers/bubblewrap/releases/tag/v0.9.0 - let output = match Command::new(system_bwrap_path).arg("--help").output() { - Ok(output) => output, - Err(_) => return false, - }; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - stdout.contains("--argv0") || stderr.contains("--argv0") -} - fn resolve_sqlite_home_env(resolved_cwd: &Path) -> Option { let raw = std::env::var(codex_state::SQLITE_HOME_ENV).ok()?; let trimmed = raw.trim(); @@ -208,6 +162,7 @@ fn resolve_sqlite_home_env(resolved_cwd: &Path) -> Option { Some(resolved_cwd.join(path)) } } + #[cfg(test)] pub(crate) fn test_config() -> Config { let codex_home = tempfile::tempdir().expect("create temp dir"); @@ -250,9 +205,6 @@ pub struct Permissions { pub windows_sandbox_mode: Option, /// Whether the final Windows sandboxed child should run on a private desktop. pub windows_sandbox_private_desktop: bool, - /// Optional macOS seatbelt extension profile used to extend default - /// seatbelt permissions when running under seatbelt. - pub macos_seatbelt_profile_extensions: Option, } /// Application configuration loaded from disk and merged with overrides. 
@@ -476,6 +428,10 @@ pub struct Config { /// output will be hyperlinked using the specified URI scheme. pub file_opener: UriBasedFileOpener, + /// Path to the current Codex executable. This cannot be set in the config + /// file: it must be set in code via [`ConfigOverrides`]. + pub codex_self_exe: Option, + /// Path to the `codex-linux-sandbox` executable. This must be set if /// [`codex_sandboxing::SandboxType::LinuxSeccomp`] is used. Note that this /// cannot be set in the config file: it must be set in code via @@ -526,10 +482,6 @@ pub struct Config { /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). pub chatgpt_base_url: String, - /// Experimental / do not use. Overrides the URL used when connecting to - /// a remote exec server. - pub experimental_exec_server_url: Option, - /// Machine-local realtime audio device preferences used by realtime voice. pub realtime_audio: RealtimeAudioConfig, @@ -764,7 +716,7 @@ impl Config { /// designed to use [AskForApproval::Never] exclusively. /// /// Further, [ConfigOverrides] contains some options that are not supported - /// in [ConfigToml], such as `cwd`, `codex_linux_sandbox_exe`, and + /// in [ConfigToml], such as `cwd`, `codex_self_exe`, `codex_linux_sandbox_exe`, and /// `main_execve_wrapper_exe`. pub async fn load_with_cli_overrides_and_harness_overrides( cli_overrides: Vec<(String, TomlValue)>, @@ -1319,10 +1271,6 @@ pub struct ConfigToml { /// Base URL override for the built-in `openai` model provider. pub openai_base_url: Option, - /// Experimental / do not use. Overrides the URL used when connecting to - /// a remote exec server. - pub experimental_exec_server_url: Option, - /// Machine-local realtime audio device preferences used by realtime voice. 
#[serde(default)] pub audio: Option, @@ -1846,6 +1794,7 @@ pub struct ConfigOverrides { pub model_provider: Option, pub service_tier: Option>, pub config_profile: Option, + pub codex_self_exe: Option, pub codex_linux_sandbox_exe: Option, pub main_execve_wrapper_exe: Option, pub js_repl_node_path: Option, @@ -2044,6 +1993,7 @@ impl Config { model_provider, service_tier: service_tier_override, config_profile: config_profile_key, + codex_self_exe, codex_linux_sandbox_exe, main_execve_wrapper_exe, js_repl_node_path: js_repl_node_path_override, @@ -2602,7 +2552,6 @@ impl Config { shell_environment_policy, windows_sandbox_mode, windows_sandbox_private_desktop, - macos_seatbelt_profile_extensions: None, }, approvals_reviewer, enforce_residency: enforce_residency.value, @@ -2650,6 +2599,7 @@ impl Config { history, ephemeral: ephemeral.unwrap_or_default(), file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode), + codex_self_exe, codex_linux_sandbox_exe, main_execve_wrapper_exe, js_repl_node_path, @@ -2678,7 +2628,6 @@ impl Config { .chatgpt_base_url .or(cfg.chatgpt_base_url) .unwrap_or("https://chatgpt.com/backend-api/".to_string()), - experimental_exec_server_url: cfg.experimental_exec_server_url, realtime_audio: cfg .audio .map_or_else(RealtimeAudioConfig::default, |audio| RealtimeAudioConfig { diff --git a/codex-rs/core/src/config/schema.rs b/codex-rs/core/src/config/schema.rs index 102b7da514..53e0f7f028 100644 --- a/codex-rs/core/src/config/schema.rs +++ b/codex-rs/core/src/config/schema.rs @@ -22,6 +22,9 @@ pub(crate) fn features_schema(schema_gen: &mut SchemaGenerator) -> Schema { let mut validation = ObjectValidation::default(); for feature in FEATURES { + if feature.id == codex_features::Feature::Artifact { + continue; + } validation .properties .insert(feature.key.to_string(), schema_gen.subschema_for::()); diff --git a/codex-rs/core/src/config/types.rs b/codex-rs/core/src/config/types.rs index c9f83482aa..05fbaaaa4d 100644 --- 
a/codex-rs/core/src/config/types.rs +++ b/codex-rs/core/src/config/types.rs @@ -108,6 +108,10 @@ pub struct McpServerConfig { /// Optional OAuth resource parameter to include during MCP login (RFC 8707). #[serde(default, skip_serializing_if = "Option::is_none")] pub oauth_resource: Option, + + /// Per-tool approval settings keyed by tool name. + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub tools: HashMap, } // Raw MCP config shape used for deserialization and JSON Schema generation. @@ -154,6 +158,11 @@ pub(crate) struct RawMcpServerConfig { pub scopes: Option>, #[serde(default)] pub oauth_resource: Option, + /// Legacy display-name field accepted for backward compatibility. + #[serde(default, rename = "name")] + pub _name: Option, + #[serde(default)] + pub tools: Option>, } impl<'de> Deserialize<'de> for McpServerConfig { @@ -178,6 +187,7 @@ impl<'de> Deserialize<'de> for McpServerConfig { let disabled_tools = raw.disabled_tools.clone(); let scopes = raw.scopes.clone(); let oauth_resource = raw.oauth_resource.clone(); + let tools = raw.tools.clone().unwrap_or_default(); fn throw_if_set(transport: &str, field: &str, value: Option<&T>) -> Result<(), E> where @@ -236,6 +246,7 @@ impl<'de> Deserialize<'de> for McpServerConfig { disabled_tools, scopes, oauth_resource, + tools, }) } } @@ -496,6 +507,15 @@ pub enum AppToolApproval { Approve, } +/// Per-tool approval settings for a single MCP server tool. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct McpServerToolConfig { + /// Approval mode for this tool. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub approval_mode: Option, +} + /// Default settings that apply to all apps. 
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default, JsonSchema)] #[schemars(deny_unknown_fields)] diff --git a/codex-rs/core/src/config/types_tests.rs b/codex-rs/core/src/config/types_tests.rs index dde0cd18fc..cba01e7876 100644 --- a/codex-rs/core/src/config/types_tests.rs +++ b/codex-rs/core/src/config/types_tests.rs @@ -243,6 +243,40 @@ fn deserialize_server_config_with_tool_filters() { assert_eq!(cfg.disabled_tools, Some(vec!["blocked".to_string()])); } +#[test] +fn deserialize_ignores_unknown_server_fields() { + let cfg: McpServerConfig = toml::from_str( + r#" + command = "echo" + trust_level = "trusted" + "#, + ) + .expect("should ignore unknown server fields"); + + assert_eq!( + cfg, + McpServerConfig { + transport: McpServerTransportConfig::Stdio { + command: "echo".to_string(), + args: vec![], + env: None, + env_vars: Vec::new(), + cwd: None, + }, + enabled: true, + required: false, + disabled_reason: None, + startup_timeout_sec: None, + tool_timeout_sec: None, + enabled_tools: None, + disabled_tools: None, + scopes: None, + oauth_resource: None, + tools: HashMap::new(), + } + ); +} + #[test] fn deserialize_skill_config_with_name_selector() { let cfg: SkillConfig = toml::from_str( diff --git a/codex-rs/core/src/connectors.rs b/codex-rs/core/src/connectors.rs index fdb0f43222..e4999b9b41 100644 --- a/codex-rs/core/src/connectors.rs +++ b/codex-rs/core/src/connectors.rs @@ -16,6 +16,7 @@ pub use codex_app_server_protocol::AppInfo; pub use codex_app_server_protocol::AppMetadata; use codex_connectors::AllConnectorsCacheKey; use codex_connectors::DirectoryListResponse; +use codex_login::token_data::TokenData; use codex_protocol::protocol::SandboxPolicy; use rmcp::model::ToolAnnotations; use serde::Deserialize; @@ -43,7 +44,6 @@ use crate::mcp_connection_manager::codex_apps_tools_cache_key; use crate::plugins::AppConnectorId; use crate::plugins::PluginsManager; use crate::plugins::list_tool_suggest_discoverable_plugins; -use 
crate::token_data::TokenData; use crate::tools::discoverable::DiscoverablePluginInfo; use crate::tools::discoverable::DiscoverableTool; use codex_features::Feature; diff --git a/codex-rs/core/src/error.rs b/codex-rs/core/src/error.rs index b0c55a55c6..1fef42e0ad 100644 --- a/codex-rs/core/src/error.rs +++ b/codex-rs/core/src/error.rs @@ -1,7 +1,5 @@ use crate::exec::ExecToolCallOutput; use crate::network_policy_decision::NetworkPolicyDecisionPayload; -use crate::token_data::KnownPlan; -use crate::token_data::PlanType; use chrono::DateTime; use chrono::Datelike; use chrono::Local; @@ -9,6 +7,8 @@ use chrono::Utc; use codex_async_utils::CancelErr; pub use codex_login::auth::RefreshTokenFailedError; pub use codex_login::auth::RefreshTokenFailedReason; +use codex_login::token_data::KnownPlan; +use codex_login::token_data::PlanType; use codex_protocol::ThreadId; use codex_protocol::protocol::CodexErrorInfo; use codex_protocol::protocol::ErrorEvent; diff --git a/codex-rs/core/src/exec.rs b/codex-rs/core/src/exec.rs index a4399ddc46..fde8782f08 100644 --- a/codex-rs/core/src/exec.rs +++ b/codex-rs/core/src/exec.rs @@ -272,7 +272,7 @@ pub fn build_exec_request( let manager = SandboxManager::new(); let command = SandboxCommand { - program: program.clone(), + program: program.clone().into(), args: args.to_vec(), cwd, env, @@ -292,8 +292,6 @@ pub fn build_exec_request( enforce_managed_network, network: network.as_ref(), sandbox_policy_cwd: sandbox_cwd, - #[cfg(target_os = "macos")] - macos_seatbelt_profile_extensions: None, codex_linux_sandbox_exe: codex_linux_sandbox_exe.as_ref(), use_legacy_landlock, windows_sandbox_level, @@ -480,7 +478,11 @@ async fn exec_windows_sandbox( })?; let command_path = command.first().cloned(); let sandbox_level = windows_sandbox_level; - let use_elevated = matches!(sandbox_level, WindowsSandboxLevel::Elevated); + let proxy_enforced = network.is_some(); + // Windows firewall enforcement is tied to the logon-user sandbox identities, so + // 
proxy-enforced sessions must use that backend even when the configured mode is + // the default restricted-token sandbox. + let use_elevated = proxy_enforced || matches!(sandbox_level, WindowsSandboxLevel::Elevated); let additional_deny_write_paths = windows_restricted_token_filesystem_overlay .map(|overlay| { overlay @@ -493,14 +495,17 @@ async fn exec_windows_sandbox( let spawn_res = tokio::task::spawn_blocking(move || { if use_elevated { run_windows_sandbox_capture_elevated( - policy_str.as_str(), - &sandbox_cwd, - codex_home.as_ref(), - command, - &cwd, - env, - timeout_ms, - windows_sandbox_private_desktop, + codex_windows_sandbox::ElevatedSandboxCaptureRequest { + policy_json_or_preset: policy_str.as_str(), + sandbox_policy_cwd: &sandbox_cwd, + codex_home: codex_home.as_ref(), + command, + cwd: &cwd, + env_map: env, + timeout_ms, + use_private_desktop: windows_sandbox_private_desktop, + proxy_enforced, + }, ) } else { run_windows_sandbox_capture_with_extra_deny_write_paths( diff --git a/codex-rs/core/src/exec_policy_tests.rs b/codex-rs/core/src/exec_policy_tests.rs index f062bcc741..200f302a20 100644 --- a/codex-rs/core/src/exec_policy_tests.rs +++ b/codex-rs/core/src/exec_policy_tests.rs @@ -828,6 +828,33 @@ fn unmatched_on_request_uses_split_filesystem_policy_for_escalation_prompts() { ); } +#[tokio::test] +async fn exec_approval_requirement_prompts_for_inline_additional_permissions_under_on_request() { + assert_exec_approval_requirement_for_command( + ExecApprovalRequirementScenario { + policy_src: None, + command: vec![ + "zsh".to_string(), + "-lc".to_string(), + "touch requested-dir/requested-but-unused.txt".to_string(), + ], + approval_policy: AskForApproval::OnRequest, + sandbox_policy: SandboxPolicy::new_read_only_policy(), + file_system_sandbox_policy: read_only_file_system_sandbox_policy(), + sandbox_permissions: SandboxPermissions::WithAdditionalPermissions, + prefix_rule: None, + }, + ExecApprovalRequirement::NeedsApproval { + reason: None, + 
proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ + "touch".to_string(), + "requested-dir/requested-but-unused.txt".to_string(), + ])), + }, + ) + .await; +} + #[tokio::test] async fn exec_approval_requirement_rejects_unmatched_sandbox_escalation_when_granular_sandbox_is_disabled() { diff --git a/codex-rs/core/src/exec_tests.rs b/codex-rs/core/src/exec_tests.rs index 074db95e1c..07532e5a18 100644 --- a/codex-rs/core/src/exec_tests.rs +++ b/codex-rs/core/src/exec_tests.rs @@ -595,6 +595,19 @@ fn windows_restricted_token_supports_full_read_split_write_read_carveouts() { }, ]); + #[cfg(windows)] + let expected_deny_write_paths = vec![ + codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(cwd.join(".codex")) + .expect("absolute .codex"), + codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(&docs) + .expect("absolute docs"), + ]; + #[cfg(not(windows))] + let expected_deny_write_paths = vec![ + codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(&docs) + .expect("absolute docs"), + ]; + assert_eq!( resolve_windows_restricted_token_filesystem_overlay( SandboxType::WindowsRestrictedToken, @@ -605,10 +618,7 @@ fn windows_restricted_token_supports_full_read_split_write_read_carveouts() { WindowsSandboxLevel::RestrictedToken, ), Ok(Some(WindowsRestrictedTokenFilesystemOverlay { - additional_deny_write_paths: vec![ - codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(&docs) - .expect("absolute docs"), - ], + additional_deny_write_paths: expected_deny_write_paths, })) ); } diff --git a/codex-rs/core/src/hook_runtime.rs b/codex-rs/core/src/hook_runtime.rs index 744db4886b..ece93b1da1 100644 --- a/codex-rs/core/src/hook_runtime.rs +++ b/codex-rs/core/src/hook_runtime.rs @@ -1,6 +1,8 @@ use std::future::Future; use std::sync::Arc; +use codex_hooks::PostToolUseOutcome; +use codex_hooks::PostToolUseRequest; use codex_hooks::PreToolUseOutcome; use codex_hooks::PreToolUseRequest; use codex_hooks::SessionStartOutcome; @@ 
-15,6 +17,7 @@ use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::HookCompletedEvent; use codex_protocol::protocol::HookRunSummary; use codex_protocol::user_input::UserInput; +use serde_json::Value; use crate::codex::Session; use crate::codex::TurnContext; @@ -141,6 +144,33 @@ pub(crate) async fn run_pre_tool_use_hooks( if should_block { block_reason } else { None } } +pub(crate) async fn run_post_tool_use_hooks( + sess: &Arc, + turn_context: &Arc, + tool_use_id: String, + command: String, + tool_response: Value, +) -> PostToolUseOutcome { + let request = PostToolUseRequest { + session_id: sess.conversation_id, + turn_id: turn_context.sub_id.clone(), + cwd: turn_context.cwd.to_path_buf(), + transcript_path: sess.hook_transcript_path().await, + model: turn_context.model_info.slug.clone(), + permission_mode: hook_permission_mode(turn_context), + tool_name: "Bash".to_string(), + tool_use_id, + command, + tool_response, + }; + let preview_runs = sess.hooks().preview_post_tool_use(&request); + emit_hook_started_events(sess, turn_context, preview_runs).await; + + let outcome = sess.hooks().run_post_tool_use(request).await; + emit_hook_completed_events(sess, turn_context, outcome.hook_events.clone()).await; + outcome +} + pub(crate) async fn run_user_prompt_submit_hooks( sess: &Arc, turn_context: &Arc, diff --git a/codex-rs/core/src/landlock.rs b/codex-rs/core/src/landlock.rs index c1dc4ba542..ef55a207e2 100644 --- a/codex-rs/core/src/landlock.rs +++ b/codex-rs/core/src/landlock.rs @@ -5,6 +5,7 @@ use crate::spawn::spawn_child_async; use codex_network_proxy::NetworkProxy; use codex_protocol::permissions::FileSystemSandboxPolicy; use codex_protocol::permissions::NetworkSandboxPolicy; +use codex_sandboxing::landlock::CODEX_LINUX_SANDBOX_ARG0; use codex_sandboxing::landlock::allow_network_for_proxy; use codex_sandboxing::landlock::create_linux_sandbox_command_args_for_policies; use std::collections::HashMap; @@ -48,11 +49,24 @@ where use_legacy_landlock, 
allow_network_for_proxy(/*enforce_managed_network*/ false), ); - let arg0 = Some("codex-linux-sandbox"); + let codex_linux_sandbox_exe = codex_linux_sandbox_exe.as_ref(); + // Preserve the helper alias when we already have it; otherwise force argv0 + // so arg0 dispatch still reaches the Linux sandbox path. + let arg0 = if codex_linux_sandbox_exe + .file_name() + .and_then(|name| name.to_str()) + == Some(CODEX_LINUX_SANDBOX_ARG0) + { + // Old bubblewrap builds without `--argv0` need a real helper path whose + // basename still dispatches to the Linux sandbox entrypoint. + codex_linux_sandbox_exe.to_string_lossy().into_owned() + } else { + CODEX_LINUX_SANDBOX_ARG0.to_string() + }; spawn_child_async(SpawnChildRequest { - program: codex_linux_sandbox_exe.as_ref().to_path_buf(), + program: codex_linux_sandbox_exe.to_path_buf(), args, - arg0, + arg0: Some(&arg0), cwd: command_cwd, network_sandbox_policy, network, diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index 2764f6f805..358ae58f69 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -53,7 +53,6 @@ pub mod models_manager; mod network_policy_decision; pub mod network_proxy_loader; mod original_image_detail; -mod packages; pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY; pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD; pub use mcp_connection_manager::SandboxState; @@ -103,7 +102,6 @@ mod skills_watcher; mod stream_events_utils; pub mod test_support; mod text_encoding; -pub use codex_login::token_data; mod unified_exec; pub mod windows_sandbox; pub use client::X_RESPONSESAPI_INCLUDE_TIMING_METRICS_HEADER; @@ -207,6 +205,7 @@ pub use client_common::REVIEW_PROMPT; pub use client_common::ResponseEvent; pub use client_common::ResponseStream; pub use codex_sandboxing::get_platform_sandbox; +pub use codex_tools::parse_tool_input_schema; pub use compact::content_items_to_text; pub use event_mapping::parse_turn_item; pub use exec_policy::ExecPolicyError; @@ -214,7 
+213,6 @@ pub use exec_policy::check_execpolicy_for_warnings; pub use exec_policy::format_exec_policy_error_with_source; pub use exec_policy::load_exec_policy; pub use file_watcher::FileWatcherEvent; -pub use tools::spec::parse_tool_input_schema; pub use turn_metadata::build_turn_metadata_header; pub mod compact; pub mod memory_trace; diff --git a/codex-rs/core/src/mcp/mod.rs b/codex-rs/core/src/mcp/mod.rs index 81ee0c7fe8..a9d2388f70 100644 --- a/codex-rs/core/src/mcp/mod.rs +++ b/codex-rs/core/src/mcp/mod.rs @@ -176,6 +176,7 @@ fn codex_apps_mcp_server_config(config: &Config, auth: Option<&CodexAuth>) -> Mc disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), } } diff --git a/codex-rs/core/src/mcp/mod_tests.rs b/codex-rs/core/src/mcp/mod_tests.rs index dc9465e103..855e71e1f2 100644 --- a/codex-rs/core/src/mcp/mod_tests.rs +++ b/codex-rs/core/src/mcp/mod_tests.rs @@ -235,6 +235,7 @@ async fn effective_mcp_servers_include_plugins_without_overriding_user_config() disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, ); config diff --git a/codex-rs/core/src/mcp/skill_dependencies.rs b/codex-rs/core/src/mcp/skill_dependencies.rs index 489dc2e438..ba4ef26488 100644 --- a/codex-rs/core/src/mcp/skill_dependencies.rs +++ b/codex-rs/core/src/mcp/skill_dependencies.rs @@ -428,6 +428,7 @@ fn mcp_dependency_to_server_config( disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }); } @@ -453,6 +454,7 @@ fn mcp_dependency_to_server_config( disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }); } diff --git a/codex-rs/core/src/mcp/skill_dependencies_tests.rs b/codex-rs/core/src/mcp/skill_dependencies_tests.rs index ebabe16f80..7387ef4b31 100644 --- a/codex-rs/core/src/mcp/skill_dependencies_tests.rs +++ b/codex-rs/core/src/mcp/skill_dependencies_tests.rs @@ -12,8 +12,6 @@ fn skill_with_tools(tools: Vec) -> SkillMetadata { interface: None, dependencies: 
Some(SkillDependencies { tools }), policy: None, - permission_profile: None, - managed_network_override: None, path_to_skills_md: PathBuf::from("skill"), scope: SkillScope::User, } @@ -48,6 +46,7 @@ fn collect_missing_respects_canonical_installed_key() { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); @@ -97,6 +96,7 @@ fn collect_missing_dedupes_by_canonical_key_but_preserves_original_name() { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, )]); diff --git a/codex-rs/core/src/mcp_connection_manager_tests.rs b/codex-rs/core/src/mcp_connection_manager_tests.rs index c5f7fc4a40..2331d0c1fe 100644 --- a/codex-rs/core/src/mcp_connection_manager_tests.rs +++ b/codex-rs/core/src/mcp_connection_manager_tests.rs @@ -542,6 +542,7 @@ fn mcp_init_error_display_prompts_for_github_pat() { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, auth_status: McpAuthStatus::Unsupported, }; @@ -590,6 +591,7 @@ fn mcp_init_error_display_reports_generic_errors() { disabled_tools: None, scopes: None, oauth_resource: None, + tools: HashMap::new(), }, auth_status: McpAuthStatus::Unsupported, }; diff --git a/codex-rs/core/src/mcp_tool_call.rs b/codex-rs/core/src/mcp_tool_call.rs index d31696db5c..128e7a30ba 100644 --- a/codex-rs/core/src/mcp_tool_call.rs +++ b/codex-rs/core/src/mcp_tool_call.rs @@ -1,7 +1,10 @@ use std::collections::BTreeMap; +use std::collections::HashMap; +use std::path::PathBuf; use std::time::Duration; use std::time::Instant; +use codex_app_server_protocol::ConfigLayerSource; use codex_app_server_protocol::McpElicitationObjectType; use codex_app_server_protocol::McpElicitationSchema; use codex_app_server_protocol::McpServerElicitationRequest; @@ -12,8 +15,10 @@ use crate::arc_monitor::ArcMonitorOutcome; use crate::arc_monitor::monitor_action; use crate::codex::Session; use crate::codex::TurnContext; +use crate::config::Config; use 
crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; +use crate::config::load_global_mcp_servers; use crate::config::types::AppToolApproval; use crate::connectors; use crate::guardian::GuardianApprovalRequest; @@ -33,6 +38,7 @@ use codex_analytics::AppInvocation; use codex_analytics::InvocationType; use codex_analytics::build_track_events_context; use codex_features::Feature; +use codex_otel::sanitize_metric_tag_value; use codex_protocol::mcp::CallToolResult; use codex_protocol::openai_models::InputModality; use codex_protocol::protocol::AskForApproval; @@ -46,10 +52,18 @@ use codex_protocol::request_user_input::RequestUserInputResponse; use codex_rmcp_client::ElicitationAction; use codex_rmcp_client::ElicitationResponse; use rmcp::model::ToolAnnotations; +use serde::Deserialize; use serde::Serialize; use std::path::Path; use std::sync::Arc; use toml_edit::value; +use tracing::Instrument; +use tracing::Span; +use tracing::field::Empty; +use url::Url; + +const MCP_CALL_COUNT_METRIC: &str = "codex.mcp.call"; +const MCP_CALL_DURATION_METRIC: &str = "codex.mcp.call.duration_ms"; /// Handles the specified tool call and dispatches the appropriate /// `McpToolCallBegin` and `McpToolCallEnd` events to the `Session`. 
@@ -100,6 +114,11 @@ pub(crate) async fn handle_mcp_tool_call( } else { connectors::AppToolPolicy::default() }; + let approval_mode = if server == CODEX_APPS_MCP_SERVER_NAME { + app_tool_policy.approval + } else { + custom_mcp_tool_approval_mode(turn_context.as_ref(), &server, &tool_name) + }; if server == CODEX_APPS_MCP_SERVER_NAME && !app_tool_policy.enabled { let result = notify_mcp_tool_call_skip( @@ -113,7 +132,7 @@ pub(crate) async fn handle_mcp_tool_call( .await; let status = if result.is_ok() { "ok" } else { "error" }; turn_context.session_telemetry.counter( - "codex.mcp.call", + MCP_CALL_COUNT_METRIC, /*inc*/ 1, &[("status", status)], ); @@ -121,6 +140,19 @@ pub(crate) async fn handle_mcp_tool_call( } let request_meta = build_mcp_tool_call_request_meta(turn_context.as_ref(), &server, metadata.as_ref()); + let connector_id = metadata + .as_ref() + .and_then(|metadata| metadata.connector_id.clone()); + let connector_name = metadata + .as_ref() + .and_then(|metadata| metadata.connector_name.clone()); + let server_origin = sess + .services + .mcp_connection_manager + .read() + .await + .server_origin(&server) + .map(str::to_string); let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id: call_id.clone(), @@ -134,26 +166,40 @@ pub(crate) async fn handle_mcp_tool_call( &call_id, &invocation, metadata.as_ref(), - app_tool_policy.approval, + approval_mode, ) .await { - let result = match decision { + let (result, call_duration) = match decision { McpToolApprovalDecision::Accept | McpToolApprovalDecision::AcceptForSession | McpToolApprovalDecision::AcceptAndRemember => { maybe_mark_thread_memory_mode_polluted(sess.as_ref(), turn_context.as_ref()).await; let start = Instant::now(); - let result = sess - .call_tool( + let result = async { + sess.call_tool( &server, &tool_name, arguments_value.clone(), request_meta.clone(), ) .await - .map_err(|e| format!("tool call error: {e:?}")); + .map_err(|e| format!("tool call error: {e:?}")) + } 
+ .instrument(mcp_tool_call_span( + sess.as_ref(), + turn_context.as_ref(), + McpToolCallSpanFields { + server_name: &server, + tool_name: &tool_name, + call_id: &call_id, + server_origin: server_origin.as_deref(), + connector_id: connector_id.as_deref(), + connector_name: connector_name.as_deref(), + }, + )) + .await; let result = sanitize_mcp_tool_result_for_model( turn_context .model_info @@ -161,13 +207,14 @@ pub(crate) async fn handle_mcp_tool_call( .contains(&InputModality::Image), result, ); - if let Err(e) = &result { - tracing::warn!("MCP tool call error: {e:?}"); + if let Err(error) = &result { + tracing::warn!("MCP tool call error: {error:?}"); } + let duration = start.elapsed(); let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.clone(), invocation, - duration: start.elapsed(), + duration, result: result.clone(), }); notify_mcp_tool_call_event( @@ -183,50 +230,62 @@ pub(crate) async fn handle_mcp_tool_call( &tool_name, ) .await; - result + (result, Some(duration)) } McpToolApprovalDecision::Decline => { let message = "user rejected MCP tool call".to_string(); - notify_mcp_tool_call_skip( - sess.as_ref(), - turn_context.as_ref(), - &call_id, - invocation, - message, - /*already_started*/ true, + ( + notify_mcp_tool_call_skip( + sess.as_ref(), + turn_context.as_ref(), + &call_id, + invocation, + message, + /*already_started*/ true, + ) + .await, + None, ) - .await } McpToolApprovalDecision::Cancel => { let message = "user cancelled MCP tool call".to_string(); - notify_mcp_tool_call_skip( - sess.as_ref(), - turn_context.as_ref(), - &call_id, - invocation, - message, - /*already_started*/ true, + ( + notify_mcp_tool_call_skip( + sess.as_ref(), + turn_context.as_ref(), + &call_id, + invocation, + message, + /*already_started*/ true, + ) + .await, + None, ) - .await } McpToolApprovalDecision::BlockedBySafetyMonitor(message) => { - notify_mcp_tool_call_skip( - sess.as_ref(), - turn_context.as_ref(), - &call_id, - 
invocation, - message, - /*already_started*/ true, + ( + notify_mcp_tool_call_skip( + sess.as_ref(), + turn_context.as_ref(), + &call_id, + invocation, + message, + /*already_started*/ true, + ) + .await, + None, ) - .await } }; let status = if result.is_ok() { "ok" } else { "error" }; - turn_context.session_telemetry.counter( - "codex.mcp.call", - /*inc*/ 1, - &[("status", status)], + emit_mcp_call_metrics( + turn_context.as_ref(), + status, + &tool_name, + connector_id.as_deref(), + connector_name.as_deref(), + call_duration, ); return CallToolResult::from_result(result); @@ -236,10 +295,24 @@ pub(crate) async fn handle_mcp_tool_call( let start = Instant::now(); // Perform the tool call. - let result = sess - .call_tool(&server, &tool_name, arguments_value.clone(), request_meta) - .await - .map_err(|e| format!("tool call error: {e:?}")); + let result = async { + sess.call_tool(&server, &tool_name, arguments_value.clone(), request_meta) + .await + .map_err(|e| format!("tool call error: {e:?}")) + } + .instrument(mcp_tool_call_span( + sess.as_ref(), + turn_context.as_ref(), + McpToolCallSpanFields { + server_name: &server, + tool_name: &tool_name, + call_id: &call_id, + server_origin: server_origin.as_deref(), + connector_id: connector_id.as_deref(), + connector_name: connector_name.as_deref(), + }, + )) + .await; let result = sanitize_mcp_tool_result_for_model( turn_context .model_info @@ -247,13 +320,14 @@ pub(crate) async fn handle_mcp_tool_call( .contains(&InputModality::Image), result, ); - if let Err(e) = &result { - tracing::warn!("MCP tool call error: {e:?}"); + if let Err(error) = &result { + tracing::warn!("MCP tool call error: {error:?}"); } + let duration = start.elapsed(); let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.clone(), invocation, - duration: start.elapsed(), + duration, result: result.clone(), }); @@ -266,13 +340,119 @@ pub(crate) async fn handle_mcp_tool_call( 
maybe_track_codex_app_used(sess.as_ref(), turn_context.as_ref(), &server, &tool_name).await; let status = if result.is_ok() { "ok" } else { "error" }; - turn_context - .session_telemetry - .counter("codex.mcp.call", /*inc*/ 1, &[("status", status)]); + emit_mcp_call_metrics( + turn_context.as_ref(), + status, + &tool_name, + connector_id.as_deref(), + connector_name.as_deref(), + Some(duration), + ); CallToolResult::from_result(result) } +fn emit_mcp_call_metrics( + turn_context: &TurnContext, + status: &str, + tool_name: &str, + connector_id: Option<&str>, + connector_name: Option<&str>, + duration: Option, +) { + let tags = mcp_call_metric_tags(status, tool_name, connector_id, connector_name); + let tag_refs: Vec<(&str, &str)> = tags + .iter() + .map(|(key, value)| (*key, value.as_str())) + .collect(); + turn_context + .session_telemetry + .counter(MCP_CALL_COUNT_METRIC, /*inc*/ 1, &tag_refs); + if let Some(duration) = duration { + turn_context.session_telemetry.record_duration( + MCP_CALL_DURATION_METRIC, + duration, + &tag_refs, + ); + } +} + +fn mcp_call_metric_tags( + status: &str, + tool_name: &str, + connector_id: Option<&str>, + connector_name: Option<&str>, +) -> Vec<(&'static str, String)> { + let mut tags = vec![ + ("status", sanitize_metric_tag_value(status)), + ("tool", sanitize_metric_tag_value(tool_name)), + ]; + if let Some(connector_id) = connector_id.filter(|connector_id| !connector_id.is_empty()) { + tags.push(("connector_id", sanitize_metric_tag_value(connector_id))); + } + if let Some(connector_name) = connector_name.filter(|connector_name| !connector_name.is_empty()) + { + tags.push(("connector_name", sanitize_metric_tag_value(connector_name))); + } + tags +} + +fn mcp_tool_call_span( + session: &Session, + turn_context: &TurnContext, + fields: McpToolCallSpanFields<'_>, +) -> Span { + let transport = match fields.server_origin { + Some("stdio") => "stdio", + Some(_) => "streamable_http", + None => "", + }; + let span = tracing::info_span!( + 
"mcp.tools.call", + otel.kind = "client", + rpc.system = "jsonrpc", + rpc.method = "tools/call", + mcp.server.name = fields.server_name, + mcp.server.origin = fields.server_origin.unwrap_or(""), + mcp.transport = transport, + mcp.connector.id = fields.connector_id.unwrap_or(""), + mcp.connector.name = fields.connector_name.unwrap_or(""), + tool.name = fields.tool_name, + tool.call_id = fields.call_id, + conversation.id = %session.conversation_id, + session.id = %session.conversation_id, + turn.id = turn_context.sub_id.as_str(), + server.address = Empty, + server.port = Empty, + ); + record_server_fields(&span, fields.server_origin); + span +} + +struct McpToolCallSpanFields<'a> { + server_name: &'a str, + tool_name: &'a str, + call_id: &'a str, + server_origin: Option<&'a str>, + connector_id: Option<&'a str>, + connector_name: Option<&'a str>, +} + +fn record_server_fields(span: &Span, url: Option<&str>) { + let Some(url) = url else { + return; + }; + let Ok(parsed) = Url::parse(url) else { + return; + }; + if let Some(host) = parsed.host_str() { + span.record("server.address", host); + } + if let Some(port) = parsed.port_or_known_default() { + span.record("server.port", port as i64); + } +} + async fn maybe_mark_thread_memory_mode_polluted(sess: &Session, turn_context: &TurnContext) { if !turn_context .config @@ -390,6 +570,27 @@ pub(crate) struct McpToolApprovalMetadata { const MCP_TOOL_CODEX_APPS_META_KEY: &str = "_codex_apps"; +fn custom_mcp_tool_approval_mode( + turn_context: &TurnContext, + server: &str, + tool_name: &str, +) -> AppToolApproval { + turn_context + .config + .config_layer_stack + .effective_config() + .as_table() + .and_then(|table| table.get("mcp_servers")) + .cloned() + .and_then(|value| { + HashMap::::deserialize(value).ok() + }) + .and_then(|servers| servers.get(server).cloned()) + .and_then(|server| server.tools.get(tool_name).cloned()) + .and_then(|tool| tool.approval_mode) + .unwrap_or_default() +} + fn build_mcp_tool_call_request_meta( 
turn_context: &TurnContext, server: &str, @@ -459,7 +660,6 @@ const MCP_TOOL_APPROVAL_TOOL_PARAMS_KEY: &str = "tool_params"; const MCP_TOOL_APPROVAL_TOOL_PARAMS_DISPLAY_KEY: &str = "tool_params_display"; const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_DEFAULT: &str = "mcp_tool_call__default"; const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_ALWAYS_ALLOW: &str = "mcp_tool_call__always_allow"; -const MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_FULL_ACCESS: &str = "mcp_tool_call__full_access"; pub(crate) fn is_mcp_tool_approval_question_id(question_id: &str) -> bool { question_id @@ -494,11 +694,14 @@ async fn maybe_request_mcp_tool_approval( metadata: Option<&McpToolApprovalMetadata>, approval_mode: AppToolApproval, ) -> Option<McpToolApprovalDecision> { + if is_full_access_mode(turn_context) { + return None; + } + let annotations = metadata.and_then(|metadata| metadata.annotations.as_ref()); let approval_required = requires_mcp_tool_approval(annotations); let mut monitor_reason = None; - let auto_approved_by_policy = approval_mode == AppToolApproval::Approve - || (approval_mode == AppToolApproval::Auto && is_full_access_mode(turn_context)); + let auto_approved_by_policy = approval_mode == AppToolApproval::Approve; if auto_approved_by_policy { if !approval_required { @@ -711,12 +914,7 @@ fn persistent_mcp_tool_approval_key( metadata: Option<&McpToolApprovalMetadata>, approval_mode: AppToolApproval, ) -> Option<McpToolApprovalKey> { - if invocation.server != CODEX_APPS_MCP_SERVER_NAME { - return None; - } - session_mcp_tool_approval_key(invocation, metadata, approval_mode) - .filter(|key| key.connector_id.is_some()) } pub(crate) fn build_guardian_mcp_tool_review_request( @@ -764,16 +962,12 @@ fn is_full_access_mode(turn_context: &TurnContext) -> bool { fn mcp_tool_approval_callsite_mode( approval_mode: AppToolApproval, - turn_context: &TurnContext, + _turn_context: &TurnContext, ) -> &'static str { match approval_mode { AppToolApproval::Approve => MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_ALWAYS_ALLOW, AppToolApproval::Auto | 
AppToolApproval::Prompt => { - if approval_mode == AppToolApproval::Auto && is_full_access_mode(turn_context) { - MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_FULL_ACCESS - } else { - MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_DEFAULT - } + MCP_TOOL_CALL_ARC_MONITOR_CALLSITE_DEFAULT } } } @@ -1255,21 +1449,25 @@ async fn maybe_persist_mcp_tool_approval( turn_context: &TurnContext, key: McpToolApprovalKey, ) { - let Some(connector_id) = key.connector_id.clone() else { - remember_mcp_tool_approval(sess, key).await; - return; - }; let tool_name = key.tool_name.clone(); - if let Err(err) = + let persist_result = if key.server == CODEX_APPS_MCP_SERVER_NAME { + let Some(connector_id) = key.connector_id.clone() else { + remember_mcp_tool_approval(sess, key).await; + return; + }; persist_codex_app_tool_approval(&turn_context.config.codex_home, &connector_id, &tool_name) .await - { + } else { + persist_custom_mcp_tool_approval(&turn_context.config, &key.server, &tool_name).await + }; + + if let Err(err) = persist_result { error!( error = %err, - connector_id, + server = key.server, tool_name, - "failed to persist codex app tool approval" + "failed to persist MCP tool approval" ); remember_mcp_tool_approval(sess, key).await; return; @@ -1299,6 +1497,67 @@ async fn persist_codex_app_tool_approval( .await } +async fn persist_custom_mcp_tool_approval( + config: &Config, + server: &str, + tool_name: &str, +) -> anyhow::Result<()> { + let config_folder = if let Some(project_config_folder) = + project_mcp_tool_approval_config_folder(config, server) + { + project_config_folder + } else { + let servers = load_global_mcp_servers(&config.codex_home).await?; + if !servers.contains_key(server) { + anyhow::bail!("MCP server `{server}` is not configured in config.toml"); + } + config.codex_home.clone() + }; + + ConfigEditsBuilder::new(&config_folder) + .with_edits([ConfigEdit::SetPath { + segments: vec![ + "mcp_servers".to_string(), + server.to_string(), + "tools".to_string(), + tool_name.to_string(), + 
"approval_mode".to_string(), + ], + value: value("approve"), + }]) + .apply() + .await +} + +fn project_mcp_tool_approval_config_folder(config: &Config, server: &str) -> Option { + config + .config_layer_stack + .layers_high_to_low() + .into_iter() + .find_map(|layer| { + if !matches!(layer.name, ConfigLayerSource::Project { .. }) { + return None; + } + + let servers = layer + .config + .as_table() + .and_then(|table| table.get("mcp_servers")) + .cloned() + .and_then(|value| { + HashMap::::deserialize(value) + .ok() + })?; + if servers.contains_key(server) { + layer + .config_folder() + .map(|folder| folder.as_path().to_path_buf()) + } else { + None + } + }) +} + fn requires_mcp_tool_approval(annotations: Option<&ToolAnnotations>) -> bool { let destructive_hint = annotations.and_then(|annotations| annotations.destructive_hint); if destructive_hint == Some(true) { diff --git a/codex-rs/core/src/mcp_tool_call_tests.rs b/codex-rs/core/src/mcp_tool_call_tests.rs index 9d7d1c3a41..cf3c761faf 100644 --- a/codex-rs/core/src/mcp_tool_call_tests.rs +++ b/codex-rs/core/src/mcp_tool_call_tests.rs @@ -1,11 +1,14 @@ use super::*; use crate::codex::make_session_and_context; use crate::config::ApprovalsReviewer; +use crate::config::ConfigBuilder; use crate::config::ConfigToml; use crate::config::types::AppConfig; use crate::config::types::AppToolConfig; use crate::config::types::AppToolsConfig; use crate::config::types::AppsConfigToml; +use crate::config::types::McpServerConfig; +use crate::config::types::McpServerToolConfig; use codex_config::CONFIG_TOML_FILE; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; @@ -18,6 +21,10 @@ use serde::Deserialize; use std::collections::HashMap; use std::sync::Arc; use tempfile::tempdir; +use tracing::Instrument; +use tracing::Level; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_test::internal::MockWriter; fn annotations( read_only: Option, @@ -119,6 +126,57 @@ fn 
approval_question_text_prepends_safety_reason() { ); } +#[tokio::test] +async fn mcp_tool_call_span_records_expected_fields() { + let buffer: &'static std::sync::Mutex<Vec<u8>> = + Box::leak(Box::new(std::sync::Mutex::new(Vec::new()))); + let subscriber = tracing_subscriber::fmt() + .with_level(true) + .with_ansi(false) + .with_max_level(Level::TRACE) + .with_span_events(FmtSpan::FULL) + .with_writer(MockWriter::new(buffer)) + .finish(); + let _guard = tracing::subscriber::set_default(subscriber); + + let (session, turn_context) = make_session_and_context().await; + + async {} + .instrument(mcp_tool_call_span( + &session, + &turn_context, + McpToolCallSpanFields { + server_name: "rmcp", + tool_name: "echo", + call_id: "call-123", + server_origin: Some("https://example.com:8443/mcp"), + connector_id: Some("calendar"), + connector_name: Some("Calendar"), + }, + )) + .await; + + let logs = String::from_utf8(buffer.lock().expect("buffer lock").clone()).expect("utf8 logs"); + assert!( + logs.contains("mcp.tools.call{otel.kind=\"client\"") + && logs.contains("rpc.system=\"jsonrpc\"") + && logs.contains("rpc.method=\"tools/call\"") + && logs.contains("mcp.server.name=\"rmcp\"") + && logs.contains("mcp.server.origin=\"https://example.com:8443/mcp\"") + && logs.contains("mcp.transport=\"streamable_http\"") + && logs.contains("mcp.connector.id=\"calendar\"") + && logs.contains("mcp.connector.name=\"Calendar\"") + && logs.contains("tool.name=\"echo\"") + && logs.contains("tool.call_id=\"call-123\"") + && logs.contains("server.address=\"example.com\"") + && logs.contains("server.port=8443") + && logs.contains("conversation.id=") + && logs.contains("session.id=") + && logs.contains("turn.id="), + "missing MCP tool span fields\nlogs:\n{logs}" + ); +} + #[tokio::test] async fn approval_elicitation_request_uses_message_override_and_preserves_tool_params_keys() { let (session, turn_context) = make_session_and_context().await; @@ -323,13 +381,13 @@ fn 
codex_apps_tool_question_without_elicitation_omits_always_allow() { } #[test] -fn custom_mcp_tool_question_offers_session_remember_without_always_allow() { +fn custom_mcp_tool_question_offers_session_remember_and_always_allow() { let question = build_mcp_tool_approval_question( "q".to_string(), "custom_server", "run_action", None, - prompt_options(true, false), + prompt_options(true, true), None, ); @@ -343,13 +401,14 @@ fn custom_mcp_tool_question_offers_session_remember_without_always_allow() { vec![ MCP_TOOL_APPROVAL_ACCEPT.to_string(), MCP_TOOL_APPROVAL_ACCEPT_FOR_SESSION.to_string(), + MCP_TOOL_APPROVAL_ACCEPT_AND_REMEMBER.to_string(), MCP_TOOL_APPROVAL_CANCEL.to_string(), ] ); } #[test] -fn custom_servers_keep_session_remember_without_persistent_approval() { +fn custom_servers_support_session_and_persistent_approval() { let invocation = McpInvocation { server: "custom_server".to_string(), tool: "run_action".to_string(), @@ -363,11 +422,11 @@ fn custom_servers_keep_session_remember_without_persistent_approval() { assert_eq!( session_mcp_tool_approval_key(&invocation, None, AppToolApproval::Auto), - Some(expected) + Some(expected.clone()) ); assert_eq!( persistent_mcp_tool_approval_key(&invocation, None, AppToolApproval::Auto), - None + Some(expected) ); } @@ -557,7 +616,7 @@ fn approval_elicitation_meta_marks_tool_approvals() { } #[test] -fn approval_elicitation_meta_keeps_session_persist_behavior_for_custom_servers() { +fn approval_elicitation_meta_merges_session_and_always_persist_for_custom_servers() { assert_eq!( build_mcp_tool_approval_elicitation_meta( "custom_server", @@ -570,11 +629,14 @@ fn approval_elicitation_meta_keeps_session_persist_behavior_for_custom_servers() )), Some(&serde_json::json!({"id": 1})), None, - prompt_options(true, false), + prompt_options(true, true), ), Some(serde_json::json!({ MCP_TOOL_APPROVAL_KIND_KEY: MCP_TOOL_APPROVAL_KIND_MCP_TOOL_CALL, - MCP_TOOL_APPROVAL_PERSIST_KEY: MCP_TOOL_APPROVAL_PERSIST_SESSION, + 
MCP_TOOL_APPROVAL_PERSIST_KEY: [ + MCP_TOOL_APPROVAL_PERSIST_SESSION, + MCP_TOOL_APPROVAL_PERSIST_ALWAYS, + ], MCP_TOOL_APPROVAL_TOOL_TITLE_KEY: "Run Action", MCP_TOOL_APPROVAL_TOOL_DESCRIPTION_KEY: "Runs the selected action.", MCP_TOOL_APPROVAL_TOOL_PARAMS_KEY: { @@ -788,8 +850,8 @@ fn approval_elicitation_meta_merges_session_and_always_persist_with_connector_so } #[tokio::test] -async fn approval_callsite_mode_distinguishes_default_always_allow_and_full_access() { - let (_session, mut turn_context) = make_session_and_context().await; +async fn approval_callsite_mode_distinguishes_default_and_always_allow() { + let (_session, turn_context) = make_session_and_context().await; assert_eq!( mcp_tool_approval_callsite_mode(AppToolApproval::Auto, &turn_context), @@ -803,20 +865,6 @@ async fn approval_callsite_mode_distinguishes_default_always_allow_and_full_acce mcp_tool_approval_callsite_mode(AppToolApproval::Approve, &turn_context), "mcp_tool_call__always_allow" ); - - turn_context - .approval_policy - .set(AskForApproval::Never) - .expect("test setup should allow updating approval policy"); - turn_context - .sandbox_policy - .set(SandboxPolicy::DangerFullAccess) - .expect("test setup should allow updating sandbox policy"); - - assert_eq!( - mcp_tool_approval_callsite_mode(AppToolApproval::Auto, &turn_context), - "mcp_tool_call__full_access" - ); } #[test] @@ -937,6 +985,41 @@ async fn persist_codex_app_tool_approval_writes_tool_override() { assert!(contents.contains("[apps.calendar.tools.\"calendar/list_events\"]")); } +#[tokio::test] +async fn persist_custom_mcp_tool_approval_writes_tool_override() { + let tmp = tempdir().expect("tempdir"); + std::fs::write( + tmp.path().join(CONFIG_TOML_FILE), + "[mcp_servers.docs]\ncommand = \"docs-server\"\n", + ) + .expect("seed config"); + let config = ConfigBuilder::default() + .codex_home(tmp.path().to_path_buf()) + .build() + .await + .expect("load config"); + + persist_custom_mcp_tool_approval(&config, "docs", "search") + 
.await + .expect("persist approval"); + + let contents = std::fs::read_to_string(tmp.path().join(CONFIG_TOML_FILE)).expect("read config"); + let parsed: ConfigToml = toml::from_str(&contents).expect("parse config"); + let tool = parsed + .mcp_servers + .get("docs") + .and_then(|server| server.tools.get("search")) + .expect("docs/search tool config exists"); + + assert_eq!( + tool, + &McpServerToolConfig { + approval_mode: Some(AppToolApproval::Approve), + } + ); + assert!(contents.contains("[mcp_servers.docs.tools.search]")); +} + #[tokio::test] async fn maybe_persist_mcp_tool_approval_reloads_session_config() { let (session, turn_context) = make_session_and_context().await; @@ -976,6 +1059,104 @@ async fn maybe_persist_mcp_tool_approval_reloads_session_config() { assert_eq!(mcp_tool_approval_is_remembered(&session, &key).await, true); } +#[tokio::test] +async fn maybe_persist_mcp_tool_approval_reloads_session_config_for_custom_server() { + let (session, turn_context) = make_session_and_context().await; + let codex_home = session.codex_home().await; + std::fs::create_dir_all(&codex_home).expect("create codex home"); + std::fs::write( + codex_home.join(CONFIG_TOML_FILE), + "[mcp_servers.docs]\ncommand = \"docs-server\"\n", + ) + .expect("seed config"); + let key = McpToolApprovalKey { + server: "docs".to_string(), + connector_id: None, + tool_name: "search".to_string(), + }; + + maybe_persist_mcp_tool_approval(&session, &turn_context, key.clone()).await; + + let config = session.get_config().await; + let mcp_servers_toml = config + .config_layer_stack + .effective_config() + .as_table() + .and_then(|table| table.get("mcp_servers")) + .cloned() + .expect("mcp_servers table"); + let mcp_servers = HashMap::::deserialize(mcp_servers_toml) + .expect("deserialize MCP servers"); + let tool = mcp_servers + .get("docs") + .and_then(|server| server.tools.get("search")) + .expect("docs/search tool config exists"); + + assert_eq!( + tool, + &McpServerToolConfig { + 
approval_mode: Some(AppToolApproval::Approve), + } + ); + assert_eq!(mcp_tool_approval_is_remembered(&session, &key).await, true); +} + +#[tokio::test] +async fn maybe_persist_mcp_tool_approval_writes_project_config_for_project_server() { + let (session, mut turn_context) = make_session_and_context().await; + let codex_home = session.codex_home().await; + let project_dir = tempdir().expect("tempdir"); + std::fs::write(project_dir.path().join(".git"), "gitdir: nowhere").expect("seed git marker"); + let project_codex_dir = project_dir.path().join(".codex"); + std::fs::create_dir_all(&project_codex_dir).expect("create project .codex dir"); + std::fs::write( + project_codex_dir.join(CONFIG_TOML_FILE), + "[mcp_servers.docs]\ncommand = \"docs-server\"\n", + ) + .expect("seed project config"); + ConfigEditsBuilder::new(&codex_home) + .set_project_trust_level( + project_dir.path(), + codex_protocol::config_types::TrustLevel::Trusted, + ) + .apply() + .await + .expect("trust project"); + let config = ConfigBuilder::default() + .codex_home(codex_home) + .fallback_cwd(Some(project_dir.path().to_path_buf())) + .build() + .await + .expect("load project config"); + turn_context.cwd = config.cwd.clone(); + turn_context.config = Arc::new(config); + let key = McpToolApprovalKey { + server: "docs".to_string(), + connector_id: None, + tool_name: "search".to_string(), + }; + + maybe_persist_mcp_tool_approval(&session, &turn_context, key.clone()).await; + + let contents = std::fs::read_to_string(project_codex_dir.join(CONFIG_TOML_FILE)) + .expect("read project config"); + let parsed: ConfigToml = toml::from_str(&contents).expect("parse project config"); + let tool = parsed + .mcp_servers + .get("docs") + .and_then(|server| server.tools.get("search")) + .expect("docs/search tool config exists"); + + assert_eq!( + tool, + &McpServerToolConfig { + approval_mode: Some(AppToolApproval::Approve), + } + ); + assert!(contents.contains("[mcp_servers.docs.tools.search]")); + 
assert_eq!(mcp_tool_approval_is_remembered(&session, &key).await, true); +} + #[tokio::test] async fn approve_mode_skips_when_annotations_do_not_require_approval() { let (session, turn_context) = make_session_and_context().await; @@ -1078,6 +1259,75 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_for_model() { ); } +#[tokio::test] +async fn custom_approve_mode_blocks_when_arc_returns_interrupt_for_model() { + use wiremock::Mock; + use wiremock::MockServer; + use wiremock::ResponseTemplate; + use wiremock::matchers::method; + use wiremock::matchers::path; + + let server = MockServer::start().await; + Mock::given(method("POST")) + .and(path("/codex/safety/arc")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "outcome": "steer-model", + "short_reason": "needs approval", + "rationale": "high-risk action", + "risk_score": 96, + "risk_level": "critical", + "evidence": [{ + "message": "dangerous_tool", + "why": "high-risk action", + }], + }))) + .expect(1) + .mount(&server) + .await; + + let (session, mut turn_context) = make_session_and_context().await; + turn_context.auth_manager = Some(crate::test_support::auth_manager_from_auth( + crate::CodexAuth::create_dummy_chatgpt_auth_for_testing(), + )); + let mut config = (*turn_context.config).clone(); + config.chatgpt_base_url = server.uri(); + turn_context.config = Arc::new(config); + + let session = Arc::new(session); + let turn_context = Arc::new(turn_context); + let invocation = McpInvocation { + server: "docs".to_string(), + tool: "dangerous_tool".to_string(), + arguments: Some(serde_json::json!({ "id": 1 })), + }; + let metadata = McpToolApprovalMetadata { + annotations: Some(annotations(Some(false), Some(true), Some(true))), + connector_id: None, + connector_name: None, + connector_description: None, + tool_title: Some("Dangerous Tool".to_string()), + tool_description: Some("Performs a risky action.".to_string()), + codex_apps_meta: None, + }; + + let decision = 
maybe_request_mcp_tool_approval( + &session, + &turn_context, + "call-2-custom", + &invocation, + Some(&metadata), + AppToolApproval::Approve, + ) + .await; + + assert_eq!( + decision, + Some(McpToolApprovalDecision::BlockedBySafetyMonitor( + "Tool call was cancelled because of safety risks: high-risk action".to_string(), + )) + ); +} + #[tokio::test] async fn approve_mode_blocks_when_arc_returns_interrupt_without_annotations() { use wiremock::Mock; @@ -1148,7 +1398,7 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_without_annotations() { } #[tokio::test] -async fn full_access_auto_mode_blocks_when_arc_returns_interrupt_for_model() { +async fn full_access_mode_skips_arc_monitor_for_all_approval_modes() { use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; @@ -1169,7 +1419,7 @@ async fn full_access_auto_mode_blocks_when_arc_returns_interrupt_for_model() { "why": "high-risk action", }], }))) - .expect(1) + .expect(0) .mount(&server) .await; @@ -1206,22 +1456,23 @@ async fn full_access_auto_mode_blocks_when_arc_returns_interrupt_for_model() { codex_apps_meta: None, }; - let decision = maybe_request_mcp_tool_approval( - &session, - &turn_context, - "call-2", - &invocation, - Some(&metadata), + for approval_mode in [ AppToolApproval::Auto, - ) - .await; + AppToolApproval::Prompt, + AppToolApproval::Approve, + ] { + let decision = maybe_request_mcp_tool_approval( + &session, + &turn_context, + "call-2", + &invocation, + Some(&metadata), + approval_mode, + ) + .await; - assert_eq!( - decision, - Some(McpToolApprovalDecision::BlockedBySafetyMonitor( - "Tool call was cancelled because of safety risks: high-risk action".to_string(), - )) - ); + assert_eq!(decision, None); + } } #[tokio::test] diff --git a/codex-rs/core/src/memories/prompts.rs b/codex-rs/core/src/memories/prompts.rs index 851179426f..56cb2b7b60 100644 --- a/codex-rs/core/src/memories/prompts.rs +++ b/codex-rs/core/src/memories/prompts.rs @@ -1,37 +1,42 @@ use 
crate::memories::memory_root; use crate::memories::phase_one; use crate::memories::storage::rollout_summary_file_stem_from_parts; -use askama::Template; use codex_protocol::openai_models::ModelInfo; use codex_state::Phase2InputSelection; use codex_state::Stage1Output; use codex_state::Stage1OutputRef; use codex_utils_output_truncation::TruncationPolicy; use codex_utils_output_truncation::truncate_text; +use codex_utils_template::Template; use std::path::Path; +use std::sync::LazyLock; use tokio::fs; use tracing::warn; -#[derive(Template)] -#[template(path = "memories/consolidation.md", escape = "none")] -struct ConsolidationPromptTemplate<'a> { - memory_root: &'a str, - phase2_input_selection: &'a str, -} +static CONSOLIDATION_PROMPT_TEMPLATE: LazyLock