mirror of
https://github.com/openai/codex.git
synced 2026-03-20 12:56:29 +03:00
Compare commits
5 Commits
windows_ke
...
starr/exec
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9c191254cd | ||
|
|
bba7b95d5e | ||
|
|
b4b3ffc0fc | ||
|
|
a4358f2c4f | ||
|
|
e0a7c18424 |
2
codex-rs/Cargo.lock
generated
2
codex-rs/Cargo.lock
generated
@@ -1998,10 +1998,12 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
"clap",
|
||||
"codex-app-server-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-pty",
|
||||
"futures",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
|
||||
@@ -34,7 +34,7 @@ pub(crate) struct FsApi {
|
||||
impl Default for FsApi {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
file_system: Arc::new(Environment::default().get_filesystem()),
|
||||
file_system: Environment::default().get_filesystem(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,6 +42,7 @@ use crate::realtime_conversation::handle_close as handle_realtime_conversation_c
|
||||
use crate::realtime_conversation::handle_start as handle_realtime_conversation_start;
|
||||
use crate::realtime_conversation::handle_text as handle_realtime_conversation_text;
|
||||
use crate::rollout::session_index;
|
||||
use crate::skills::build_skill_injections;
|
||||
use crate::skills::render_skills_section;
|
||||
use crate::stream_events_utils::HandleOutputCtx;
|
||||
use crate::stream_events_utils::handle_non_tool_response_item;
|
||||
@@ -282,7 +283,6 @@ use crate::skills::SkillInjections;
|
||||
use crate::skills::SkillLoadOutcome;
|
||||
use crate::skills::SkillMetadata;
|
||||
use crate::skills::SkillsManager;
|
||||
use crate::skills::build_skill_injections;
|
||||
use crate::skills::collect_env_var_dependencies;
|
||||
use crate::skills::collect_explicit_skill_mentions;
|
||||
use crate::skills::injection::ToolMentionKind;
|
||||
@@ -314,6 +314,7 @@ use crate::turn_timing::TurnTimingState;
|
||||
use crate::turn_timing::record_turn_ttfm_metric;
|
||||
use crate::turn_timing::record_turn_ttft_metric;
|
||||
use crate::unified_exec::UnifiedExecProcessManager;
|
||||
use crate::unified_exec::unified_exec_session_factory_for_environment;
|
||||
use crate::util::backoff;
|
||||
use crate::windows_sandbox::WindowsSandboxLevelExt;
|
||||
use codex_async_utils::OrCancelExt;
|
||||
@@ -482,7 +483,12 @@ impl Codex {
|
||||
config.startup_warnings.push(message);
|
||||
}
|
||||
|
||||
let user_instructions = get_user_instructions(&config).await;
|
||||
let environment = Arc::new(
|
||||
Environment::create(config.experimental_exec_server_url.clone())
|
||||
.await
|
||||
.map_err(|err| CodexErr::Fatal(format!("failed to create environment: {err}")))?,
|
||||
);
|
||||
let user_instructions = get_user_instructions(&config, environment.filesystem()).await;
|
||||
|
||||
let exec_policy = if crate::guardian::is_guardian_reviewer_source(&session_source) {
|
||||
// Guardian review should rely on the built-in shell safety checks,
|
||||
@@ -1773,6 +1779,9 @@ impl Session {
|
||||
});
|
||||
}
|
||||
|
||||
let unified_exec_session_factory =
|
||||
unified_exec_session_factory_for_environment(environment.as_ref());
|
||||
|
||||
let services = SessionServices {
|
||||
// Initialize the MCP connection manager with an uninitialized
|
||||
// instance. It will be replaced with one created via
|
||||
@@ -1785,8 +1794,9 @@ impl Session {
|
||||
&config.permissions.approval_policy,
|
||||
))),
|
||||
mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()),
|
||||
unified_exec_manager: UnifiedExecProcessManager::new(
|
||||
unified_exec_manager: UnifiedExecProcessManager::with_session_factory(
|
||||
config.background_terminal_max_timeout,
|
||||
unified_exec_session_factory,
|
||||
),
|
||||
shell_zsh_path: config.zsh_path.clone(),
|
||||
main_execve_wrapper_exe: config.main_execve_wrapper_exe.clone(),
|
||||
@@ -1826,9 +1836,7 @@ impl Session {
|
||||
code_mode_service: crate::tools::code_mode::CodeModeService::new(
|
||||
config.js_repl_node_path.clone(),
|
||||
),
|
||||
environment: Arc::new(
|
||||
Environment::create(config.experimental_exec_server_url.clone()).await?,
|
||||
),
|
||||
environment,
|
||||
};
|
||||
let js_repl = Arc::new(JsReplHandle::with_node_path(
|
||||
config.js_repl_node_path.clone(),
|
||||
@@ -5472,6 +5480,7 @@ pub(crate) async fn run_turn(
|
||||
Some(&session_telemetry),
|
||||
&sess.services.analytics_events_client,
|
||||
tracking.clone(),
|
||||
turn_context.environment.get_filesystem(),
|
||||
)
|
||||
.await;
|
||||
|
||||
|
||||
@@ -22,9 +22,11 @@ use crate::config_loader::merge_toml_values;
|
||||
use crate::config_loader::project_root_markers_from_config;
|
||||
use crate::features::Feature;
|
||||
use codex_app_server_protocol::ConfigLayerSource;
|
||||
use codex_exec_server::ExecutorFileSystem;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use dunce::canonicalize as normalize_path;
|
||||
use std::path::PathBuf;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use std::sync::Arc;
|
||||
use toml::Value as TomlValue;
|
||||
use tracing::error;
|
||||
|
||||
@@ -76,9 +78,11 @@ fn render_js_repl_instructions(config: &Config) -> Option<String> {
|
||||
|
||||
/// Combines `Config::instructions` and `AGENTS.md` (if present) into a single
|
||||
/// string of instructions.
|
||||
pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
|
||||
let project_docs = read_project_docs(config).await;
|
||||
|
||||
pub(crate) async fn get_user_instructions(
|
||||
config: &Config,
|
||||
filesystem: Arc<dyn ExecutorFileSystem>,
|
||||
) -> Option<String> {
|
||||
let project_docs = read_project_docs(config, filesystem).await;
|
||||
let mut output = String::new();
|
||||
|
||||
if let Some(instructions) = config.user_instructions.clone() {
|
||||
@@ -125,7 +129,17 @@ pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
|
||||
/// concatenation of all discovered docs. If no documentation file is found the
|
||||
/// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so
|
||||
/// callers can decide how to handle them.
|
||||
pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> {
|
||||
pub async fn read_project_docs(
|
||||
config: &Config,
|
||||
filesystem: Arc<dyn ExecutorFileSystem>,
|
||||
) -> std::io::Result<Option<String>> {
|
||||
read_project_docs_with_filesystem(config, &filesystem).await
|
||||
}
|
||||
|
||||
async fn read_project_docs_with_filesystem(
|
||||
config: &Config,
|
||||
filesystem: &Arc<dyn ExecutorFileSystem>,
|
||||
) -> std::io::Result<Option<String>> {
|
||||
let max_total = config.project_doc_max_bytes;
|
||||
|
||||
if max_total == 0 {
|
||||
@@ -145,18 +159,28 @@ pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String
|
||||
break;
|
||||
}
|
||||
|
||||
let file = match tokio::fs::File::open(&p).await {
|
||||
Ok(f) => f,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(e) => return Err(e),
|
||||
let path = match AbsolutePathBuf::try_from(p.as_path()) {
|
||||
Ok(path) => path,
|
||||
Err(err) => {
|
||||
error!("skipping non-absolute AGENTS path `{}`: {err}", p.display());
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let size = file.metadata().await?.len();
|
||||
let mut reader = tokio::io::BufReader::new(file).take(remaining);
|
||||
let mut data: Vec<u8> = Vec::new();
|
||||
reader.read_to_end(&mut data).await?;
|
||||
let data = match filesystem.read_file(&path).await {
|
||||
Ok(data) => data,
|
||||
Err(err) if err.0.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(err) => return Err(err.0),
|
||||
};
|
||||
|
||||
if size > remaining {
|
||||
let truncated = data.len() > remaining as usize;
|
||||
let truncated_data = if truncated {
|
||||
data[..remaining as usize].to_vec()
|
||||
} else {
|
||||
data
|
||||
};
|
||||
|
||||
if truncated {
|
||||
tracing::warn!(
|
||||
"Project doc `{}` exceeds remaining budget ({} bytes) - truncating.",
|
||||
p.display(),
|
||||
@@ -164,10 +188,10 @@ pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String
|
||||
);
|
||||
}
|
||||
|
||||
let text = String::from_utf8_lossy(&data).to_string();
|
||||
let text = String::from_utf8_lossy(&truncated_data).to_string();
|
||||
if !text.trim().is_empty() {
|
||||
parts.push(text);
|
||||
remaining = remaining.saturating_sub(data.len() as u64);
|
||||
remaining = remaining.saturating_sub(truncated_data.len() as u64);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
use super::*;
|
||||
use crate::config::ConfigBuilder;
|
||||
use crate::features::Feature;
|
||||
use codex_exec_server::Environment;
|
||||
use codex_exec_server::ExecutorFileSystem;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn test_filesystem() -> Arc<dyn ExecutorFileSystem> {
|
||||
Environment::default().filesystem()
|
||||
}
|
||||
|
||||
/// Helper that returns a `Config` pointing at `root` and using `limit` as
|
||||
/// the maximum number of bytes to embed from AGENTS.md. The caller can
|
||||
/// optionally specify a custom `instructions` string – when `None` the
|
||||
@@ -73,7 +80,7 @@ async fn make_config_with_project_root_markers(
|
||||
async fn no_doc_file_returns_none() {
|
||||
let tmp = tempfile::tempdir().expect("tempdir");
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, None).await).await;
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, None).await, test_filesystem()).await;
|
||||
assert!(
|
||||
res.is_none(),
|
||||
"Expected None when AGENTS.md is absent and no system instructions provided"
|
||||
@@ -87,7 +94,7 @@ async fn doc_smaller_than_limit_is_returned() {
|
||||
let tmp = tempfile::tempdir().expect("tempdir");
|
||||
fs::write(tmp.path().join("AGENTS.md"), "hello world").unwrap();
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, None).await)
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, None).await, test_filesystem())
|
||||
.await
|
||||
.expect("doc expected");
|
||||
|
||||
@@ -106,7 +113,7 @@ async fn doc_larger_than_limit_is_truncated() {
|
||||
let huge = "A".repeat(LIMIT * 2); // 2 KiB
|
||||
fs::write(tmp.path().join("AGENTS.md"), &huge).unwrap();
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, LIMIT, None).await)
|
||||
let res = get_user_instructions(&make_config(&tmp, LIMIT, None).await, test_filesystem())
|
||||
.await
|
||||
.expect("doc expected");
|
||||
|
||||
@@ -138,7 +145,9 @@ async fn finds_doc_in_repo_root() {
|
||||
let mut cfg = make_config(&repo, 4096, None).await;
|
||||
cfg.cwd = nested;
|
||||
|
||||
let res = get_user_instructions(&cfg).await.expect("doc expected");
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("doc expected");
|
||||
assert_eq!(res, "root level doc");
|
||||
}
|
||||
|
||||
@@ -148,7 +157,7 @@ async fn zero_byte_limit_disables_docs() {
|
||||
let tmp = tempfile::tempdir().expect("tempdir");
|
||||
fs::write(tmp.path().join("AGENTS.md"), "something").unwrap();
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, 0, None).await).await;
|
||||
let res = get_user_instructions(&make_config(&tmp, 0, None).await, test_filesystem()).await;
|
||||
assert!(
|
||||
res.is_none(),
|
||||
"With limit 0 the function should return None"
|
||||
@@ -163,7 +172,7 @@ async fn js_repl_instructions_are_appended_when_enabled() {
|
||||
.enable(Feature::JsRepl)
|
||||
.expect("test config should allow js_repl");
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("js_repl instructions expected");
|
||||
let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. 
Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`.";
|
||||
@@ -182,7 +191,7 @@ async fn js_repl_tools_only_instructions_are_feature_gated() {
|
||||
.set(features)
|
||||
.expect("test config should allow js_repl tool restrictions");
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("js_repl instructions expected");
|
||||
let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. 
Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Do not call tools directly; use `js_repl` + `codex.tool(...)` for all tool calls, including shell commands.\n- MCP tools (if any) can also be called by name via `codex.tool(...)`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`.";
|
||||
@@ -201,7 +210,7 @@ async fn js_repl_image_detail_original_does_not_change_instructions() {
|
||||
.set(features)
|
||||
.expect("test config should allow js_repl image detail settings");
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("js_repl instructions expected");
|
||||
let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. 
Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`.";
|
||||
@@ -217,9 +226,12 @@ async fn merges_existing_instructions_with_project_doc() {
|
||||
|
||||
const INSTRUCTIONS: &str = "base instructions";
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await)
|
||||
.await
|
||||
.expect("should produce a combined instruction string");
|
||||
let res = get_user_instructions(
|
||||
&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await,
|
||||
test_filesystem(),
|
||||
)
|
||||
.await
|
||||
.expect("should produce a combined instruction string");
|
||||
|
||||
let expected = format!("{INSTRUCTIONS}{PROJECT_DOC_SEPARATOR}{}", "proj doc");
|
||||
|
||||
@@ -234,7 +246,11 @@ async fn keeps_existing_instructions_when_doc_missing() {
|
||||
|
||||
const INSTRUCTIONS: &str = "some instructions";
|
||||
|
||||
let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await).await;
|
||||
let res = get_user_instructions(
|
||||
&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await,
|
||||
test_filesystem(),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert_eq!(res, Some(INSTRUCTIONS.to_string()));
|
||||
}
|
||||
@@ -263,7 +279,9 @@ async fn concatenates_root_and_cwd_docs() {
|
||||
let mut cfg = make_config(&repo, 4096, None).await;
|
||||
cfg.cwd = nested;
|
||||
|
||||
let res = get_user_instructions(&cfg).await.expect("doc expected");
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("doc expected");
|
||||
assert_eq!(res, "root doc\n\ncrate doc");
|
||||
}
|
||||
|
||||
@@ -289,7 +307,9 @@ async fn project_root_markers_are_honored_for_agents_discovery() {
|
||||
assert_eq!(discovery[0], expected_parent);
|
||||
assert_eq!(discovery[1], expected_child);
|
||||
|
||||
let res = get_user_instructions(&cfg).await.expect("doc expected");
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("doc expected");
|
||||
assert_eq!(res, "parent doc\n\nchild doc");
|
||||
}
|
||||
|
||||
@@ -302,7 +322,7 @@ async fn agents_local_md_preferred() {
|
||||
|
||||
let cfg = make_config(&tmp, 4096, None).await;
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("local doc expected");
|
||||
|
||||
@@ -324,7 +344,7 @@ async fn uses_configured_fallback_when_agents_missing() {
|
||||
|
||||
let cfg = make_config_with_fallback(&tmp, 4096, None, &["EXAMPLE.md"]).await;
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("fallback doc expected");
|
||||
|
||||
@@ -340,7 +360,7 @@ async fn agents_md_preferred_over_fallbacks() {
|
||||
|
||||
let cfg = make_config_with_fallback(&tmp, 4096, None, &["EXAMPLE.md", ".example.md"]).await;
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("AGENTS.md should win");
|
||||
|
||||
@@ -369,7 +389,7 @@ async fn skills_are_not_appended_to_project_doc() {
|
||||
"extract from pdfs",
|
||||
);
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("instructions expected");
|
||||
assert_eq!(res, "base doc");
|
||||
@@ -383,7 +403,7 @@ async fn apps_feature_does_not_emit_user_instructions_by_itself() {
|
||||
.enable(Feature::Apps)
|
||||
.expect("test config should allow apps");
|
||||
|
||||
let res = get_user_instructions(&cfg).await;
|
||||
let res = get_user_instructions(&cfg, test_filesystem()).await;
|
||||
assert_eq!(res, None);
|
||||
}
|
||||
|
||||
@@ -397,7 +417,7 @@ async fn apps_feature_does_not_append_to_project_doc_user_instructions() {
|
||||
.enable(Feature::Apps)
|
||||
.expect("test config should allow apps");
|
||||
|
||||
let res = get_user_instructions(&cfg)
|
||||
let res = get_user_instructions(&cfg, test_filesystem())
|
||||
.await
|
||||
.expect("instructions expected");
|
||||
assert_eq!(res, "base doc");
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::ErrorKind;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::analytics_client::AnalyticsEventsClient;
|
||||
use crate::analytics_client::InvocationType;
|
||||
@@ -10,10 +13,34 @@ use crate::instructions::SkillInstructions;
|
||||
use crate::mention_syntax::TOOL_MENTION_SIGIL;
|
||||
use crate::mentions::build_skill_name_counts;
|
||||
use crate::skills::SkillMetadata;
|
||||
use codex_exec_server::ExecutorFileSystem;
|
||||
use codex_otel::SessionTelemetry;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use tokio::fs;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
|
||||
fn absolute_path(path: &Path) -> Result<AbsolutePathBuf, std::io::Error> {
|
||||
AbsolutePathBuf::try_from(path).map_err(|error| {
|
||||
std::io::Error::new(
|
||||
ErrorKind::InvalidInput,
|
||||
format!("invalid skill path: {error}"),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
async fn read_skill_contents(
|
||||
path: &Path,
|
||||
filesystem: &Arc<dyn ExecutorFileSystem>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let abs_path = absolute_path(path)?;
|
||||
let contents = filesystem.read_file(&abs_path).await.map_err(|err| err.0)?;
|
||||
String::from_utf8(contents).map_err(|error| {
|
||||
std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
format!("failed to decode skill file {}: {error}", path.display()),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct SkillInjections {
|
||||
@@ -26,6 +53,7 @@ pub(crate) async fn build_skill_injections(
|
||||
otel: Option<&SessionTelemetry>,
|
||||
analytics_client: &AnalyticsEventsClient,
|
||||
tracking: TrackEventsContext,
|
||||
filesystem: Arc<dyn ExecutorFileSystem>,
|
||||
) -> SkillInjections {
|
||||
if mentioned_skills.is_empty() {
|
||||
return SkillInjections::default();
|
||||
@@ -38,7 +66,7 @@ pub(crate) async fn build_skill_injections(
|
||||
let mut invocations = Vec::new();
|
||||
|
||||
for skill in mentioned_skills {
|
||||
match fs::read_to_string(&skill.path_to_skills_md).await {
|
||||
match read_skill_contents(&skill.path_to_skills_md, &filesystem).await {
|
||||
Ok(contents) => {
|
||||
emit_skill_injected_metric(otel, skill, "ok");
|
||||
invocations.push(SkillInvocation {
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::VecDeque;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_string::take_bytes_at_char_boundary;
|
||||
use serde::Deserialize;
|
||||
|
||||
@@ -100,7 +101,7 @@ impl ToolHandler for ReadFileHandler {
|
||||
}
|
||||
|
||||
async fn handle(&self, invocation: ToolInvocation) -> Result<Self::Output, FunctionCallError> {
|
||||
let ToolInvocation { payload, .. } = invocation;
|
||||
let ToolInvocation { payload, turn, .. } = invocation;
|
||||
|
||||
let arguments = match payload {
|
||||
ToolPayload::Function { arguments } => arguments,
|
||||
@@ -140,11 +141,23 @@ impl ToolHandler for ReadFileHandler {
|
||||
));
|
||||
}
|
||||
|
||||
let abs_path = AbsolutePathBuf::try_from(path.clone()).map_err(|error| {
|
||||
FunctionCallError::RespondToModel(format!("invalid absolute file path: {error}"))
|
||||
})?;
|
||||
let contents = turn
|
||||
.environment
|
||||
.get_filesystem()
|
||||
.read_file(&abs_path)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
|
||||
})?;
|
||||
|
||||
let collected = match mode {
|
||||
ReadMode::Slice => slice::read(&path, offset, limit).await?,
|
||||
ReadMode::Slice => slice::read(&contents, offset, limit)?,
|
||||
ReadMode::Indentation => {
|
||||
let indentation = indentation.unwrap_or_default();
|
||||
indentation::read_block(&path, offset, limit, indentation).await?
|
||||
indentation::read_block(&contents, offset, limit, indentation)?
|
||||
}
|
||||
};
|
||||
Ok(FunctionToolOutput::from_text(
|
||||
@@ -157,42 +170,21 @@ impl ToolHandler for ReadFileHandler {
|
||||
mod slice {
|
||||
use crate::function_tool::FunctionCallError;
|
||||
use crate::tools::handlers::read_file::format_line;
|
||||
use std::path::Path;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::BufReader;
|
||||
use crate::tools::handlers::read_file::strip_trailing_cr;
|
||||
|
||||
pub async fn read(
|
||||
path: &Path,
|
||||
pub fn read(
|
||||
contents: &[u8],
|
||||
offset: usize,
|
||||
limit: usize,
|
||||
) -> Result<Vec<String>, FunctionCallError> {
|
||||
let file = File::open(path).await.map_err(|err| {
|
||||
FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
|
||||
})?;
|
||||
|
||||
let mut reader = BufReader::new(file);
|
||||
if contents.is_empty() {
|
||||
return Err(FunctionCallError::RespondToModel(
|
||||
"offset exceeds file length".to_string(),
|
||||
));
|
||||
}
|
||||
let mut collected = Vec::new();
|
||||
let mut seen = 0usize;
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
loop {
|
||||
buffer.clear();
|
||||
let bytes_read = reader.read_until(b'\n', &mut buffer).await.map_err(|err| {
|
||||
FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
|
||||
})?;
|
||||
|
||||
if bytes_read == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
if buffer.last() == Some(&b'\n') {
|
||||
buffer.pop();
|
||||
if buffer.last() == Some(&b'\r') {
|
||||
buffer.pop();
|
||||
}
|
||||
}
|
||||
|
||||
for buffer in contents.split(|byte| *byte == b'\n') {
|
||||
seen += 1;
|
||||
|
||||
if seen < offset {
|
||||
@@ -203,7 +195,9 @@ mod slice {
|
||||
break;
|
||||
}
|
||||
|
||||
let formatted = format_line(&buffer);
|
||||
let line = strip_trailing_cr(buffer);
|
||||
|
||||
let formatted = format_line(line);
|
||||
collected.push(format!("L{seen}: {formatted}"));
|
||||
|
||||
if collected.len() == limit {
|
||||
@@ -227,15 +221,12 @@ mod indentation {
|
||||
use crate::tools::handlers::read_file::LineRecord;
|
||||
use crate::tools::handlers::read_file::TAB_WIDTH;
|
||||
use crate::tools::handlers::read_file::format_line;
|
||||
use crate::tools::handlers::read_file::strip_trailing_cr;
|
||||
use crate::tools::handlers::read_file::trim_empty_lines;
|
||||
use std::collections::VecDeque;
|
||||
use std::path::Path;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::BufReader;
|
||||
|
||||
pub async fn read_block(
|
||||
path: &Path,
|
||||
pub fn read_block(
|
||||
contents: &[u8],
|
||||
offset: usize,
|
||||
limit: usize,
|
||||
options: IndentationArgs,
|
||||
@@ -254,7 +245,7 @@ mod indentation {
|
||||
));
|
||||
}
|
||||
|
||||
let collected = collect_file_lines(path).await?;
|
||||
let collected = collect_file_lines(contents);
|
||||
if collected.is_empty() || anchor_line > collected.len() {
|
||||
return Err(FunctionCallError::RespondToModel(
|
||||
"anchor_line exceeds file length".to_string(),
|
||||
@@ -366,37 +357,17 @@ mod indentation {
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn collect_file_lines(path: &Path) -> Result<Vec<LineRecord>, FunctionCallError> {
|
||||
let file = File::open(path).await.map_err(|err| {
|
||||
FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
|
||||
})?;
|
||||
|
||||
let mut reader = BufReader::new(file);
|
||||
let mut buffer = Vec::new();
|
||||
fn collect_file_lines(contents: &[u8]) -> Vec<LineRecord> {
|
||||
if contents.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
let mut lines = Vec::new();
|
||||
let mut number = 0usize;
|
||||
|
||||
loop {
|
||||
buffer.clear();
|
||||
let bytes_read = reader.read_until(b'\n', &mut buffer).await.map_err(|err| {
|
||||
FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
|
||||
})?;
|
||||
|
||||
if bytes_read == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
if buffer.last() == Some(&b'\n') {
|
||||
buffer.pop();
|
||||
if buffer.last() == Some(&b'\r') {
|
||||
buffer.pop();
|
||||
}
|
||||
}
|
||||
|
||||
number += 1;
|
||||
let raw = String::from_utf8_lossy(&buffer).into_owned();
|
||||
for (index, line_bytes) in contents.split(|byte| *byte == b'\n').enumerate() {
|
||||
let number = index + 1;
|
||||
let line_bytes = strip_trailing_cr(line_bytes);
|
||||
let raw = String::from_utf8_lossy(line_bytes).into_owned();
|
||||
let indent = measure_indent(&raw);
|
||||
let display = format_line(&buffer);
|
||||
let display = format_line(line_bytes);
|
||||
lines.push(LineRecord {
|
||||
number,
|
||||
raw,
|
||||
@@ -404,8 +375,7 @@ mod indentation {
|
||||
indent,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(lines)
|
||||
lines
|
||||
}
|
||||
|
||||
fn compute_effective_indents(records: &[LineRecord]) -> Vec<usize> {
|
||||
@@ -430,6 +400,10 @@ mod indentation {
|
||||
}
|
||||
}
|
||||
|
||||
fn strip_trailing_cr(bytes: &[u8]) -> &[u8] {
|
||||
bytes.strip_suffix(b"\r").unwrap_or(bytes)
|
||||
}
|
||||
|
||||
fn format_line(bytes: &[u8]) -> String {
|
||||
let decoded = String::from_utf8_lossy(bytes);
|
||||
if decoded.len() > MAX_LINE_LENGTH {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use async_trait::async_trait;
|
||||
use codex_exec_server::ExecutorFileSystem;
|
||||
use codex_protocol::models::FunctionCallOutputBody;
|
||||
use codex_protocol::models::FunctionCallOutputContentItem;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
|
||||
@@ -46,6 +46,7 @@ use std::path::PathBuf;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct UnifiedExecRequest {
|
||||
pub process_id: i32,
|
||||
pub command: Vec<String>,
|
||||
pub cwd: PathBuf,
|
||||
pub env: HashMap<String, String>,
|
||||
@@ -239,6 +240,7 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
|
||||
return self
|
||||
.manager
|
||||
.open_session_with_exec_env(
|
||||
req.process_id,
|
||||
&prepared.exec_request,
|
||||
req.tty,
|
||||
prepared.spawn_lifecycle,
|
||||
@@ -275,7 +277,12 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
|
||||
.env_for(spec, req.network.as_ref())
|
||||
.map_err(|err| ToolError::Codex(err.into()))?;
|
||||
self.manager
|
||||
.open_session_with_exec_env(&exec_env, req.tty, Box::new(NoopSpawnLifecycle))
|
||||
.open_session_with_exec_env(
|
||||
req.process_id,
|
||||
&exec_env,
|
||||
req.tty,
|
||||
Box::new(NoopSpawnLifecycle),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| match err {
|
||||
UnifiedExecError::SandboxDenied { output, .. } => {
|
||||
|
||||
148
codex-rs/core/src/unified_exec/backend.rs
Normal file
148
codex-rs/core/src/unified_exec/backend.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use codex_exec_server::Environment;
|
||||
use codex_exec_server::ExecServerClient;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::exec::SandboxType;
|
||||
use crate::sandboxing::ExecRequest;
|
||||
use crate::unified_exec::SpawnLifecycleHandle;
|
||||
use crate::unified_exec::UnifiedExecError;
|
||||
use crate::unified_exec::UnifiedExecProcess;
|
||||
|
||||
pub(crate) type UnifiedExecSessionFactoryHandle = Arc<dyn UnifiedExecSessionFactory>;
|
||||
|
||||
#[async_trait]
|
||||
pub(crate) trait UnifiedExecSessionFactory: std::fmt::Debug + Send + Sync {
|
||||
async fn open_session(
|
||||
&self,
|
||||
process_id: i32,
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<UnifiedExecProcess, UnifiedExecError>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct LocalUnifiedExecSessionFactory;
|
||||
|
||||
pub(crate) fn local_unified_exec_session_factory() -> UnifiedExecSessionFactoryHandle {
|
||||
Arc::new(LocalUnifiedExecSessionFactory)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UnifiedExecSessionFactory for LocalUnifiedExecSessionFactory {
|
||||
async fn open_session(
|
||||
&self,
|
||||
_process_id: i32,
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<UnifiedExecProcess, UnifiedExecError> {
|
||||
open_local_session(env, tty, spawn_lifecycle).await
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ExecServerUnifiedExecSessionFactory {
|
||||
client: ExecServerClient,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ExecServerUnifiedExecSessionFactory {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("ExecServerUnifiedExecSessionFactory")
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecServerUnifiedExecSessionFactory {
|
||||
pub(crate) fn from_client(client: ExecServerClient) -> UnifiedExecSessionFactoryHandle {
|
||||
Arc::new(Self { client })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UnifiedExecSessionFactory for ExecServerUnifiedExecSessionFactory {
|
||||
async fn open_session(
|
||||
&self,
|
||||
process_id: i32,
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<UnifiedExecProcess, UnifiedExecError> {
|
||||
let inherited_fds = spawn_lifecycle.inherited_fds();
|
||||
if !inherited_fds.is_empty() {
|
||||
debug!(
|
||||
process_id,
|
||||
inherited_fd_count = inherited_fds.len(),
|
||||
"falling back to local unified-exec backend because exec-server does not support inherited fds",
|
||||
);
|
||||
return open_local_session(env, tty, spawn_lifecycle).await;
|
||||
}
|
||||
|
||||
if env.sandbox == SandboxType::WindowsRestrictedToken {
|
||||
debug!(
|
||||
process_id,
|
||||
"falling back to local unified-exec backend because Windows restricted-token execution is not modeled by exec-server",
|
||||
);
|
||||
return open_local_session(env, tty, spawn_lifecycle).await;
|
||||
}
|
||||
|
||||
UnifiedExecProcess::from_exec_server(
|
||||
self.client.clone(),
|
||||
process_id,
|
||||
env,
|
||||
tty,
|
||||
spawn_lifecycle,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn unified_exec_session_factory_for_environment(
|
||||
environment: &Environment,
|
||||
) -> UnifiedExecSessionFactoryHandle {
|
||||
if let Some(client) = environment.exec_server_client() {
|
||||
ExecServerUnifiedExecSessionFactory::from_client(client)
|
||||
} else {
|
||||
local_unified_exec_session_factory()
|
||||
}
|
||||
}
|
||||
|
||||
async fn open_local_session(
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
mut spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<UnifiedExecProcess, UnifiedExecError> {
|
||||
let (program, args) = env
|
||||
.command
|
||||
.split_first()
|
||||
.ok_or(UnifiedExecError::MissingCommandLine)?;
|
||||
let inherited_fds = spawn_lifecycle.inherited_fds();
|
||||
|
||||
let spawn_result = if tty {
|
||||
codex_utils_pty::pty::spawn_process_with_inherited_fds(
|
||||
program,
|
||||
args,
|
||||
env.cwd.as_path(),
|
||||
&env.env,
|
||||
&env.arg0,
|
||||
codex_utils_pty::TerminalSize::default(),
|
||||
&inherited_fds,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
codex_utils_pty::pipe::spawn_process_no_stdin_with_inherited_fds(
|
||||
program,
|
||||
args,
|
||||
env.cwd.as_path(),
|
||||
&env.env,
|
||||
&env.arg0,
|
||||
&inherited_fds,
|
||||
)
|
||||
.await
|
||||
};
|
||||
let spawned = spawn_result.map_err(|err| UnifiedExecError::create_process(err.to_string()))?;
|
||||
spawn_lifecycle.after_spawn();
|
||||
UnifiedExecProcess::from_spawned(spawned, env.sandbox, spawn_lifecycle).await
|
||||
}
|
||||
@@ -38,6 +38,7 @@ use crate::codex::TurnContext;
|
||||
use crate::sandboxing::SandboxPermissions;
|
||||
|
||||
mod async_watcher;
|
||||
mod backend;
|
||||
mod errors;
|
||||
mod head_tail_buffer;
|
||||
mod process;
|
||||
@@ -47,6 +48,9 @@ pub(crate) fn set_deterministic_process_ids_for_tests(enabled: bool) {
|
||||
process_manager::set_deterministic_process_ids_for_tests(enabled);
|
||||
}
|
||||
|
||||
pub(crate) use backend::UnifiedExecSessionFactoryHandle;
|
||||
pub(crate) use backend::local_unified_exec_session_factory;
|
||||
pub(crate) use backend::unified_exec_session_factory_for_environment;
|
||||
pub(crate) use errors::UnifiedExecError;
|
||||
pub(crate) use process::NoopSpawnLifecycle;
|
||||
#[cfg(unix)]
|
||||
@@ -123,14 +127,26 @@ impl ProcessStore {
|
||||
pub(crate) struct UnifiedExecProcessManager {
|
||||
process_store: Mutex<ProcessStore>,
|
||||
max_write_stdin_yield_time_ms: u64,
|
||||
session_factory: UnifiedExecSessionFactoryHandle,
|
||||
}
|
||||
|
||||
impl UnifiedExecProcessManager {
|
||||
pub(crate) fn new(max_write_stdin_yield_time_ms: u64) -> Self {
|
||||
Self::with_session_factory(
|
||||
max_write_stdin_yield_time_ms,
|
||||
local_unified_exec_session_factory(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn with_session_factory(
|
||||
max_write_stdin_yield_time_ms: u64,
|
||||
session_factory: UnifiedExecSessionFactoryHandle,
|
||||
) -> Self {
|
||||
Self {
|
||||
process_store: Mutex::new(ProcessStore::default()),
|
||||
max_write_stdin_yield_time_ms: max_write_stdin_yield_time_ms
|
||||
.max(MIN_EMPTY_YIELD_TIME_MS),
|
||||
session_factory,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,15 +3,31 @@ use super::*;
|
||||
use crate::codex::Session;
|
||||
use crate::codex::TurnContext;
|
||||
use crate::codex::make_session_and_context;
|
||||
use crate::config::ConfigBuilder;
|
||||
use crate::config::ConfigOverrides;
|
||||
use crate::protocol::AskForApproval;
|
||||
use crate::protocol::SandboxPolicy;
|
||||
use crate::tools::context::ExecCommandToolOutput;
|
||||
use crate::unified_exec::ExecCommandRequest;
|
||||
use crate::unified_exec::WriteStdinRequest;
|
||||
use core_test_support::skip_if_sandbox;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::Duration;
|
||||
|
||||
fn test_exec_request(command: Vec<String>, cwd: &Path) -> crate::sandboxing::ExecRequest {
|
||||
crate::sandboxing::ExecRequest {
|
||||
command,
|
||||
cwd: cwd.to_path_buf(),
|
||||
env: std::collections::HashMap::new(),
|
||||
arg0: None,
|
||||
timeout: None,
|
||||
user: None,
|
||||
sandbox: crate::exec::SandboxType::None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn test_session_and_turn() -> (Arc<Session>, Arc<TurnContext>) {
|
||||
let (session, mut turn) = make_session_and_context().await;
|
||||
turn.approval_policy
|
||||
@@ -233,6 +249,57 @@ async fn unified_exec_timeouts() -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn unified_exec_can_use_remote_exec_server_environment() -> anyhow::Result<()> {
|
||||
skip_if_sandbox!(Ok(()));
|
||||
|
||||
let codex_home = TempDir::new()?;
|
||||
let cwd = TempDir::new()?;
|
||||
let config = ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.harness_overrides(ConfigOverrides {
|
||||
cwd: Some(cwd.path().to_path_buf()),
|
||||
..Default::default()
|
||||
})
|
||||
.build()
|
||||
.await?;
|
||||
let client = codex_exec_server::ExecServerClient::connect_in_process(
|
||||
codex_exec_server::ExecServerClientConnectOptions::default(),
|
||||
)
|
||||
.await?;
|
||||
let environment = codex_exec_server::Environment::from_exec_server_client(client);
|
||||
let session_factory = unified_exec_session_factory_for_environment(&environment);
|
||||
let manager = UnifiedExecProcessManager::with_session_factory(
|
||||
DEFAULT_MAX_BACKGROUND_TERMINAL_TIMEOUT_MS,
|
||||
session_factory,
|
||||
);
|
||||
let process = manager
|
||||
.open_session_with_exec_env(
|
||||
1000,
|
||||
&test_exec_request(
|
||||
vec![
|
||||
"bash".to_string(),
|
||||
"-c".to_string(),
|
||||
"printf unified_exec_remote_exec_server_environment_marker".to_string(),
|
||||
],
|
||||
cwd.path(),
|
||||
),
|
||||
false,
|
||||
Box::new(NoopSpawnLifecycle),
|
||||
)
|
||||
.await?;
|
||||
let mut output_rx = process.output_receiver();
|
||||
let chunk = tokio::time::timeout(Duration::from_secs(5), output_rx.recv()).await??;
|
||||
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(&chunk),
|
||||
"unified_exec_remote_exec_server_environment_marker"
|
||||
);
|
||||
|
||||
process.terminate();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn unified_exec_pause_blocks_yield_timeout() -> anyhow::Result<()> {
|
||||
skip_if_sandbox!(Ok(()));
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#![allow(clippy::module_inception)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex as StdMutex;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tokio::sync::Mutex;
|
||||
@@ -16,8 +17,12 @@ use crate::exec::ExecToolCallOutput;
|
||||
use crate::exec::SandboxType;
|
||||
use crate::exec::StreamOutput;
|
||||
use crate::exec::is_likely_sandbox_denied;
|
||||
use crate::sandboxing::ExecRequest;
|
||||
use crate::truncate::TruncationPolicy;
|
||||
use crate::truncate::formatted_truncate_text;
|
||||
use codex_exec_server::ExecParams;
|
||||
use codex_exec_server::ExecServerClient;
|
||||
use codex_exec_server::ExecServerEvent;
|
||||
use codex_utils_pty::ExecCommandSession;
|
||||
use codex_utils_pty::SpawnedPty;
|
||||
|
||||
@@ -56,7 +61,7 @@ pub(crate) struct OutputHandles {
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct UnifiedExecProcess {
|
||||
process_handle: ExecCommandSession,
|
||||
process_handle: ProcessBackend,
|
||||
output_rx: broadcast::Receiver<Vec<u8>>,
|
||||
output_buffer: OutputBuffer,
|
||||
output_notify: Arc<Notify>,
|
||||
@@ -69,9 +74,45 @@ pub(crate) struct UnifiedExecProcess {
|
||||
_spawn_lifecycle: SpawnLifecycleHandle,
|
||||
}
|
||||
|
||||
enum ProcessBackend {
|
||||
Local(ExecCommandSession),
|
||||
Remote(RemoteExecSession),
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ProcessBackend {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Local(process_handle) => f.debug_tuple("Local").field(process_handle).finish(),
|
||||
Self::Remote(process_handle) => f.debug_tuple("Remote").field(process_handle).finish(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct RemoteExecSession {
|
||||
process_key: String,
|
||||
client: ExecServerClient,
|
||||
writer_tx: mpsc::Sender<Vec<u8>>,
|
||||
exited: Arc<AtomicBool>,
|
||||
exit_code: Arc<StdMutex<Option<i32>>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for RemoteExecSession {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("RemoteExecSession")
|
||||
.field("process_key", &self.process_key)
|
||||
.field("exited", &self.exited.load(Ordering::SeqCst))
|
||||
.field(
|
||||
"exit_code",
|
||||
&self.exit_code.lock().ok().and_then(|guard| *guard),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl UnifiedExecProcess {
|
||||
pub(super) fn new(
|
||||
process_handle: ExecCommandSession,
|
||||
fn new(
|
||||
process_handle: ProcessBackend,
|
||||
initial_output_rx: tokio::sync::broadcast::Receiver<Vec<u8>>,
|
||||
sandbox_type: SandboxType,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
@@ -123,7 +164,10 @@ impl UnifiedExecProcess {
|
||||
}
|
||||
|
||||
pub(super) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
|
||||
self.process_handle.writer_sender()
|
||||
match &self.process_handle {
|
||||
ProcessBackend::Local(process_handle) => process_handle.writer_sender(),
|
||||
ProcessBackend::Remote(process_handle) => process_handle.writer_tx.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn output_handles(&self) -> OutputHandles {
|
||||
@@ -149,17 +193,38 @@ impl UnifiedExecProcess {
|
||||
}
|
||||
|
||||
pub(super) fn has_exited(&self) -> bool {
|
||||
self.process_handle.has_exited()
|
||||
match &self.process_handle {
|
||||
ProcessBackend::Local(process_handle) => process_handle.has_exited(),
|
||||
ProcessBackend::Remote(process_handle) => process_handle.exited.load(Ordering::SeqCst),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn exit_code(&self) -> Option<i32> {
|
||||
self.process_handle.exit_code()
|
||||
match &self.process_handle {
|
||||
ProcessBackend::Local(process_handle) => process_handle.exit_code(),
|
||||
ProcessBackend::Remote(process_handle) => process_handle
|
||||
.exit_code
|
||||
.lock()
|
||||
.ok()
|
||||
.and_then(|guard| *guard),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn terminate(&self) {
|
||||
self.output_closed.store(true, Ordering::Release);
|
||||
self.output_closed_notify.notify_waiters();
|
||||
self.process_handle.terminate();
|
||||
match &self.process_handle {
|
||||
ProcessBackend::Local(process_handle) => process_handle.terminate(),
|
||||
ProcessBackend::Remote(process_handle) => {
|
||||
let client = process_handle.client.clone();
|
||||
let process_key = process_handle.process_key.clone();
|
||||
if let Ok(handle) = tokio::runtime::Handle::try_current() {
|
||||
handle.spawn(async move {
|
||||
let _ = client.terminate(&process_key).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
self.cancellation_token.cancel();
|
||||
self.output_task.abort();
|
||||
}
|
||||
@@ -232,7 +297,12 @@ impl UnifiedExecProcess {
|
||||
mut exit_rx,
|
||||
} = spawned;
|
||||
let output_rx = codex_utils_pty::combine_output_receivers(stdout_rx, stderr_rx);
|
||||
let managed = Self::new(process_handle, output_rx, sandbox_type, spawn_lifecycle);
|
||||
let managed = Self::new(
|
||||
ProcessBackend::Local(process_handle),
|
||||
output_rx,
|
||||
sandbox_type,
|
||||
spawn_lifecycle,
|
||||
);
|
||||
|
||||
let exit_ready = matches!(exit_rx.try_recv(), Ok(_) | Err(TryRecvError::Closed));
|
||||
|
||||
@@ -262,6 +332,88 @@ impl UnifiedExecProcess {
|
||||
Ok(managed)
|
||||
}
|
||||
|
||||
pub(super) async fn from_exec_server(
|
||||
client: ExecServerClient,
|
||||
process_id: i32,
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<Self, UnifiedExecError> {
|
||||
let process_key = process_id.to_string();
|
||||
let mut events_rx = client.event_receiver();
|
||||
client
|
||||
.exec(ExecParams {
|
||||
process_id: process_key.clone(),
|
||||
argv: env.command.clone(),
|
||||
cwd: env.cwd.clone(),
|
||||
env: env.env.clone(),
|
||||
tty,
|
||||
arg0: env.arg0.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|err| UnifiedExecError::create_process(err.to_string()))?;
|
||||
|
||||
let (output_tx, output_rx) = broadcast::channel(256);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(256);
|
||||
let exited = Arc::new(AtomicBool::new(false));
|
||||
let exit_code = Arc::new(StdMutex::new(None));
|
||||
|
||||
let managed = Self::new(
|
||||
ProcessBackend::Remote(RemoteExecSession {
|
||||
process_key: process_key.clone(),
|
||||
client: client.clone(),
|
||||
writer_tx,
|
||||
exited: Arc::clone(&exited),
|
||||
exit_code: Arc::clone(&exit_code),
|
||||
}),
|
||||
output_rx,
|
||||
env.sandbox,
|
||||
spawn_lifecycle,
|
||||
);
|
||||
|
||||
{
|
||||
let client = client.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Some(chunk) = writer_rx.recv().await {
|
||||
if client.write(&process_key, chunk).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let process_key = process_id.to_string();
|
||||
let exited = Arc::clone(&exited);
|
||||
let exit_code = Arc::clone(&exit_code);
|
||||
let cancellation_token = managed.cancellation_token();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(event) = events_rx.recv().await {
|
||||
match event {
|
||||
ExecServerEvent::OutputDelta(notification)
|
||||
if notification.process_id == process_key =>
|
||||
{
|
||||
let _ = output_tx.send(notification.chunk.into_inner());
|
||||
}
|
||||
ExecServerEvent::Exited(notification)
|
||||
if notification.process_id == process_key =>
|
||||
{
|
||||
exited.store(true, Ordering::SeqCst);
|
||||
if let Ok(mut guard) = exit_code.lock() {
|
||||
*guard = Some(notification.exit_code);
|
||||
}
|
||||
cancellation_token.cancel();
|
||||
break;
|
||||
}
|
||||
ExecServerEvent::OutputDelta(_) | ExecServerEvent::Exited(_) => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(managed)
|
||||
}
|
||||
|
||||
fn signal_exit(&self) {
|
||||
self.cancellation_token.cancel();
|
||||
}
|
||||
|
||||
@@ -539,42 +539,14 @@ impl UnifiedExecProcessManager {
|
||||
|
||||
pub(crate) async fn open_session_with_exec_env(
|
||||
&self,
|
||||
process_id: i32,
|
||||
env: &ExecRequest,
|
||||
tty: bool,
|
||||
mut spawn_lifecycle: SpawnLifecycleHandle,
|
||||
spawn_lifecycle: SpawnLifecycleHandle,
|
||||
) -> Result<UnifiedExecProcess, UnifiedExecError> {
|
||||
let (program, args) = env
|
||||
.command
|
||||
.split_first()
|
||||
.ok_or(UnifiedExecError::MissingCommandLine)?;
|
||||
let inherited_fds = spawn_lifecycle.inherited_fds();
|
||||
|
||||
let spawn_result = if tty {
|
||||
codex_utils_pty::pty::spawn_process_with_inherited_fds(
|
||||
program,
|
||||
args,
|
||||
env.cwd.as_path(),
|
||||
&env.env,
|
||||
&env.arg0,
|
||||
codex_utils_pty::TerminalSize::default(),
|
||||
&inherited_fds,
|
||||
)
|
||||
self.session_factory
|
||||
.open_session(process_id, env, tty, spawn_lifecycle)
|
||||
.await
|
||||
} else {
|
||||
codex_utils_pty::pipe::spawn_process_no_stdin_with_inherited_fds(
|
||||
program,
|
||||
args,
|
||||
env.cwd.as_path(),
|
||||
&env.env,
|
||||
&env.arg0,
|
||||
&inherited_fds,
|
||||
)
|
||||
.await
|
||||
};
|
||||
let spawned =
|
||||
spawn_result.map_err(|err| UnifiedExecError::create_process(err.to_string()))?;
|
||||
spawn_lifecycle.after_spawn();
|
||||
UnifiedExecProcess::from_spawned(spawned, env.sandbox, spawn_lifecycle).await
|
||||
}
|
||||
|
||||
pub(super) async fn open_session_with_sandbox(
|
||||
@@ -610,6 +582,7 @@ impl UnifiedExecProcessManager {
|
||||
})
|
||||
.await;
|
||||
let req = UnifiedExecToolRequest {
|
||||
process_id: request.process_id,
|
||||
command: request.command.clone(),
|
||||
cwd,
|
||||
env,
|
||||
|
||||
@@ -16,12 +16,15 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
codex-utils-pty = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"fs",
|
||||
@@ -41,4 +44,3 @@ tracing = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# codex-exec-server
|
||||
|
||||
`codex-exec-server` is a small standalone JSON-RPC server for spawning
|
||||
`codex-exec-server` is a small standalone WebSocket JSON-RPC server for spawning
|
||||
and controlling subprocesses through `codex-utils-pty`.
|
||||
|
||||
This PR intentionally lands only the standalone binary, client, wire protocol,
|
||||
@@ -18,16 +18,16 @@ unified-exec in this PR; it is only the standalone transport layer.
|
||||
|
||||
## Transport
|
||||
|
||||
The server speaks the shared `codex-app-server-protocol` message envelope on
|
||||
the wire.
|
||||
The server speaks JSON-RPC 2.0 over WebSockets.
|
||||
|
||||
The standalone binary supports:
|
||||
Like the app-server transport, messages on the wire omit the `"jsonrpc":"2.0"`
|
||||
field and use the shared `codex-app-server-protocol` envelope types.
|
||||
|
||||
- `ws://IP:PORT` (default)
|
||||
The current protocol version is:
|
||||
|
||||
Wire framing:
|
||||
|
||||
- websocket: one JSON-RPC message per websocket text frame
|
||||
```text
|
||||
exec-server.v0
|
||||
```
|
||||
|
||||
## Lifecycle
|
||||
|
||||
@@ -41,8 +41,8 @@ Each connection follows this sequence:
|
||||
If the server receives any notification other than `initialized`, it replies
|
||||
with an error using request id `-1`.
|
||||
|
||||
If the websocket connection closes, the server terminates any remaining managed
|
||||
processes for that client connection.
|
||||
If the stdio connection closes, the server terminates any remaining managed
|
||||
processes before exiting.
|
||||
|
||||
## API
|
||||
|
||||
@@ -61,7 +61,9 @@ Request params:
|
||||
Response:
|
||||
|
||||
```json
|
||||
{}
|
||||
{
|
||||
"protocolVersion": "exec-server.v0"
|
||||
}
|
||||
```
|
||||
|
||||
### `initialized`
|
||||
@@ -237,13 +239,13 @@ Typical error cases:
|
||||
The crate exports:
|
||||
|
||||
- `ExecServerClient`
|
||||
- `ExecServerLaunchCommand`
|
||||
- `ExecServerProcess`
|
||||
- `ExecServerError`
|
||||
- `ExecServerClientConnectOptions`
|
||||
- `RemoteExecServerConnectArgs`
|
||||
- protocol structs `InitializeParams` and `InitializeResponse`
|
||||
- `DEFAULT_LISTEN_URL` and `ExecServerListenUrlParseError`
|
||||
- `run_main_with_listen_url()`
|
||||
- `run_main()` for embedding the websocket server in a binary
|
||||
- protocol structs such as `ExecParams`, `ExecResponse`,
|
||||
`WriteParams`, `TerminateParams`, `ExecOutputDeltaNotification`, and
|
||||
`ExecExitedNotification`
|
||||
- `run_main()` for embedding the WebSocket server in a binary
|
||||
|
||||
## Example session
|
||||
|
||||
@@ -251,7 +253,7 @@ Initialize:
|
||||
|
||||
```json
|
||||
{"id":1,"method":"initialize","params":{"clientName":"example-client"}}
|
||||
{"id":1,"result":{}}
|
||||
{"id":1,"result":{"protocolVersion":"exec-server.v0"}}
|
||||
{"method":"initialized","params":{}}
|
||||
```
|
||||
|
||||
|
||||
@@ -1,20 +1,65 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCopyResponse;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryResponse;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsGetMetadataResponse;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryResponse;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsReadFileResponse;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsRemoveResponse;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_app_server_protocol::FsWriteFileResponse;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use serde_json::Value;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::timeout;
|
||||
use tokio_tungstenite::connect_async;
|
||||
use tracing::debug;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::client_api::ExecServerClientConnectOptions;
|
||||
use crate::client_api::ExecServerEvent;
|
||||
use crate::client_api::RemoteExecServerConnectArgs;
|
||||
use crate::connection::JsonRpcConnection;
|
||||
use crate::protocol::EXEC_EXITED_METHOD;
|
||||
use crate::protocol::EXEC_METHOD;
|
||||
use crate::protocol::EXEC_OUTPUT_DELTA_METHOD;
|
||||
use crate::protocol::EXEC_READ_METHOD;
|
||||
use crate::protocol::EXEC_TERMINATE_METHOD;
|
||||
use crate::protocol::EXEC_WRITE_METHOD;
|
||||
use crate::protocol::ExecExitedNotification;
|
||||
use crate::protocol::ExecOutputDeltaNotification;
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::ExecResponse;
|
||||
use crate::protocol::FS_COPY_METHOD;
|
||||
use crate::protocol::FS_CREATE_DIRECTORY_METHOD;
|
||||
use crate::protocol::FS_GET_METADATA_METHOD;
|
||||
use crate::protocol::FS_READ_DIRECTORY_METHOD;
|
||||
use crate::protocol::FS_READ_FILE_METHOD;
|
||||
use crate::protocol::FS_REMOVE_METHOD;
|
||||
use crate::protocol::FS_WRITE_FILE_METHOD;
|
||||
use crate::protocol::INITIALIZE_METHOD;
|
||||
use crate::protocol::INITIALIZED_METHOD;
|
||||
use crate::protocol::InitializeParams;
|
||||
use crate::protocol::InitializeResponse;
|
||||
use crate::protocol::ReadParams;
|
||||
use crate::protocol::ReadResponse;
|
||||
use crate::protocol::TerminateParams;
|
||||
use crate::protocol::TerminateResponse;
|
||||
use crate::protocol::WriteParams;
|
||||
use crate::protocol::WriteResponse;
|
||||
use crate::rpc::RpcCallError;
|
||||
use crate::rpc::RpcClient;
|
||||
use crate::rpc::RpcClientEvent;
|
||||
use crate::rpc::RpcNotificationSender;
|
||||
use crate::rpc::RpcServerOutboundMessage;
|
||||
|
||||
mod local_backend;
|
||||
use local_backend::LocalBackend;
|
||||
@@ -74,6 +119,7 @@ impl ClientBackend {
|
||||
|
||||
struct Inner {
|
||||
backend: ClientBackend,
|
||||
events_tx: broadcast::Sender<ExecServerEvent>,
|
||||
reader_task: tokio::task::JoinHandle<()>,
|
||||
}
|
||||
|
||||
@@ -124,11 +170,32 @@ impl ExecServerClient {
|
||||
pub async fn connect_in_process(
|
||||
options: ExecServerClientConnectOptions,
|
||||
) -> Result<Self, ExecServerError> {
|
||||
let backend = LocalBackend::new(crate::server::ExecServerHandler::new());
|
||||
let inner = Arc::new(Inner {
|
||||
backend: ClientBackend::InProcess(backend),
|
||||
reader_task: tokio::spawn(async {}),
|
||||
let (outgoing_tx, mut outgoing_rx) = mpsc::channel::<RpcServerOutboundMessage>(256);
|
||||
let backend = LocalBackend::new(crate::server::ExecServerHandler::new(
|
||||
RpcNotificationSender::new(outgoing_tx),
|
||||
));
|
||||
let inner = Arc::new_cyclic(|weak| {
|
||||
let weak = weak.clone();
|
||||
let reader_task = tokio::spawn(async move {
|
||||
while let Some(message) = outgoing_rx.recv().await {
|
||||
if let Some(inner) = weak.upgrade()
|
||||
&& let Err(err) = handle_in_process_outbound_message(&inner, message).await
|
||||
{
|
||||
warn!(
|
||||
"in-process exec-server client closing after unexpected response: {err}"
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Inner {
|
||||
backend: ClientBackend::InProcess(backend),
|
||||
events_tx: broadcast::channel(256).0,
|
||||
reader_task,
|
||||
}
|
||||
});
|
||||
|
||||
let client = Self { inner };
|
||||
client.initialize(options).await?;
|
||||
Ok(client)
|
||||
@@ -160,6 +227,10 @@ impl ExecServerClient {
|
||||
.await
|
||||
}
|
||||
|
||||
pub fn event_receiver(&self) -> broadcast::Receiver<ExecServerEvent> {
|
||||
self.inner.events_tx.subscribe()
|
||||
}
|
||||
|
||||
pub async fn initialize(
|
||||
&self,
|
||||
options: ExecServerClientConnectOptions,
|
||||
@@ -190,36 +261,234 @@ impl ExecServerClient {
|
||||
})?
|
||||
}
|
||||
|
||||
pub async fn exec(&self, params: ExecParams) -> Result<ExecResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.exec(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during exec".to_string(),
|
||||
));
|
||||
};
|
||||
remote.call(EXEC_METHOD, ¶ms).await.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn read(&self, params: ReadParams) -> Result<ReadResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.exec_read(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during read".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(EXEC_READ_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn write(
|
||||
&self,
|
||||
process_id: &str,
|
||||
chunk: Vec<u8>,
|
||||
) -> Result<WriteResponse, ExecServerError> {
|
||||
let params = WriteParams {
|
||||
process_id: process_id.to_string(),
|
||||
chunk: chunk.into(),
|
||||
};
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.exec_write(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during write".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(EXEC_WRITE_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn terminate(&self, process_id: &str) -> Result<TerminateResponse, ExecServerError> {
|
||||
let params = TerminateParams {
|
||||
process_id: process_id.to_string(),
|
||||
};
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.terminate(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during terminate".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(EXEC_TERMINATE_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_read_file(
|
||||
&self,
|
||||
params: FsReadFileParams,
|
||||
) -> Result<FsReadFileResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_read_file(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/readFile".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_READ_FILE_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_write_file(
|
||||
&self,
|
||||
params: FsWriteFileParams,
|
||||
) -> Result<FsWriteFileResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_write_file(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/writeFile".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_WRITE_FILE_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_create_directory(
|
||||
&self,
|
||||
params: FsCreateDirectoryParams,
|
||||
) -> Result<FsCreateDirectoryResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_create_directory(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/createDirectory".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_CREATE_DIRECTORY_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_get_metadata(
|
||||
&self,
|
||||
params: FsGetMetadataParams,
|
||||
) -> Result<FsGetMetadataResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_get_metadata(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/getMetadata".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_GET_METADATA_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_read_directory(
|
||||
&self,
|
||||
params: FsReadDirectoryParams,
|
||||
) -> Result<FsReadDirectoryResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_read_directory(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/readDirectory".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_READ_DIRECTORY_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_remove(
|
||||
&self,
|
||||
params: FsRemoveParams,
|
||||
) -> Result<FsRemoveResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_remove(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/remove".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_REMOVE_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub async fn fs_copy(&self, params: FsCopyParams) -> Result<FsCopyResponse, ExecServerError> {
|
||||
if let Some(backend) = self.inner.backend.as_local() {
|
||||
return backend.fs_copy(params).await;
|
||||
}
|
||||
let Some(remote) = self.inner.backend.as_remote() else {
|
||||
return Err(ExecServerError::Protocol(
|
||||
"remote backend missing during fs/copy".to_string(),
|
||||
));
|
||||
};
|
||||
remote
|
||||
.call(FS_COPY_METHOD, ¶ms)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
async fn connect(
|
||||
connection: JsonRpcConnection,
|
||||
options: ExecServerClientConnectOptions,
|
||||
) -> Result<Self, ExecServerError> {
|
||||
let (rpc_client, mut events_rx) = RpcClient::new(connection);
|
||||
let reader_task = tokio::spawn(async move {
|
||||
while let Some(event) = events_rx.recv().await {
|
||||
match event {
|
||||
RpcClientEvent::Notification(notification) => {
|
||||
warn!(
|
||||
"ignoring unexpected exec-server notification during stub phase: {}",
|
||||
notification.method
|
||||
);
|
||||
}
|
||||
RpcClientEvent::Disconnected { reason } => {
|
||||
if let Some(reason) = reason {
|
||||
warn!("exec-server client transport disconnected: {reason}");
|
||||
let inner = Arc::new_cyclic(|weak| {
|
||||
let weak = weak.clone();
|
||||
let reader_task = tokio::spawn(async move {
|
||||
while let Some(event) = events_rx.recv().await {
|
||||
match event {
|
||||
RpcClientEvent::Notification(notification) => {
|
||||
if let Some(inner) = weak.upgrade()
|
||||
&& let Err(err) =
|
||||
handle_server_notification(&inner, notification).await
|
||||
{
|
||||
warn!("exec-server client closing after protocol error: {err}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
RpcClientEvent::Disconnected { reason } => {
|
||||
if let Some(reason) = reason {
|
||||
warn!("exec-server client transport disconnected: {reason}");
|
||||
}
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Inner {
|
||||
backend: ClientBackend::Remote(rpc_client),
|
||||
events_tx: broadcast::channel(256).0,
|
||||
reader_task,
|
||||
}
|
||||
});
|
||||
|
||||
let client = Self {
|
||||
inner: Arc::new(Inner {
|
||||
backend: ClientBackend::Remote(rpc_client),
|
||||
reader_task,
|
||||
}),
|
||||
};
|
||||
let client = Self { inner };
|
||||
client.initialize(options).await?;
|
||||
Ok(client)
|
||||
}
|
||||
@@ -247,3 +516,39 @@ impl From<RpcCallError> for ExecServerError {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_in_process_outbound_message(
|
||||
inner: &Arc<Inner>,
|
||||
message: RpcServerOutboundMessage,
|
||||
) -> Result<(), ExecServerError> {
|
||||
match message {
|
||||
RpcServerOutboundMessage::Response { .. } | RpcServerOutboundMessage::Error { .. } => Err(
|
||||
ExecServerError::Protocol("unexpected in-process RPC response".to_string()),
|
||||
),
|
||||
RpcServerOutboundMessage::Notification(notification) => {
|
||||
handle_server_notification(inner, notification).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_server_notification(
|
||||
inner: &Arc<Inner>,
|
||||
notification: JSONRPCNotification,
|
||||
) -> Result<(), ExecServerError> {
|
||||
match notification.method.as_str() {
|
||||
EXEC_OUTPUT_DELTA_METHOD => {
|
||||
let params: ExecOutputDeltaNotification =
|
||||
serde_json::from_value(notification.params.unwrap_or(Value::Null))?;
|
||||
let _ = inner.events_tx.send(ExecServerEvent::OutputDelta(params));
|
||||
}
|
||||
EXEC_EXITED_METHOD => {
|
||||
let params: ExecExitedNotification =
|
||||
serde_json::from_value(notification.params.unwrap_or(Value::Null))?;
|
||||
let _ = inner.events_tx.send(ExecServerEvent::Exited(params));
|
||||
}
|
||||
other => {
|
||||
debug!("ignoring unknown exec-server notification: {other}");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,7 +1,29 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::ExecResponse;
|
||||
use crate::protocol::InitializeResponse;
|
||||
use crate::protocol::ReadParams;
|
||||
use crate::protocol::ReadResponse;
|
||||
use crate::protocol::TerminateParams;
|
||||
use crate::protocol::TerminateResponse;
|
||||
use crate::protocol::WriteParams;
|
||||
use crate::protocol::WriteResponse;
|
||||
use crate::server::ExecServerHandler;
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCopyResponse;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryResponse;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsGetMetadataResponse;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryResponse;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsReadFileResponse;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsRemoveResponse;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_app_server_protocol::FsWriteFileResponse;
|
||||
|
||||
use super::ExecServerError;
|
||||
|
||||
@@ -35,4 +57,144 @@ impl LocalBackend {
|
||||
.initialized()
|
||||
.map_err(ExecServerError::Protocol)
|
||||
}
|
||||
|
||||
pub(super) async fn exec(&self, params: ExecParams) -> Result<ExecResponse, ExecServerError> {
|
||||
self.handler
|
||||
.exec(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn exec_read(
|
||||
&self,
|
||||
params: ReadParams,
|
||||
) -> Result<ReadResponse, ExecServerError> {
|
||||
self.handler
|
||||
.exec_read(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn exec_write(
|
||||
&self,
|
||||
params: WriteParams,
|
||||
) -> Result<WriteResponse, ExecServerError> {
|
||||
self.handler
|
||||
.exec_write(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn terminate(
|
||||
&self,
|
||||
params: TerminateParams,
|
||||
) -> Result<TerminateResponse, ExecServerError> {
|
||||
self.handler
|
||||
.terminate(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_read_file(
|
||||
&self,
|
||||
params: FsReadFileParams,
|
||||
) -> Result<FsReadFileResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_read_file(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_write_file(
|
||||
&self,
|
||||
params: FsWriteFileParams,
|
||||
) -> Result<FsWriteFileResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_write_file(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_create_directory(
|
||||
&self,
|
||||
params: FsCreateDirectoryParams,
|
||||
) -> Result<FsCreateDirectoryResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_create_directory(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_get_metadata(
|
||||
&self,
|
||||
params: FsGetMetadataParams,
|
||||
) -> Result<FsGetMetadataResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_get_metadata(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_read_directory(
|
||||
&self,
|
||||
params: FsReadDirectoryParams,
|
||||
) -> Result<FsReadDirectoryResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_read_directory(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_remove(
|
||||
&self,
|
||||
params: FsRemoveParams,
|
||||
) -> Result<FsRemoveResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_remove(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn fs_copy(
|
||||
&self,
|
||||
params: FsCopyParams,
|
||||
) -> Result<FsCopyResponse, ExecServerError> {
|
||||
self.handler
|
||||
.fs_copy(params)
|
||||
.await
|
||||
.map_err(|error| ExecServerError::Server {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::protocol::ExecExitedNotification;
|
||||
use crate::protocol::ExecOutputDeltaNotification;
|
||||
|
||||
/// Connection options for any exec-server client transport.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ExecServerClientConnectOptions {
|
||||
@@ -15,3 +18,10 @@ pub struct RemoteExecServerConnectArgs {
|
||||
pub connect_timeout: Duration,
|
||||
pub initialize_timeout: Duration,
|
||||
}
|
||||
|
||||
/// Connection-level server events.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ExecServerEvent {
|
||||
OutputDelta(ExecOutputDeltaNotification),
|
||||
Exited(ExecExitedNotification),
|
||||
}
|
||||
|
||||
@@ -1,20 +1,19 @@
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::io::AsyncWrite;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
|
||||
#[cfg(test)]
|
||||
use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::io::AsyncWrite;
|
||||
#[cfg(test)]
|
||||
use tokio::io::AsyncWriteExt;
|
||||
#[cfg(test)]
|
||||
use tokio::io::BufReader;
|
||||
#[cfg(test)]
|
||||
use tokio::io::BufWriter;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
|
||||
pub(crate) const CHANNEL_CAPACITY: usize = 128;
|
||||
|
||||
|
||||
@@ -1,13 +1,20 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::ExecServerClient;
|
||||
use crate::ExecServerError;
|
||||
use crate::RemoteExecServerConnectArgs;
|
||||
use crate::executor::Executor;
|
||||
use crate::executor::LocalExecutor;
|
||||
use crate::executor::RemoteExecutor;
|
||||
use crate::fs;
|
||||
use crate::fs::ExecutorFileSystem;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
#[derive(Clone)]
|
||||
pub struct Environment {
|
||||
experimental_exec_server_url: Option<String>,
|
||||
remote_exec_server_client: Option<ExecServerClient>,
|
||||
exec_server_client: Option<ExecServerClient>,
|
||||
executor: Arc<dyn Executor>,
|
||||
file_system: Arc<dyn ExecutorFileSystem>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Environment {
|
||||
@@ -17,60 +24,133 @@ impl std::fmt::Debug for Environment {
|
||||
"experimental_exec_server_url",
|
||||
&self.experimental_exec_server_url,
|
||||
)
|
||||
.field(
|
||||
"has_remote_exec_server_client",
|
||||
&self.remote_exec_server_client.is_some(),
|
||||
)
|
||||
.finish()
|
||||
.field("has_exec_server_client", &self.exec_server_client.is_some())
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum EnvironmentError {
|
||||
#[error("failed to initialize executor backend: {0}")]
|
||||
InitializeExecutor(String),
|
||||
|
||||
#[error("failed to initialize filesystem backend: {0}")]
|
||||
InitializeFilesystem(String),
|
||||
}
|
||||
|
||||
impl Environment {
|
||||
pub fn local() -> Self {
|
||||
Self {
|
||||
experimental_exec_server_url: None,
|
||||
exec_server_client: None,
|
||||
executor: Arc::new(LocalExecutor::new()),
|
||||
file_system: Arc::new(fs::LocalFileSystem),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn create(
|
||||
experimental_exec_server_url: Option<String>,
|
||||
) -> Result<Self, ExecServerError> {
|
||||
let remote_exec_server_client =
|
||||
if let Some(websocket_url) = experimental_exec_server_url.as_deref() {
|
||||
Some(
|
||||
ExecServerClient::connect_websocket(RemoteExecServerConnectArgs::new(
|
||||
websocket_url.to_string(),
|
||||
"codex-core".to_string(),
|
||||
))
|
||||
.await?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
) -> Result<Self, EnvironmentError> {
|
||||
let Some(websocket_url) = experimental_exec_server_url else {
|
||||
return Ok(Self::local());
|
||||
};
|
||||
|
||||
let client = ExecServerClient::connect_websocket(RemoteExecServerConnectArgs::new(
|
||||
websocket_url.clone(),
|
||||
"codex-core".to_string(),
|
||||
))
|
||||
.await
|
||||
.map_err(|err| EnvironmentError::InitializeExecutor(err.to_string()))?;
|
||||
|
||||
Ok(Self {
|
||||
experimental_exec_server_url,
|
||||
remote_exec_server_client,
|
||||
experimental_exec_server_url: Some(websocket_url),
|
||||
exec_server_client: Some(client.clone()),
|
||||
executor: Arc::new(RemoteExecutor::new(client.clone())),
|
||||
file_system: Arc::new(fs::RemoteFileSystem::new(client)),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_exec_server_client(client: ExecServerClient) -> Self {
|
||||
Self {
|
||||
experimental_exec_server_url: None,
|
||||
exec_server_client: Some(client.clone()),
|
||||
executor: Arc::new(RemoteExecutor::new(client.clone())),
|
||||
file_system: Arc::new(fs::RemoteFileSystem::new(client)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn experimental_exec_server_url(&self) -> Option<&str> {
|
||||
self.experimental_exec_server_url.as_deref()
|
||||
}
|
||||
|
||||
pub fn remote_exec_server_client(&self) -> Option<&ExecServerClient> {
|
||||
self.remote_exec_server_client.as_ref()
|
||||
pub fn exec_server_client(&self) -> Option<ExecServerClient> {
|
||||
self.exec_server_client.clone()
|
||||
}
|
||||
|
||||
pub fn get_filesystem(&self) -> impl ExecutorFileSystem + use<> {
|
||||
fs::LocalFileSystem
|
||||
pub fn filesystem(&self) -> Arc<dyn ExecutorFileSystem> {
|
||||
Arc::clone(&self.file_system)
|
||||
}
|
||||
|
||||
pub fn get_filesystem(&self) -> Arc<dyn ExecutorFileSystem> {
|
||||
self.filesystem()
|
||||
}
|
||||
|
||||
pub fn executor(&self) -> Arc<dyn Executor> {
|
||||
Arc::clone(&self.executor)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Environment {
|
||||
fn default() -> Self {
|
||||
Self::local()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ExecServerError> for EnvironmentError {
|
||||
fn from(err: ExecServerError) -> Self {
|
||||
Self::InitializeExecutor(err.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Environment;
|
||||
use crate::ExecServerClient;
|
||||
use crate::ExecServerClientConnectOptions;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn create_without_remote_exec_server_url_does_not_connect() {
|
||||
let environment = Environment::create(None).await.expect("create environment");
|
||||
|
||||
assert_eq!(environment.experimental_exec_server_url(), None);
|
||||
assert!(environment.remote_exec_server_client().is_none());
|
||||
assert!(environment.exec_server_client().is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn environment_uses_remote_filesystem_abstraction_when_client_is_provided() {
|
||||
let client =
|
||||
ExecServerClient::connect_in_process(ExecServerClientConnectOptions::default())
|
||||
.await
|
||||
.expect("connect in-process client");
|
||||
let environment = Environment::from_exec_server_client(client);
|
||||
let tempdir = TempDir::new().expect("tempdir");
|
||||
let path = AbsolutePathBuf::try_from(tempdir.path().join("marker.txt")).expect("path");
|
||||
|
||||
environment
|
||||
.filesystem()
|
||||
.write_file(&path, b"hello".to_vec())
|
||||
.await
|
||||
.expect("write file through environment abstraction");
|
||||
|
||||
let bytes = environment
|
||||
.filesystem()
|
||||
.read_file(&path)
|
||||
.await
|
||||
.expect("read file through environment abstraction");
|
||||
|
||||
assert_eq!(bytes, b"hello");
|
||||
}
|
||||
}
|
||||
|
||||
418
codex-rs/exec-server/src/executor.rs
Normal file
418
codex-rs/exec-server/src/executor.rs
Normal file
@@ -0,0 +1,418 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::watch;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::ExecServerClient;
|
||||
use crate::client_api::ExecServerClientConnectOptions;
|
||||
use crate::protocol::ExecOutputStream;
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::ReadParams;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ExecSpawnRequest {
|
||||
pub process_id: String,
|
||||
pub argv: Vec<String>,
|
||||
pub cwd: AbsolutePathBuf,
|
||||
pub env: HashMap<String, String>,
|
||||
pub arg0: Option<String>,
|
||||
pub tty: bool,
|
||||
pub sandbox: SandboxKind,
|
||||
pub inherited_fds: Vec<InheritedFd>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ExecOutputEvent {
|
||||
Stdout(Vec<u8>),
|
||||
Stderr(Vec<u8>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct ExecExit {
|
||||
pub exit_code: i32,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct PtySize {
|
||||
pub rows: u16,
|
||||
pub cols: u16,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub enum SandboxKind {
|
||||
None,
|
||||
MacosSeatbelt,
|
||||
LinuxSeccomp,
|
||||
WindowsRestrictedToken,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct InheritedFd {
|
||||
pub target_fd: i32,
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ExecError {
|
||||
#[error("failed to spawn process: {0}")]
|
||||
Spawn(String),
|
||||
#[error("failed to write to process: {0}")]
|
||||
Write(String),
|
||||
#[error("failed to resize process pty: {0}")]
|
||||
Resize(String),
|
||||
#[error("failed to terminate process: {0}")]
|
||||
Terminate(String),
|
||||
#[error("executor transport failure: {0}")]
|
||||
Transport(String),
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Executor: std::fmt::Debug + Send + Sync {
|
||||
async fn spawn(&self, request: ExecSpawnRequest) -> Result<Box<dyn ExecSession>, ExecError>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ExecSession: std::fmt::Debug + Send + Sync {
|
||||
fn process_id(&self) -> &str;
|
||||
|
||||
fn subscribe_output(&self) -> broadcast::Receiver<ExecOutputEvent>;
|
||||
|
||||
async fn write(&self, chunk: Vec<u8>) -> Result<(), ExecError>;
|
||||
|
||||
async fn resize(&self, _size: PtySize) -> Result<(), ExecError> {
|
||||
Err(ExecError::Resize(
|
||||
"resize is not supported by exec-server sessions".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
async fn terminate(&self) -> Result<(), ExecError>;
|
||||
|
||||
async fn wait(&self) -> Result<ExecExit, ExecError>;
|
||||
|
||||
fn try_exit_status(&self) -> Option<ExecExit>;
|
||||
}
|
||||
|
||||
pub struct LocalExecutor {
|
||||
client: Mutex<Option<ExecServerClient>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for LocalExecutor {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("LocalExecutor").finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RemoteExecutor {
|
||||
client: ExecServerClient,
|
||||
}
|
||||
|
||||
impl fmt::Debug for RemoteExecutor {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RemoteExecutor").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LocalExecSession {
|
||||
inner: ExecServerExecSession,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RemoteExecSession {
|
||||
inner: ExecServerExecSession,
|
||||
}
|
||||
|
||||
struct ExecServerExecSession {
|
||||
process_id: String,
|
||||
client: ExecServerClient,
|
||||
output_tx: broadcast::Sender<ExecOutputEvent>,
|
||||
exit_status: watch::Sender<Option<ExecExit>>,
|
||||
output_task: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ExecServerExecSession {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("ExecServerExecSession")
|
||||
.field("process_id", &self.process_id)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LocalExecutor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl LocalExecutor {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
client: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
async fn client(&self) -> Result<ExecServerClient, ExecError> {
|
||||
let mut client_guard = self.client.lock().await;
|
||||
if let Some(client) = client_guard.clone() {
|
||||
return Ok(client);
|
||||
}
|
||||
|
||||
let client =
|
||||
ExecServerClient::connect_in_process(ExecServerClientConnectOptions::default())
|
||||
.await
|
||||
.map_err(|err| ExecError::Spawn(err.to_string()))?;
|
||||
*client_guard = Some(client.clone());
|
||||
Ok(client)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl Executor for LocalExecutor {
    /// Spawns a process on the lazily-connected in-process exec server and
    /// returns a session handle for interacting with it.
    ///
    /// # Errors
    /// Returns [`ExecError::Spawn`] if the request asks for unsupported
    /// features (sandboxing, inherited fds), if the server connection cannot
    /// be established, or if the spawn RPC fails.
    async fn spawn(&self, request: ExecSpawnRequest) -> Result<Box<dyn ExecSession>, ExecError> {
        validate_request(&request)?;
        let client = self.client().await?;
        let response = client
            .exec(request_to_exec_params(request))
            .await
            .map_err(|err| ExecError::Spawn(format!("failed to spawn process: {err}")))?;

        // NOTE(review): the session subscribes to client events only after the
        // exec RPC returns, so output emitted before the subscription may be
        // missed — confirm the server buffers or delays early output.
        Ok(Box::new(LocalExecSession {
            inner: ExecServerExecSession::new(response.process_id, client),
        }))
    }
}
|
||||
|
||||
impl RemoteExecutor {
    /// Wraps an already-connected exec-server client.
    pub fn new(client: ExecServerClient) -> Self {
        Self { client }
    }
}

#[async_trait]
impl Executor for RemoteExecutor {
    /// Spawns a process on the remote exec server and returns a session
    /// handle for it.
    ///
    /// # Errors
    /// Returns [`ExecError::Spawn`] for unsupported request features or a
    /// failed spawn RPC.
    async fn spawn(&self, request: ExecSpawnRequest) -> Result<Box<dyn ExecSession>, ExecError> {
        validate_request(&request)?;
        let response = self
            .client
            .exec(request_to_exec_params(request))
            .await
            .map_err(|err| ExecError::Spawn(format!("failed to spawn process: {err}")))?;

        Ok(Box::new(RemoteExecSession {
            inner: ExecServerExecSession::new(response.process_id, self.client.clone()),
        }))
    }
}
|
||||
|
||||
// `LocalExecSession` and `RemoteExecSession` are thin wrappers: every
// `ExecSession` method forwards to the shared `ExecServerExecSession`. The
// two types exist so local and remote sessions remain distinct at the API
// surface.
#[async_trait]
impl ExecSession for LocalExecSession {
    fn process_id(&self) -> &str {
        self.inner.process_id()
    }

    fn subscribe_output(&self) -> broadcast::Receiver<ExecOutputEvent> {
        self.inner.subscribe_output()
    }

    async fn write(&self, chunk: Vec<u8>) -> Result<(), ExecError> {
        self.inner.write(chunk).await
    }

    async fn terminate(&self) -> Result<(), ExecError> {
        self.inner.terminate().await
    }

    async fn wait(&self) -> Result<ExecExit, ExecError> {
        self.inner.wait().await
    }

    fn try_exit_status(&self) -> Option<ExecExit> {
        self.inner.try_exit_status()
    }
}

#[async_trait]
impl ExecSession for RemoteExecSession {
    fn process_id(&self) -> &str {
        self.inner.process_id()
    }

    fn subscribe_output(&self) -> broadcast::Receiver<ExecOutputEvent> {
        self.inner.subscribe_output()
    }

    async fn write(&self, chunk: Vec<u8>) -> Result<(), ExecError> {
        self.inner.write(chunk).await
    }

    async fn terminate(&self) -> Result<(), ExecError> {
        self.inner.terminate().await
    }

    async fn wait(&self) -> Result<ExecExit, ExecError> {
        self.inner.wait().await
    }

    fn try_exit_status(&self) -> Option<ExecExit> {
        self.inner.try_exit_status()
    }
}
|
||||
|
||||
impl ExecServerExecSession {
    /// Creates the shared session state and spawns a background task that
    /// routes this process's events from the client's broadcast stream into
    /// the session-local output and exit channels.
    fn new(process_id: String, client: ExecServerClient) -> Self {
        // Receivers are created on demand via `subscribe`; the initial ones
        // returned here are intentionally dropped.
        let (output_tx, _) = broadcast::channel(128);
        let (exit_status, _) = watch::channel(None);
        let mut events = client.event_receiver();

        let process_id_clone = process_id.clone();
        let output_tx_clone = output_tx.clone();
        let exit_status_clone = exit_status.clone();
        let output_task = tokio::spawn(async move {
            loop {
                match events.recv().await {
                    Ok(event) => match event {
                        crate::client_api::ExecServerEvent::OutputDelta(notification) => {
                            // The client event stream is shared by every
                            // process on the connection; filter to ours.
                            if notification.process_id != process_id_clone {
                                continue;
                            }
                            let chunk = notification.chunk.into_inner();
                            let stream = match notification.stream {
                                ExecOutputStream::Stdout => ExecOutputEvent::Stdout(chunk),
                                ExecOutputStream::Stderr => ExecOutputEvent::Stderr(chunk),
                                // PTY output is surfaced to callers as stdout.
                                ExecOutputStream::Pty => ExecOutputEvent::Stdout(chunk),
                            };
                            // Send only fails when there are no subscribers,
                            // which is expected and safe to ignore.
                            let _ = output_tx_clone.send(stream);
                        }
                        crate::client_api::ExecServerEvent::Exited(notification) => {
                            if notification.process_id == process_id_clone {
                                let _ = exit_status_clone.send_replace(Some(ExecExit {
                                    exit_code: notification.exit_code,
                                }));
                                // No further events for this process.
                                break;
                            }
                        }
                    },
                    // NOTE(review): lagging drops older broadcast events, so
                    // output deltas can be silently lost under backpressure.
                    Err(RecvError::Lagged(_)) => continue,
                    Err(_) => break,
                }
            }
        });

        Self {
            process_id,
            client,
            output_tx,
            exit_status,
            output_task,
        }
    }

    /// Protocol-level process key (not an OS pid).
    fn process_id(&self) -> &str {
        &self.process_id
    }

    /// New receiver for output emitted after this call; earlier output is
    /// not replayed.
    fn subscribe_output(&self) -> broadcast::Receiver<ExecOutputEvent> {
        self.output_tx.subscribe()
    }

    /// Sends a chunk to the process via the exec server's write RPC.
    async fn write(&self, chunk: Vec<u8>) -> Result<(), ExecError> {
        self.client
            .write(&self.process_id, chunk)
            .await
            .map_err(|err| ExecError::Write(format!("failed to write to process: {err}")))?;
        Ok(())
    }

    /// Asks the exec server to terminate the process.
    async fn terminate(&self) -> Result<(), ExecError> {
        self.client
            .terminate(&self.process_id)
            .await
            .map_err(|err| ExecError::Terminate(format!("failed to terminate process: {err}")))?;
        Ok(())
    }

    /// Waits for the process to exit, preferring the push-based exit watch
    /// and falling back to polling the server if the watch closes without
    /// ever reporting an exit.
    async fn wait(&self) -> Result<ExecExit, ExecError> {
        let mut exit_receiver = self.exit_status.subscribe();
        // Fast path: the process already exited.
        if let Some(status) = *exit_receiver.borrow() {
            return Ok(status);
        }

        loop {
            match exit_receiver.changed().await {
                Ok(()) => {
                    if let Some(status) = *exit_receiver.borrow() {
                        return Ok(status);
                    }
                }
                // The watch sender is gone (the event task ended without an
                // exit notification); poll the server directly instead.
                Err(_) => {
                    return self.wait_for_read_exit().await.map_err(|err| {
                        ExecError::Transport(format!("failed to wait for process: {err}"))
                    });
                }
            }
        }
    }

    /// Polls `process/read` (zero bytes, short server-side wait) until the
    /// server reports the process as exited.
    async fn wait_for_read_exit(&self) -> Result<ExecExit, crate::ExecServerError> {
        loop {
            let response = self
                .client
                .read(ReadParams {
                    process_id: self.process_id.clone(),
                    after_seq: None,
                    // We only care about exit state, not output bytes.
                    max_bytes: Some(0),
                    wait_ms: Some(50),
                })
                .await?;
            if response.exited {
                // NOTE(review): a missing exit code is reported as -1 —
                // confirm callers treat -1 as "unknown" and not a real status.
                return Ok(ExecExit {
                    exit_code: response.exit_code.unwrap_or(-1),
                });
            }
            sleep(Duration::from_millis(25)).await;
        }
    }

    /// Non-blocking exit check; `None` while the process is still running.
    fn try_exit_status(&self) -> Option<ExecExit> {
        *self.exit_status.borrow()
    }
}

impl Drop for ExecServerExecSession {
    fn drop(&mut self) {
        // Stop the event-forwarding task; abort is a no-op if it already
        // finished.
        self.output_task.abort();
    }
}
|
||||
|
||||
fn request_to_exec_params(request: ExecSpawnRequest) -> ExecParams {
|
||||
ExecParams {
|
||||
process_id: request.process_id,
|
||||
argv: request.argv,
|
||||
cwd: request.cwd.into(),
|
||||
env: request.env,
|
||||
tty: request.tty,
|
||||
arg0: request.arg0,
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_request(request: &ExecSpawnRequest) -> Result<(), ExecError> {
|
||||
if request.sandbox != SandboxKind::None {
|
||||
return Err(ExecError::Spawn(format!(
|
||||
"sandbox policy {:?} is not supported by this executor",
|
||||
request.sandbox
|
||||
)));
|
||||
}
|
||||
if !request.inherited_fds.is_empty() {
|
||||
return Err(ExecError::Spawn(
|
||||
"inherited file descriptors are not supported by exec-server executor".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,5 +1,15 @@
|
||||
use async_trait::async_trait;
|
||||
use base64::Engine as _;
|
||||
use base64::engine::general_purpose::STANDARD;
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use std::fmt;
|
||||
use std::path::Component;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
@@ -7,8 +17,23 @@ use std::time::SystemTime;
|
||||
use std::time::UNIX_EPOCH;
|
||||
use tokio::io;
|
||||
|
||||
use crate::ExecServerClient;
|
||||
use crate::ExecServerError;
|
||||
|
||||
/// Upper bound on `read_file` payloads (512 MiB) to avoid unbounded
/// allocations when reading huge files.
const MAX_READ_FILE_BYTES: u64 = 512 * 1024 * 1024;

/// Error type for every [`ExecutorFileSystem`] operation, wrapping the
/// underlying I/O error.
#[derive(Debug, thiserror::Error)]
#[error("filesystem operation failed: {0}")]
pub struct FsError(#[source] pub io::Error);

impl From<io::Error> for FsError {
    fn from(err: io::Error) -> Self {
        Self(err)
    }
}

/// Convenience alias used throughout the filesystem trait and its impls.
pub type FileSystemResult<T> = Result<T, FsError>;
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct CreateDirectoryOptions {
|
||||
pub recursive: bool,
|
||||
@@ -40,10 +65,8 @@ pub struct ReadDirectoryEntry {
|
||||
pub is_file: bool,
|
||||
}
|
||||
|
||||
pub type FileSystemResult<T> = io::Result<T>;
|
||||
|
||||
#[async_trait]
|
||||
pub trait ExecutorFileSystem: Send + Sync {
|
||||
pub trait ExecutorFileSystem: std::fmt::Debug + Send + Sync {
|
||||
async fn read_file(&self, path: &AbsolutePathBuf) -> FileSystemResult<Vec<u8>>;
|
||||
|
||||
async fn write_file(&self, path: &AbsolutePathBuf, contents: Vec<u8>) -> FileSystemResult<()>;
|
||||
@@ -69,26 +92,79 @@ pub trait ExecutorFileSystem: Send + Sync {
|
||||
destination_path: &AbsolutePathBuf,
|
||||
options: CopyOptions,
|
||||
) -> FileSystemResult<()>;
|
||||
|
||||
async fn file_metadata(&self, path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
|
||||
self.get_metadata(path).await
|
||||
}
|
||||
|
||||
async fn create_dir_all(&self, path: &AbsolutePathBuf) -> FileSystemResult<()> {
|
||||
self.create_directory(path, CreateDirectoryOptions { recursive: true })
|
||||
.await
|
||||
}
|
||||
|
||||
async fn remove_file(&self, path: &AbsolutePathBuf) -> FileSystemResult<()> {
|
||||
self.remove(
|
||||
path,
|
||||
RemoveOptions {
|
||||
recursive: false,
|
||||
force: false,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn remove_dir_all(&self, path: &AbsolutePathBuf) -> FileSystemResult<()> {
|
||||
self.remove(
|
||||
path,
|
||||
RemoveOptions {
|
||||
recursive: true,
|
||||
force: false,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_dir(&self, path: &AbsolutePathBuf) -> FileSystemResult<Vec<ReadDirectoryEntry>> {
|
||||
self.read_directory(path).await
|
||||
}
|
||||
|
||||
async fn symlink_metadata(&self, path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
|
||||
self.file_metadata(path).await
|
||||
}
|
||||
|
||||
async fn rename(&self, _from: &AbsolutePathBuf, _to: &AbsolutePathBuf) -> FileSystemResult<()> {
|
||||
Err(FsError(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"rename is not supported by this filesystem backend",
|
||||
)))
|
||||
}
|
||||
|
||||
async fn read_link(&self, _path: &AbsolutePathBuf) -> FileSystemResult<AbsolutePathBuf> {
|
||||
Err(FsError(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"read_link is not supported by this filesystem backend",
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct LocalFileSystem;
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct LocalFileSystem;
|
||||
|
||||
#[async_trait]
|
||||
impl ExecutorFileSystem for LocalFileSystem {
|
||||
async fn read_file(&self, path: &AbsolutePathBuf) -> FileSystemResult<Vec<u8>> {
|
||||
let metadata = tokio::fs::metadata(path.as_path()).await?;
|
||||
if metadata.len() > MAX_READ_FILE_BYTES {
|
||||
return Err(io::Error::new(
|
||||
return Err(FsError(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
format!("file is too large to read: limit is {MAX_READ_FILE_BYTES} bytes"),
|
||||
));
|
||||
)));
|
||||
}
|
||||
tokio::fs::read(path.as_path()).await
|
||||
Ok(tokio::fs::read(path.as_path()).await?)
|
||||
}
|
||||
|
||||
async fn write_file(&self, path: &AbsolutePathBuf, contents: Vec<u8>) -> FileSystemResult<()> {
|
||||
tokio::fs::write(path.as_path(), contents).await
|
||||
Ok(tokio::fs::write(path.as_path(), contents).await?)
|
||||
}
|
||||
|
||||
async fn create_directory(
|
||||
@@ -147,7 +223,7 @@ impl ExecutorFileSystem for LocalFileSystem {
|
||||
Ok(())
|
||||
}
|
||||
Err(err) if err.kind() == io::ErrorKind::NotFound && options.force => Ok(()),
|
||||
Err(err) => Err(err),
|
||||
Err(err) => Err(FsError(err)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -165,19 +241,19 @@ impl ExecutorFileSystem for LocalFileSystem {
|
||||
|
||||
if file_type.is_dir() {
|
||||
if !options.recursive {
|
||||
return Err(io::Error::new(
|
||||
return Err(FsError(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"fs/copy requires recursive: true when sourcePath is a directory",
|
||||
));
|
||||
)));
|
||||
}
|
||||
if destination_is_same_or_descendant_of_source(
|
||||
source_path.as_path(),
|
||||
destination_path.as_path(),
|
||||
)? {
|
||||
return Err(io::Error::new(
|
||||
return Err(FsError(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"fs/copy cannot copy a directory to itself or one of its descendants",
|
||||
));
|
||||
)));
|
||||
}
|
||||
copy_dir_recursive(source_path.as_path(), destination_path.as_path())?;
|
||||
return Ok(());
|
||||
@@ -193,13 +269,192 @@ impl ExecutorFileSystem for LocalFileSystem {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(io::Error::new(
|
||||
Err(FsError(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"fs/copy only supports regular files, directories, and symlinks",
|
||||
))
|
||||
)))
|
||||
})
|
||||
.await
|
||||
.map_err(|err| io::Error::other(format!("filesystem task failed: {err}")))?
|
||||
.map_err(|err| FsError(io::Error::other(format!("filesystem task failed: {err}"))))?
|
||||
}
|
||||
|
||||
async fn file_metadata(&self, path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
|
||||
let metadata = tokio::fs::symlink_metadata(path.as_path()).await?;
|
||||
Ok(FileMetadata {
|
||||
is_directory: metadata.is_dir(),
|
||||
is_file: metadata.is_file(),
|
||||
created_at_ms: metadata.created().ok().map_or(0, system_time_to_unix_ms),
|
||||
modified_at_ms: metadata.modified().ok().map_or(0, system_time_to_unix_ms),
|
||||
})
|
||||
}
|
||||
|
||||
async fn symlink_metadata(&self, path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
|
||||
self.file_metadata(path).await
|
||||
}
|
||||
|
||||
async fn read_link(&self, path: &AbsolutePathBuf) -> FileSystemResult<AbsolutePathBuf> {
|
||||
let target = tokio::fs::read_link(path.as_path()).await?;
|
||||
AbsolutePathBuf::try_from(target)
|
||||
.map_err(io::Error::other)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
async fn rename(&self, from: &AbsolutePathBuf, to: &AbsolutePathBuf) -> FileSystemResult<()> {
|
||||
tokio::fs::rename(from.as_path(), to.as_path()).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// [`ExecutorFileSystem`] implementation backed by a remote exec server's
/// `fs/*` RPCs.
#[derive(Clone)]
pub struct RemoteFileSystem {
    client: ExecServerClient,
}

impl fmt::Debug for RemoteFileSystem {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Keep the client handle out of debug output/logs.
        f.debug_struct("RemoteFileSystem")
            .field("client", &"redacted")
            .finish()
    }
}

impl RemoteFileSystem {
    /// Wraps an already-connected exec-server client.
    pub(crate) fn new(client: ExecServerClient) -> Self {
        Self { client }
    }
}
|
||||
|
||||
#[async_trait]
impl ExecutorFileSystem for RemoteFileSystem {
    /// Reads a file via `fs/readFile`; the server transports payloads as
    /// base64, so a decode failure means the server sent malformed data.
    async fn read_file(&self, path: &AbsolutePathBuf) -> FileSystemResult<Vec<u8>> {
        let response = self
            .client
            .fs_read_file(FsReadFileParams { path: path.clone() })
            .await
            .map_err(map_exec_server_error)?;
        STANDARD
            .decode(response.data_base64)
            .map_err(|err| FsError(io::Error::new(io::ErrorKind::InvalidData, err)))
    }

    /// Writes a file via `fs/writeFile`, base64-encoding the payload.
    async fn write_file(&self, path: &AbsolutePathBuf, contents: Vec<u8>) -> FileSystemResult<()> {
        self.client
            .fs_write_file(FsWriteFileParams {
                path: path.clone(),
                data_base64: STANDARD.encode(contents),
            })
            .await
            .map_err(map_exec_server_error)?;
        Ok(())
    }

    /// Creates a directory via `fs/createDirectory`.
    async fn create_directory(
        &self,
        path: &AbsolutePathBuf,
        options: CreateDirectoryOptions,
    ) -> FileSystemResult<()> {
        self.client
            .fs_create_directory(FsCreateDirectoryParams {
                path: path.clone(),
                recursive: Some(options.recursive),
            })
            .await
            .map_err(map_exec_server_error)?;
        Ok(())
    }

    /// Fetches metadata via `fs/getMetadata`.
    async fn get_metadata(&self, path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
        let response = self
            .client
            .fs_get_metadata(FsGetMetadataParams { path: path.clone() })
            .await
            .map_err(map_exec_server_error)?;
        Ok(FileMetadata {
            is_directory: response.is_directory,
            is_file: response.is_file,
            created_at_ms: response.created_at_ms,
            modified_at_ms: response.modified_at_ms,
        })
    }

    /// Lists directory entries via `fs/readDirectory`.
    async fn read_directory(
        &self,
        path: &AbsolutePathBuf,
    ) -> FileSystemResult<Vec<ReadDirectoryEntry>> {
        let response = self
            .client
            .fs_read_directory(FsReadDirectoryParams { path: path.clone() })
            .await
            .map_err(map_exec_server_error)?;
        Ok(response
            .entries
            .into_iter()
            .map(|entry| ReadDirectoryEntry {
                file_name: entry.file_name,
                is_directory: entry.is_directory,
                is_file: entry.is_file,
            })
            .collect())
    }

    /// Removes a file or directory via `fs/remove`.
    async fn remove(&self, path: &AbsolutePathBuf, options: RemoveOptions) -> FileSystemResult<()> {
        self.client
            .fs_remove(FsRemoveParams {
                path: path.clone(),
                recursive: Some(options.recursive),
                force: Some(options.force),
            })
            .await
            .map_err(map_exec_server_error)?;
        Ok(())
    }

    /// Copies a file or directory tree via `fs/copy`.
    async fn copy(
        &self,
        source_path: &AbsolutePathBuf,
        destination_path: &AbsolutePathBuf,
        options: CopyOptions,
    ) -> FileSystemResult<()> {
        self.client
            .fs_copy(FsCopyParams {
                source_path: source_path.clone(),
                destination_path: destination_path.clone(),
                recursive: options.recursive,
            })
            .await
            .map_err(map_exec_server_error)?;
        Ok(())
    }

    // The remote protocol has no symlink-aware RPCs; these fail loudly with
    // `Unsupported` rather than silently falling back to other calls.
    async fn read_link(&self, _path: &AbsolutePathBuf) -> FileSystemResult<AbsolutePathBuf> {
        Err(FsError(io::Error::new(
            io::ErrorKind::Unsupported,
            "read_link is not supported by remote exec-server filesystem",
        )))
    }

    async fn symlink_metadata(&self, _path: &AbsolutePathBuf) -> FileSystemResult<FileMetadata> {
        Err(FsError(io::Error::new(
            io::ErrorKind::Unsupported,
            "symlink_metadata is not supported by remote exec-server filesystem",
        )))
    }

    async fn rename(&self, _from: &AbsolutePathBuf, _to: &AbsolutePathBuf) -> FileSystemResult<()> {
        Err(FsError(io::Error::new(
            io::ErrorKind::Unsupported,
            "rename is not supported by remote exec-server filesystem",
        )))
    }
}
|
||||
|
||||
fn map_exec_server_error(err: ExecServerError) -> FsError {
|
||||
match err {
|
||||
ExecServerError::Server {
|
||||
code: -32600 | -32602,
|
||||
message,
|
||||
} => io::Error::new(io::ErrorKind::InvalidInput, message).into(),
|
||||
other => io::Error::other(other.to_string()).into(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@ mod client;
|
||||
mod client_api;
|
||||
mod connection;
|
||||
mod environment;
|
||||
mod executor;
|
||||
mod fs;
|
||||
mod local;
|
||||
mod protocol;
|
||||
mod rpc;
|
||||
mod server;
|
||||
@@ -10,17 +12,64 @@ mod server;
|
||||
pub use client::ExecServerClient;
|
||||
pub use client::ExecServerError;
|
||||
pub use client_api::ExecServerClientConnectOptions;
|
||||
pub use client_api::ExecServerEvent;
|
||||
pub use client_api::RemoteExecServerConnectArgs;
|
||||
pub use codex_app_server_protocol::FsCopyParams;
|
||||
pub use codex_app_server_protocol::FsCopyResponse;
|
||||
pub use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
pub use codex_app_server_protocol::FsCreateDirectoryResponse;
|
||||
pub use codex_app_server_protocol::FsGetMetadataParams;
|
||||
pub use codex_app_server_protocol::FsGetMetadataResponse;
|
||||
pub use codex_app_server_protocol::FsReadDirectoryEntry;
|
||||
pub use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
pub use codex_app_server_protocol::FsReadDirectoryResponse;
|
||||
pub use codex_app_server_protocol::FsReadFileParams;
|
||||
pub use codex_app_server_protocol::FsReadFileResponse;
|
||||
pub use codex_app_server_protocol::FsRemoveParams;
|
||||
pub use codex_app_server_protocol::FsRemoveResponse;
|
||||
pub use codex_app_server_protocol::FsWriteFileParams;
|
||||
pub use codex_app_server_protocol::FsWriteFileResponse;
|
||||
pub use environment::Environment;
|
||||
pub use environment::EnvironmentError;
|
||||
pub use executor::ExecError;
|
||||
pub use executor::ExecExit;
|
||||
pub use executor::ExecOutputEvent;
|
||||
pub use executor::ExecSession;
|
||||
pub use executor::ExecSpawnRequest;
|
||||
pub use executor::Executor;
|
||||
pub use executor::InheritedFd;
|
||||
pub use executor::LocalExecSession;
|
||||
pub use executor::LocalExecutor;
|
||||
pub use executor::PtySize;
|
||||
pub use executor::RemoteExecSession;
|
||||
pub use executor::RemoteExecutor;
|
||||
pub use executor::SandboxKind;
|
||||
pub use fs::CopyOptions;
|
||||
pub use fs::CreateDirectoryOptions;
|
||||
pub use fs::ExecutorFileSystem;
|
||||
pub use fs::FileMetadata;
|
||||
pub use fs::FileSystemResult;
|
||||
pub use fs::FsError;
|
||||
pub use fs::LocalFileSystem;
|
||||
pub use fs::ReadDirectoryEntry;
|
||||
pub use fs::RemoteFileSystem;
|
||||
pub use fs::RemoveOptions;
|
||||
pub use local::ExecServerLaunchCommand;
|
||||
pub use local::SpawnedExecServer;
|
||||
pub use local::spawn_local_exec_server;
|
||||
pub use protocol::ExecExitedNotification;
|
||||
pub use protocol::ExecOutputDeltaNotification;
|
||||
pub use protocol::ExecOutputStream;
|
||||
pub use protocol::ExecParams;
|
||||
pub use protocol::ExecResponse;
|
||||
pub use protocol::InitializeParams;
|
||||
pub use protocol::InitializeResponse;
|
||||
pub use protocol::ReadParams;
|
||||
pub use protocol::ReadResponse;
|
||||
pub use protocol::TerminateParams;
|
||||
pub use protocol::TerminateResponse;
|
||||
pub use protocol::WriteParams;
|
||||
pub use protocol::WriteResponse;
|
||||
pub use server::DEFAULT_LISTEN_URL;
|
||||
pub use server::ExecServerListenUrlParseError;
|
||||
pub use server::run_main;
|
||||
|
||||
109
codex-rs/exec-server/src/local.rs
Normal file
109
codex-rs/exec-server/src/local.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
use std::net::TcpListener;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Stdio;
|
||||
use std::sync::Mutex as StdMutex;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::process::Child;
|
||||
use tokio::process::Command;
|
||||
use tokio::time::Instant;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::client::ExecServerClient;
|
||||
use crate::client::ExecServerError;
|
||||
use crate::client_api::ExecServerClientConnectOptions;
|
||||
use crate::client_api::RemoteExecServerConnectArgs;
|
||||
|
||||
/// Program and arguments used to launch an exec-server subprocess; the
/// `--listen` URL is appended by `spawn_local_exec_server`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExecServerLaunchCommand {
    pub program: PathBuf,
    pub args: Vec<String>,
}

/// Handle to a spawned exec-server subprocess together with a connected
/// client. Dropping the handle kills the child process.
pub struct SpawnedExecServer {
    client: ExecServerClient,
    // Std (not tokio) mutex so `Drop` can lock without an async context.
    child: StdMutex<Option<Child>>,
}

/// Delay between websocket connect attempts while the server starts up.
const CONNECT_RETRY_INTERVAL: Duration = Duration::from_millis(25);

impl SpawnedExecServer {
    /// Client connected to the spawned server.
    pub fn client(&self) -> &ExecServerClient {
        &self.client
    }
}
|
||||
|
||||
impl Drop for SpawnedExecServer {
|
||||
fn drop(&mut self) {
|
||||
if let Ok(mut child_guard) = self.child.lock()
|
||||
&& let Some(child) = child_guard.as_mut()
|
||||
{
|
||||
let _ = child.start_kill();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawns an exec-server subprocess listening on a freshly reserved local
/// websocket port and connects a client to it.
///
/// The child is killed if the initial connection fails, and also when the
/// returned handle is dropped (`kill_on_drop` acts as a backstop).
///
/// # Errors
/// Returns [`ExecServerError::Spawn`] if the port reservation or process
/// launch fails, or a connect error if the server never becomes reachable
/// within the timeout.
pub async fn spawn_local_exec_server(
    command: ExecServerLaunchCommand,
    options: ExecServerClientConnectOptions,
) -> Result<SpawnedExecServer, ExecServerError> {
    let websocket_url = reserve_websocket_url().map_err(ExecServerError::Spawn)?;

    let mut child = Command::new(&command.program);
    child.args(&command.args);
    child.args(["--listen", &websocket_url]);
    child.stdin(Stdio::null());
    child.stdout(Stdio::null());
    // Keep stderr attached so server-side diagnostics remain visible.
    child.stderr(Stdio::inherit());
    child.kill_on_drop(true);

    let mut child = child.spawn().map_err(ExecServerError::Spawn)?;
    let connect_args = RemoteExecServerConnectArgs {
        websocket_url,
        client_name: options.client_name.clone(),
        // NOTE(review): `connect_timeout` is populated from
        // `options.initialize_timeout` — confirm this is intentional and not
        // a copy-paste of the field below.
        connect_timeout: options.initialize_timeout,
        initialize_timeout: options.initialize_timeout,
    };

    let client = match connect_when_ready(connect_args).await {
        Ok(client) => client,
        Err(err) => {
            // Don't leak the child if we never managed to connect.
            let _ = child.start_kill();
            return Err(err);
        }
    };

    Ok(SpawnedExecServer {
        client,
        child: StdMutex::new(Some(child)),
    })
}
|
||||
|
||||
/// Picks a free local port by binding to an OS-assigned one, then releases
/// the listener and returns a `ws://` URL for that address.
///
/// NOTE(review): the port can be reclaimed by another process between the
/// release here and the server's own bind — acceptable for local use, but
/// inherently racy.
fn reserve_websocket_url() -> std::io::Result<String> {
    let addr = TcpListener::bind("127.0.0.1:0")?.local_addr()?;
    Ok(format!("ws://{addr}"))
}
|
||||
|
||||
/// Repeatedly attempts to connect until the server accepts.
///
/// Retries only on `ConnectionRefused` (the server socket is not listening
/// yet) and only while `args.connect_timeout` has not elapsed; any other
/// error — or a refusal past the deadline — is returned as-is.
async fn connect_when_ready(
    args: RemoteExecServerConnectArgs,
) -> Result<ExecServerClient, ExecServerError> {
    let deadline = Instant::now() + args.connect_timeout;
    loop {
        match ExecServerClient::connect_websocket(args.clone()).await {
            Ok(client) => return Ok(client),
            Err(ExecServerError::WebSocketConnect { source, .. })
                if Instant::now() < deadline
                    && matches!(
                        source,
                        tokio_tungstenite::tungstenite::Error::Io(ref io_err)
                            if io_err.kind() == std::io::ErrorKind::ConnectionRefused
                    ) =>
            {
                sleep(CONNECT_RETRY_INTERVAL).await;
            }
            Err(err) => return Err(err),
        }
    }
}
|
||||
@@ -1,8 +1,41 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
|
||||
pub const INITIALIZE_METHOD: &str = "initialize";
|
||||
pub const INITIALIZED_METHOD: &str = "initialized";
|
||||
pub const EXEC_METHOD: &str = "process/start";
|
||||
pub const EXEC_READ_METHOD: &str = "process/read";
|
||||
pub const EXEC_WRITE_METHOD: &str = "process/write";
|
||||
pub const EXEC_TERMINATE_METHOD: &str = "process/terminate";
|
||||
pub const EXEC_OUTPUT_DELTA_METHOD: &str = "process/output";
|
||||
pub const EXEC_EXITED_METHOD: &str = "process/exited";
|
||||
pub const FS_READ_FILE_METHOD: &str = "fs/readFile";
|
||||
pub const FS_WRITE_FILE_METHOD: &str = "fs/writeFile";
|
||||
pub const FS_CREATE_DIRECTORY_METHOD: &str = "fs/createDirectory";
|
||||
pub const FS_GET_METADATA_METHOD: &str = "fs/getMetadata";
|
||||
pub const FS_READ_DIRECTORY_METHOD: &str = "fs/readDirectory";
|
||||
pub const FS_REMOVE_METHOD: &str = "fs/remove";
|
||||
pub const FS_COPY_METHOD: &str = "fs/copy";
|
||||
|
||||
/// Raw bytes serialized as a base64 string on the wire (see `base64_bytes`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ByteChunk(#[serde(with = "base64_bytes")] pub Vec<u8>);

impl ByteChunk {
    /// Consumes the wrapper and returns the raw bytes.
    pub fn into_inner(self) -> Vec<u8> {
        self.0
    }
}

impl From<Vec<u8>> for ByteChunk {
    fn from(value: Vec<u8>) -> Self {
        Self(value)
    }
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
@@ -13,3 +46,121 @@ pub struct InitializeParams {
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct InitializeResponse {}
|
||||
|
||||
/// Parameters for `process/start`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecParams {
    /// Client-chosen logical process handle scoped to this connection/session.
    /// This is a protocol key, not an OS pid.
    pub process_id: String,
    pub argv: Vec<String>,
    pub cwd: PathBuf,
    pub env: HashMap<String, String>,
    /// Whether to run the process under a PTY.
    pub tty: bool,
    /// Optional override for the process's argv[0].
    pub arg0: Option<String>,
}

/// Response to `process/start`; echoes the process handle.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecResponse {
    pub process_id: String,
}

/// Parameters for `process/read` (pull-based output/exit polling).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReadParams {
    pub process_id: String,
    /// Only return chunks with sequence numbers greater than this.
    pub after_seq: Option<u64>,
    /// Cap on returned output bytes; `Some(0)` polls exit state only.
    pub max_bytes: Option<usize>,
    /// How long the server may wait for new output before responding.
    pub wait_ms: Option<u64>,
}

/// One sequenced chunk of process output.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProcessOutputChunk {
    pub seq: u64,
    pub stream: ExecOutputStream,
    pub chunk: ByteChunk,
}

/// Response to `process/read`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReadResponse {
    pub chunks: Vec<ProcessOutputChunk>,
    /// Cursor suitable for the next read's `after_seq`.
    pub next_seq: u64,
    pub exited: bool,
    /// Present only once `exited` is true; may still be absent if the exit
    /// status is unknown.
    pub exit_code: Option<i32>,
}

/// Parameters for `process/write` (process stdin).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WriteParams {
    pub process_id: String,
    pub chunk: ByteChunk,
}

/// Response to `process/write`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WriteResponse {
    pub accepted: bool,
}

/// Parameters for `process/terminate`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TerminateParams {
    pub process_id: String,
}

/// Response to `process/terminate`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TerminateResponse {
    // NOTE(review): presumably whether the process was still running when
    // terminate was handled — confirm against the server implementation.
    pub running: bool,
}

/// Which stream a chunk of output came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ExecOutputStream {
    Stdout,
    Stderr,
    /// Combined PTY output when the process runs under a TTY.
    Pty,
}

/// `process/output` push notification.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecOutputDeltaNotification {
    pub process_id: String,
    pub stream: ExecOutputStream,
    pub chunk: ByteChunk,
}

/// `process/exited` push notification.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecExitedNotification {
    pub process_id: String,
    pub exit_code: i32,
}
|
||||
|
||||
/// Serde helpers that encode `Vec<u8>` as a standard-alphabet base64 string;
/// used via `#[serde(with = "base64_bytes")]` on [`ByteChunk`].
mod base64_bytes {
    use super::BASE64_STANDARD;
    use base64::Engine as _;
    use serde::Deserialize;
    use serde::Deserializer;
    use serde::Serializer;

    /// Encodes `bytes` as a base64 string.
    pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&BASE64_STANDARD.encode(bytes))
    }

    /// Decodes a base64 string back into bytes; invalid base64 surfaces as a
    /// serde error.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let encoded = String::deserialize(deserializer)?;
        BASE64_STANDARD
            .decode(encoded)
            .map_err(serde::de::Error::custom)
    }
}
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::atomic::Ordering;
|
||||
@@ -23,6 +25,11 @@ use crate::connection::JsonRpcConnection;
|
||||
use crate::connection::JsonRpcConnectionEvent;
|
||||
|
||||
type PendingRequest = oneshot::Sender<Result<Value, JSONRPCErrorError>>;
|
||||
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send + 'static>>;
|
||||
type RequestRoute<S> =
|
||||
Box<dyn Fn(Arc<S>, JSONRPCRequest) -> BoxFuture<RpcServerOutboundMessage> + Send + Sync>;
|
||||
type NotificationRoute<S> =
|
||||
Box<dyn Fn(Arc<S>, JSONRPCNotification) -> BoxFuture<Result<(), String>> + Send + Sync>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum RpcClientEvent {
|
||||
@@ -30,6 +37,139 @@ pub(crate) enum RpcClientEvent {
|
||||
Disconnected { reason: Option<String> },
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub(crate) enum RpcServerOutboundMessage {
|
||||
Response {
|
||||
request_id: RequestId,
|
||||
result: Value,
|
||||
},
|
||||
Error {
|
||||
request_id: RequestId,
|
||||
error: JSONRPCErrorError,
|
||||
},
|
||||
#[allow(dead_code)]
|
||||
Notification(JSONRPCNotification),
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct RpcNotificationSender {
|
||||
outgoing_tx: mpsc::Sender<RpcServerOutboundMessage>,
|
||||
}
|
||||
|
||||
impl RpcNotificationSender {
|
||||
pub(crate) fn new(outgoing_tx: mpsc::Sender<RpcServerOutboundMessage>) -> Self {
|
||||
Self { outgoing_tx }
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn notify<P: Serialize>(
|
||||
&self,
|
||||
method: &str,
|
||||
params: &P,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
let params = serde_json::to_value(params).map_err(|err| internal_error(err.to_string()))?;
|
||||
self.outgoing_tx
|
||||
.send(RpcServerOutboundMessage::Notification(
|
||||
JSONRPCNotification {
|
||||
method: method.to_string(),
|
||||
params: Some(params),
|
||||
},
|
||||
))
|
||||
.await
|
||||
.map_err(|_| internal_error("RPC connection closed while sending notification".into()))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct RpcRouter<S> {
|
||||
request_routes: HashMap<&'static str, RequestRoute<S>>,
|
||||
notification_routes: HashMap<&'static str, NotificationRoute<S>>,
|
||||
}
|
||||
|
||||
impl<S> Default for RpcRouter<S> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
request_routes: HashMap::new(),
|
||||
notification_routes: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> RpcRouter<S>
|
||||
where
|
||||
S: Send + Sync + 'static,
|
||||
{
|
||||
pub(crate) fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub(crate) fn request<P, R, F, Fut>(&mut self, method: &'static str, handler: F)
|
||||
where
|
||||
P: DeserializeOwned + Send + 'static,
|
||||
R: Serialize + Send + 'static,
|
||||
F: Fn(Arc<S>, P) -> Fut + Send + Sync + 'static,
|
||||
Fut: Future<Output = Result<R, JSONRPCErrorError>> + Send + 'static,
|
||||
{
|
||||
self.request_routes.insert(
|
||||
method,
|
||||
Box::new(move |state, request| {
|
||||
let request_id = request.id;
|
||||
let params = request.params;
|
||||
let response =
|
||||
decode_request_params::<P>(params).map(|params| handler(state, params));
|
||||
Box::pin(async move {
|
||||
let response = match response {
|
||||
Ok(response) => response.await,
|
||||
Err(error) => {
|
||||
return RpcServerOutboundMessage::Error { request_id, error };
|
||||
}
|
||||
};
|
||||
match response {
|
||||
Ok(result) => match serde_json::to_value(result) {
|
||||
Ok(result) => RpcServerOutboundMessage::Response { request_id, result },
|
||||
Err(err) => RpcServerOutboundMessage::Error {
|
||||
request_id,
|
||||
error: internal_error(err.to_string()),
|
||||
},
|
||||
},
|
||||
Err(error) => RpcServerOutboundMessage::Error { request_id, error },
|
||||
}
|
||||
})
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
pub(crate) fn notification<P, F, Fut>(&mut self, method: &'static str, handler: F)
|
||||
where
|
||||
P: DeserializeOwned + Send + 'static,
|
||||
F: Fn(Arc<S>, P) -> Fut + Send + Sync + 'static,
|
||||
Fut: Future<Output = Result<(), String>> + Send + 'static,
|
||||
{
|
||||
self.notification_routes.insert(
|
||||
method,
|
||||
Box::new(move |state, notification| {
|
||||
let params = decode_notification_params::<P>(notification.params)
|
||||
.map(|params| handler(state, params));
|
||||
Box::pin(async move {
|
||||
let handler = match params {
|
||||
Ok(handler) => handler,
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
handler.await
|
||||
})
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
pub(crate) fn request_route(&self, method: &str) -> Option<&RequestRoute<S>> {
|
||||
self.request_routes.get(method)
|
||||
}
|
||||
|
||||
pub(crate) fn notification_route(&self, method: &str) -> Option<&NotificationRoute<S>> {
|
||||
self.notification_routes.get(method)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct RpcClient {
|
||||
write_tx: mpsc::Sender<JSONRPCMessage>,
|
||||
pending: Arc<Mutex<HashMap<RequestId, PendingRequest>>>,
|
||||
@@ -57,14 +197,8 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
JsonRpcConnectionEvent::MalformedMessage { reason } => {
|
||||
warn!("JSON-RPC client closing after malformed server message: {reason}");
|
||||
let _ = event_tx
|
||||
.send(RpcClientEvent::Disconnected {
|
||||
reason: Some(reason),
|
||||
})
|
||||
.await;
|
||||
drain_pending(&pending_for_reader).await;
|
||||
return;
|
||||
warn!("JSON-RPC client closing after malformed message: {reason}");
|
||||
break;
|
||||
}
|
||||
JsonRpcConnectionEvent::Disconnected { reason } => {
|
||||
let _ = event_tx.send(RpcClientEvent::Disconnected { reason }).await;
|
||||
@@ -177,6 +311,91 @@ pub(crate) enum RpcCallError {
|
||||
Server(JSONRPCErrorError),
|
||||
}
|
||||
|
||||
pub(crate) fn encode_server_message(
|
||||
message: RpcServerOutboundMessage,
|
||||
) -> Result<JSONRPCMessage, serde_json::Error> {
|
||||
match message {
|
||||
RpcServerOutboundMessage::Response { request_id, result } => {
|
||||
Ok(JSONRPCMessage::Response(JSONRPCResponse {
|
||||
id: request_id,
|
||||
result,
|
||||
}))
|
||||
}
|
||||
RpcServerOutboundMessage::Error { request_id, error } => {
|
||||
Ok(JSONRPCMessage::Error(JSONRPCError {
|
||||
id: request_id,
|
||||
error,
|
||||
}))
|
||||
}
|
||||
RpcServerOutboundMessage::Notification(notification) => {
|
||||
Ok(JSONRPCMessage::Notification(notification))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn invalid_request(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32600,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn method_not_found(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32601,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn invalid_params(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32602,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn internal_error(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32603,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_request_params<P>(params: Option<Value>) -> Result<P, JSONRPCErrorError>
|
||||
where
|
||||
P: DeserializeOwned,
|
||||
{
|
||||
decode_params(params).map_err(|err| invalid_params(err.to_string()))
|
||||
}
|
||||
|
||||
fn decode_notification_params<P>(params: Option<Value>) -> Result<P, String>
|
||||
where
|
||||
P: DeserializeOwned,
|
||||
{
|
||||
decode_params(params).map_err(|err| err.to_string())
|
||||
}
|
||||
|
||||
fn decode_params<P>(params: Option<Value>) -> Result<P, serde_json::Error>
|
||||
where
|
||||
P: DeserializeOwned,
|
||||
{
|
||||
let params = params.unwrap_or(Value::Null);
|
||||
match serde_json::from_value(params.clone()) {
|
||||
Ok(params) => Ok(params),
|
||||
Err(err) => {
|
||||
if matches!(params, Value::Object(ref map) if map.is_empty()) {
|
||||
serde_json::from_value(Value::Null).map_err(|_| err)
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_server_message(
|
||||
pending: &Mutex<HashMap<RequestId, PendingRequest>>,
|
||||
event_tx: &mpsc::Sender<RpcClientEvent>,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
mod filesystem;
|
||||
mod handler;
|
||||
mod jsonrpc;
|
||||
mod processor;
|
||||
mod registry;
|
||||
mod transport;
|
||||
|
||||
pub(crate) use handler::ExecServerHandler;
|
||||
|
||||
170
codex-rs/exec-server/src/server/filesystem.rs
Normal file
170
codex-rs/exec-server/src/server/filesystem.rs
Normal file
@@ -0,0 +1,170 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use base64::Engine as _;
|
||||
use base64::engine::general_purpose::STANDARD;
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCopyResponse;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryResponse;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsGetMetadataResponse;
|
||||
use codex_app_server_protocol::FsReadDirectoryEntry;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryResponse;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsReadFileResponse;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsRemoveResponse;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_app_server_protocol::FsWriteFileResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
|
||||
use crate::environment::Environment;
|
||||
use crate::fs::CopyOptions;
|
||||
use crate::fs::CreateDirectoryOptions;
|
||||
use crate::fs::ExecutorFileSystem;
|
||||
use crate::fs::FsError;
|
||||
use crate::fs::RemoveOptions;
|
||||
use crate::rpc::internal_error;
|
||||
use crate::rpc::invalid_request;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ExecServerFileSystem {
|
||||
file_system: Arc<dyn ExecutorFileSystem>,
|
||||
}
|
||||
|
||||
impl Default for ExecServerFileSystem {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
file_system: Environment::default().get_filesystem(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecServerFileSystem {
|
||||
pub(crate) async fn read_file(
|
||||
&self,
|
||||
params: FsReadFileParams,
|
||||
) -> Result<FsReadFileResponse, JSONRPCErrorError> {
|
||||
let bytes = self
|
||||
.file_system
|
||||
.read_file(¶ms.path)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsReadFileResponse {
|
||||
data_base64: STANDARD.encode(bytes),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn write_file(
|
||||
&self,
|
||||
params: FsWriteFileParams,
|
||||
) -> Result<FsWriteFileResponse, JSONRPCErrorError> {
|
||||
let bytes = STANDARD.decode(params.data_base64).map_err(|err| {
|
||||
invalid_request(format!(
|
||||
"fs/writeFile requires valid base64 dataBase64: {err}"
|
||||
))
|
||||
})?;
|
||||
self.file_system
|
||||
.write_file(¶ms.path, bytes)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsWriteFileResponse {})
|
||||
}
|
||||
|
||||
pub(crate) async fn create_directory(
|
||||
&self,
|
||||
params: FsCreateDirectoryParams,
|
||||
) -> Result<FsCreateDirectoryResponse, JSONRPCErrorError> {
|
||||
self.file_system
|
||||
.create_directory(
|
||||
¶ms.path,
|
||||
CreateDirectoryOptions {
|
||||
recursive: params.recursive.unwrap_or(true),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsCreateDirectoryResponse {})
|
||||
}
|
||||
|
||||
pub(crate) async fn get_metadata(
|
||||
&self,
|
||||
params: FsGetMetadataParams,
|
||||
) -> Result<FsGetMetadataResponse, JSONRPCErrorError> {
|
||||
let metadata = self
|
||||
.file_system
|
||||
.get_metadata(¶ms.path)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsGetMetadataResponse {
|
||||
is_directory: metadata.is_directory,
|
||||
is_file: metadata.is_file,
|
||||
created_at_ms: metadata.created_at_ms,
|
||||
modified_at_ms: metadata.modified_at_ms,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn read_directory(
|
||||
&self,
|
||||
params: FsReadDirectoryParams,
|
||||
) -> Result<FsReadDirectoryResponse, JSONRPCErrorError> {
|
||||
let entries = self
|
||||
.file_system
|
||||
.read_directory(¶ms.path)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsReadDirectoryResponse {
|
||||
entries: entries
|
||||
.into_iter()
|
||||
.map(|entry| FsReadDirectoryEntry {
|
||||
file_name: entry.file_name,
|
||||
is_directory: entry.is_directory,
|
||||
is_file: entry.is_file,
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn remove(
|
||||
&self,
|
||||
params: FsRemoveParams,
|
||||
) -> Result<FsRemoveResponse, JSONRPCErrorError> {
|
||||
self.file_system
|
||||
.remove(
|
||||
¶ms.path,
|
||||
RemoveOptions {
|
||||
recursive: params.recursive.unwrap_or(true),
|
||||
force: params.force.unwrap_or(true),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsRemoveResponse {})
|
||||
}
|
||||
|
||||
pub(crate) async fn copy(
|
||||
&self,
|
||||
params: FsCopyParams,
|
||||
) -> Result<FsCopyResponse, JSONRPCErrorError> {
|
||||
self.file_system
|
||||
.copy(
|
||||
¶ms.source_path,
|
||||
¶ms.destination_path,
|
||||
CopyOptions {
|
||||
recursive: params.recursive,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsCopyResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
fn map_fs_error(err: FsError) -> JSONRPCErrorError {
|
||||
if err.0.kind() == std::io::ErrorKind::InvalidInput {
|
||||
invalid_request(err.0.to_string())
|
||||
} else {
|
||||
internal_error(err.0.to_string())
|
||||
}
|
||||
}
|
||||
@@ -1,25 +1,112 @@
|
||||
use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCopyResponse;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryResponse;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsGetMetadataResponse;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryResponse;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsReadFileResponse;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsRemoveResponse;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_app_server_protocol::FsWriteFileResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_utils_pty::ExecCommandSession;
|
||||
use codex_utils_pty::TerminalSize;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::Notify;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::protocol::ExecExitedNotification;
|
||||
use crate::protocol::ExecOutputDeltaNotification;
|
||||
use crate::protocol::ExecOutputStream;
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::ExecResponse;
|
||||
use crate::protocol::InitializeResponse;
|
||||
use crate::server::jsonrpc::invalid_request;
|
||||
use crate::protocol::ProcessOutputChunk;
|
||||
use crate::protocol::ReadParams;
|
||||
use crate::protocol::ReadResponse;
|
||||
use crate::protocol::TerminateParams;
|
||||
use crate::protocol::TerminateResponse;
|
||||
use crate::protocol::WriteParams;
|
||||
use crate::protocol::WriteResponse;
|
||||
use crate::rpc::RpcNotificationSender;
|
||||
use crate::rpc::internal_error;
|
||||
use crate::rpc::invalid_params;
|
||||
use crate::rpc::invalid_request;
|
||||
use crate::server::filesystem::ExecServerFileSystem;
|
||||
|
||||
const RETAINED_OUTPUT_BYTES_PER_PROCESS: usize = 1024 * 1024;
|
||||
#[cfg(test)]
|
||||
const EXITED_PROCESS_RETENTION: Duration = Duration::from_millis(25);
|
||||
#[cfg(not(test))]
|
||||
const EXITED_PROCESS_RETENTION: Duration = Duration::from_secs(30);
|
||||
|
||||
#[derive(Clone)]
|
||||
struct RetainedOutputChunk {
|
||||
seq: u64,
|
||||
stream: ExecOutputStream,
|
||||
chunk: Vec<u8>,
|
||||
}
|
||||
|
||||
struct RunningProcess {
|
||||
session: ExecCommandSession,
|
||||
tty: bool,
|
||||
output: VecDeque<RetainedOutputChunk>,
|
||||
retained_bytes: usize,
|
||||
next_seq: u64,
|
||||
exit_code: Option<i32>,
|
||||
output_notify: Arc<Notify>,
|
||||
}
|
||||
|
||||
enum ProcessEntry {
|
||||
Starting,
|
||||
Running(Box<RunningProcess>),
|
||||
}
|
||||
|
||||
pub(crate) struct ExecServerHandler {
|
||||
notifications: RpcNotificationSender,
|
||||
file_system: ExecServerFileSystem,
|
||||
processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
|
||||
initialize_requested: AtomicBool,
|
||||
initialized: AtomicBool,
|
||||
}
|
||||
|
||||
impl ExecServerHandler {
|
||||
pub(crate) fn new() -> Self {
|
||||
pub(crate) fn new(notifications: RpcNotificationSender) -> Self {
|
||||
Self {
|
||||
notifications,
|
||||
file_system: ExecServerFileSystem::default(),
|
||||
processes: Arc::new(Mutex::new(HashMap::new())),
|
||||
initialize_requested: AtomicBool::new(false),
|
||||
initialized: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn shutdown(&self) {}
|
||||
pub(crate) async fn shutdown(&self) {
|
||||
let remaining = {
|
||||
let mut processes = self.processes.lock().await;
|
||||
processes
|
||||
.drain()
|
||||
.filter_map(|(_, process)| match process {
|
||||
ProcessEntry::Starting => None,
|
||||
ProcessEntry::Running(process) => Some(process),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
for process in remaining {
|
||||
process.session.terminate();
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn initialize(&self) -> Result<InitializeResponse, JSONRPCErrorError> {
|
||||
if self.initialize_requested.swap(true, Ordering::SeqCst) {
|
||||
@@ -37,4 +124,391 @@ impl ExecServerHandler {
|
||||
self.initialized.store(true, Ordering::SeqCst);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn require_initialized_for(&self, method_family: &str) -> Result<(), JSONRPCErrorError> {
|
||||
if !self.initialize_requested.load(Ordering::SeqCst) {
|
||||
return Err(invalid_request(format!(
|
||||
"client must call initialize before using {method_family} methods"
|
||||
)));
|
||||
}
|
||||
if !self.initialized.load(Ordering::SeqCst) {
|
||||
return Err(invalid_request(format!(
|
||||
"client must send initialized before using {method_family} methods"
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn exec(&self, params: ExecParams) -> Result<ExecResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("exec")?;
|
||||
let process_id = params.process_id.clone();
|
||||
|
||||
let (program, args) = params
|
||||
.argv
|
||||
.split_first()
|
||||
.ok_or_else(|| invalid_params("argv must not be empty".to_string()))?;
|
||||
|
||||
{
|
||||
let mut process_map = self.processes.lock().await;
|
||||
if process_map.contains_key(&process_id) {
|
||||
return Err(invalid_request(format!(
|
||||
"process {process_id} already exists"
|
||||
)));
|
||||
}
|
||||
process_map.insert(process_id.clone(), ProcessEntry::Starting);
|
||||
}
|
||||
|
||||
let spawned_result = if params.tty {
|
||||
codex_utils_pty::spawn_pty_process(
|
||||
program,
|
||||
args,
|
||||
params.cwd.as_path(),
|
||||
¶ms.env,
|
||||
¶ms.arg0,
|
||||
TerminalSize::default(),
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
codex_utils_pty::spawn_pipe_process_no_stdin(
|
||||
program,
|
||||
args,
|
||||
params.cwd.as_path(),
|
||||
¶ms.env,
|
||||
¶ms.arg0,
|
||||
)
|
||||
.await
|
||||
};
|
||||
let spawned = match spawned_result {
|
||||
Ok(spawned) => spawned,
|
||||
Err(err) => {
|
||||
let mut process_map = self.processes.lock().await;
|
||||
if matches!(process_map.get(&process_id), Some(ProcessEntry::Starting)) {
|
||||
process_map.remove(&process_id);
|
||||
}
|
||||
return Err(internal_error(err.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let output_notify = Arc::new(Notify::new());
|
||||
{
|
||||
let mut process_map = self.processes.lock().await;
|
||||
process_map.insert(
|
||||
process_id.clone(),
|
||||
ProcessEntry::Running(Box::new(RunningProcess {
|
||||
session: spawned.session,
|
||||
tty: params.tty,
|
||||
output: VecDeque::new(),
|
||||
retained_bytes: 0,
|
||||
next_seq: 1,
|
||||
exit_code: None,
|
||||
output_notify: Arc::clone(&output_notify),
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
tokio::spawn(stream_output(
|
||||
process_id.clone(),
|
||||
if params.tty {
|
||||
ExecOutputStream::Pty
|
||||
} else {
|
||||
ExecOutputStream::Stdout
|
||||
},
|
||||
spawned.stdout_rx,
|
||||
self.notifications.clone(),
|
||||
Arc::clone(&self.processes),
|
||||
Arc::clone(&output_notify),
|
||||
));
|
||||
tokio::spawn(stream_output(
|
||||
process_id.clone(),
|
||||
if params.tty {
|
||||
ExecOutputStream::Pty
|
||||
} else {
|
||||
ExecOutputStream::Stderr
|
||||
},
|
||||
spawned.stderr_rx,
|
||||
self.notifications.clone(),
|
||||
Arc::clone(&self.processes),
|
||||
Arc::clone(&output_notify),
|
||||
));
|
||||
tokio::spawn(watch_exit(
|
||||
process_id.clone(),
|
||||
spawned.exit_rx,
|
||||
self.notifications.clone(),
|
||||
Arc::clone(&self.processes),
|
||||
output_notify,
|
||||
));
|
||||
|
||||
Ok(ExecResponse { process_id })
|
||||
}
|
||||
|
||||
pub(crate) async fn exec_read(
|
||||
&self,
|
||||
params: ReadParams,
|
||||
) -> Result<ReadResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("exec")?;
|
||||
let after_seq = params.after_seq.unwrap_or(0);
|
||||
let max_bytes = params.max_bytes.unwrap_or(usize::MAX);
|
||||
let wait = Duration::from_millis(params.wait_ms.unwrap_or(0));
|
||||
let deadline = tokio::time::Instant::now() + wait;
|
||||
|
||||
loop {
|
||||
let (response, output_notify) = {
|
||||
let process_map = self.processes.lock().await;
|
||||
let process = process_map.get(¶ms.process_id).ok_or_else(|| {
|
||||
invalid_request(format!("unknown process id {}", params.process_id))
|
||||
})?;
|
||||
let ProcessEntry::Running(process) = process else {
|
||||
return Err(invalid_request(format!(
|
||||
"process id {} is starting",
|
||||
params.process_id
|
||||
)));
|
||||
};
|
||||
|
||||
let mut chunks = Vec::new();
|
||||
let mut total_bytes = 0;
|
||||
let mut next_seq = process.next_seq;
|
||||
for retained in process.output.iter().filter(|chunk| chunk.seq > after_seq) {
|
||||
let chunk_len = retained.chunk.len();
|
||||
if !chunks.is_empty() && total_bytes + chunk_len > max_bytes {
|
||||
break;
|
||||
}
|
||||
total_bytes += chunk_len;
|
||||
chunks.push(ProcessOutputChunk {
|
||||
seq: retained.seq,
|
||||
stream: retained.stream,
|
||||
chunk: retained.chunk.clone().into(),
|
||||
});
|
||||
next_seq = retained.seq + 1;
|
||||
if total_bytes >= max_bytes {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
(
|
||||
ReadResponse {
|
||||
chunks,
|
||||
next_seq,
|
||||
exited: process.exit_code.is_some(),
|
||||
exit_code: process.exit_code,
|
||||
},
|
||||
Arc::clone(&process.output_notify),
|
||||
)
|
||||
};
|
||||
|
||||
if !response.chunks.is_empty()
|
||||
|| response.exited
|
||||
|| tokio::time::Instant::now() >= deadline
|
||||
{
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
|
||||
if remaining.is_zero() {
|
||||
return Ok(response);
|
||||
}
|
||||
let _ = tokio::time::timeout(remaining, output_notify.notified()).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn exec_write(
|
||||
&self,
|
||||
params: WriteParams,
|
||||
) -> Result<WriteResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("exec")?;
|
||||
let writer_tx = {
|
||||
let process_map = self.processes.lock().await;
|
||||
let process = process_map.get(¶ms.process_id).ok_or_else(|| {
|
||||
invalid_request(format!("unknown process id {}", params.process_id))
|
||||
})?;
|
||||
let ProcessEntry::Running(process) = process else {
|
||||
return Err(invalid_request(format!(
|
||||
"process id {} is starting",
|
||||
params.process_id
|
||||
)));
|
||||
};
|
||||
if !process.tty {
|
||||
return Err(invalid_request(format!(
|
||||
"stdin is closed for process {}",
|
||||
params.process_id
|
||||
)));
|
||||
}
|
||||
process.session.writer_sender()
|
||||
};
|
||||
|
||||
writer_tx
|
||||
.send(params.chunk.into_inner())
|
||||
.await
|
||||
.map_err(|_| internal_error("failed to write to process stdin".to_string()))?;
|
||||
|
||||
Ok(WriteResponse { accepted: true })
|
||||
}
|
||||
|
||||
pub(crate) async fn terminate(
|
||||
&self,
|
||||
params: TerminateParams,
|
||||
) -> Result<TerminateResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("exec")?;
|
||||
let running = {
|
||||
let process_map = self.processes.lock().await;
|
||||
match process_map.get(¶ms.process_id) {
|
||||
Some(ProcessEntry::Running(process)) => {
|
||||
process.session.terminate();
|
||||
true
|
||||
}
|
||||
Some(ProcessEntry::Starting) | None => false,
|
||||
}
|
||||
};
|
||||
|
||||
Ok(TerminateResponse { running })
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_read_file(
|
||||
&self,
|
||||
params: FsReadFileParams,
|
||||
) -> Result<FsReadFileResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.read_file(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_write_file(
|
||||
&self,
|
||||
params: FsWriteFileParams,
|
||||
) -> Result<FsWriteFileResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.write_file(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_create_directory(
|
||||
&self,
|
||||
params: FsCreateDirectoryParams,
|
||||
) -> Result<FsCreateDirectoryResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.create_directory(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_get_metadata(
|
||||
&self,
|
||||
params: FsGetMetadataParams,
|
||||
) -> Result<FsGetMetadataResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.get_metadata(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_read_directory(
|
||||
&self,
|
||||
params: FsReadDirectoryParams,
|
||||
) -> Result<FsReadDirectoryResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.read_directory(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_remove(
|
||||
&self,
|
||||
params: FsRemoveParams,
|
||||
) -> Result<FsRemoveResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.remove(params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_copy(
|
||||
&self,
|
||||
params: FsCopyParams,
|
||||
) -> Result<FsCopyResponse, JSONRPCErrorError> {
|
||||
self.require_initialized_for("filesystem")?;
|
||||
self.file_system.copy(params).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn stream_output(
|
||||
process_id: String,
|
||||
stream: ExecOutputStream,
|
||||
mut receiver: tokio::sync::mpsc::Receiver<Vec<u8>>,
|
||||
notifications: RpcNotificationSender,
|
||||
processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
|
||||
output_notify: Arc<Notify>,
|
||||
) {
|
||||
while let Some(chunk) = receiver.recv().await {
|
||||
let notification = {
|
||||
let mut processes = processes.lock().await;
|
||||
let Some(entry) = processes.get_mut(&process_id) else {
|
||||
break;
|
||||
};
|
||||
let ProcessEntry::Running(process) = entry else {
|
||||
break;
|
||||
};
|
||||
let seq = process.next_seq;
|
||||
process.next_seq += 1;
|
||||
process.retained_bytes += chunk.len();
|
||||
process.output.push_back(RetainedOutputChunk {
|
||||
seq,
|
||||
stream,
|
||||
chunk: chunk.clone(),
|
||||
});
|
||||
while process.retained_bytes > RETAINED_OUTPUT_BYTES_PER_PROCESS {
|
||||
let Some(evicted) = process.output.pop_front() else {
|
||||
break;
|
||||
};
|
||||
process.retained_bytes = process.retained_bytes.saturating_sub(evicted.chunk.len());
|
||||
warn!(
|
||||
"retained output cap exceeded for process {process_id}; dropping oldest output"
|
||||
);
|
||||
}
|
||||
ExecOutputDeltaNotification {
|
||||
process_id: process_id.clone(),
|
||||
stream,
|
||||
chunk: chunk.into(),
|
||||
}
|
||||
};
|
||||
output_notify.notify_waiters();
|
||||
|
||||
if notifications
|
||||
.notify(crate::protocol::EXEC_OUTPUT_DELTA_METHOD, ¬ification)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn watch_exit(
|
||||
process_id: String,
|
||||
exit_rx: tokio::sync::oneshot::Receiver<i32>,
|
||||
notifications: RpcNotificationSender,
|
||||
processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
|
||||
output_notify: Arc<Notify>,
|
||||
) {
|
||||
let exit_code = exit_rx.await.unwrap_or(-1);
|
||||
{
|
||||
let mut processes = processes.lock().await;
|
||||
if let Some(ProcessEntry::Running(process)) = processes.get_mut(&process_id) {
|
||||
process.exit_code = Some(exit_code);
|
||||
}
|
||||
}
|
||||
output_notify.notify_waiters();
|
||||
if notifications
|
||||
.notify(
|
||||
crate::protocol::EXEC_EXITED_METHOD,
|
||||
&ExecExitedNotification {
|
||||
process_id: process_id.clone(),
|
||||
exit_code,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
tokio::time::sleep(EXITED_PROCESS_RETENTION).await;
|
||||
let mut processes = processes.lock().await;
|
||||
if matches!(
|
||||
processes.get(&process_id),
|
||||
Some(ProcessEntry::Running(process)) if process.exit_code == Some(exit_code)
|
||||
) {
|
||||
processes.remove(&process_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
71
codex-rs/exec-server/src/server/handler/tests.rs
Normal file
71
codex-rs/exec-server/src/server/handler/tests.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use pretty_assertions::assert_eq;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use super::ExecServerHandler;
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::InitializeResponse;
|
||||
use crate::rpc::RpcNotificationSender;
|
||||
|
||||
fn exec_params(process_id: &str) -> ExecParams {
|
||||
let mut env = HashMap::new();
|
||||
if let Some(path) = std::env::var_os("PATH") {
|
||||
env.insert("PATH".to_string(), path.to_string_lossy().into_owned());
|
||||
}
|
||||
ExecParams {
|
||||
process_id: process_id.to_string(),
|
||||
argv: vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"sleep 0.1".to_string(),
|
||||
],
|
||||
cwd: std::env::current_dir().expect("cwd"),
|
||||
env,
|
||||
tty: false,
|
||||
arg0: None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn initialized_handler() -> Arc<ExecServerHandler> {
|
||||
let (outgoing_tx, _outgoing_rx) = mpsc::channel(16);
|
||||
let handler = Arc::new(ExecServerHandler::new(RpcNotificationSender::new(
|
||||
outgoing_tx,
|
||||
)));
|
||||
assert_eq!(
|
||||
handler.initialize().expect("initialize"),
|
||||
InitializeResponse {}
|
||||
);
|
||||
handler.initialized().expect("initialized");
|
||||
handler
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn duplicate_process_ids_allow_only_one_successful_start() {
|
||||
let handler = initialized_handler().await;
|
||||
let first_handler = Arc::clone(&handler);
|
||||
let second_handler = Arc::clone(&handler);
|
||||
|
||||
let (first, second) = tokio::join!(
|
||||
first_handler.exec(exec_params("proc-1")),
|
||||
second_handler.exec(exec_params("proc-1")),
|
||||
);
|
||||
|
||||
let (successes, failures): (Vec<_>, Vec<_>) =
|
||||
[first, second].into_iter().partition(Result::is_ok);
|
||||
assert_eq!(successes.len(), 1);
|
||||
assert_eq!(failures.len(), 1);
|
||||
|
||||
let error = failures
|
||||
.into_iter()
|
||||
.next()
|
||||
.expect("one failed request")
|
||||
.expect_err("expected duplicate process error");
|
||||
assert_eq!(error.code, -32600);
|
||||
assert_eq!(error.message, "process proc-1 already exists");
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(150)).await;
|
||||
handler.shutdown().await;
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
use codex_app_server_protocol::JSONRPCError;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use serde_json::Value;
|
||||
|
||||
pub(crate) fn invalid_request(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32600,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn invalid_params(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32602,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn method_not_found(message: String) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: -32601,
|
||||
data: None,
|
||||
message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn response_message(
|
||||
request_id: RequestId,
|
||||
result: Result<Value, JSONRPCErrorError>,
|
||||
) -> JSONRPCMessage {
|
||||
match result {
|
||||
Ok(result) => JSONRPCMessage::Response(JSONRPCResponse {
|
||||
id: request_id,
|
||||
result,
|
||||
}),
|
||||
Err(error) => JSONRPCMessage::Error(JSONRPCError {
|
||||
id: request_id,
|
||||
error,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn invalid_request_message(reason: String) -> JSONRPCMessage {
|
||||
JSONRPCMessage::Error(JSONRPCError {
|
||||
id: RequestId::Integer(-1),
|
||||
error: invalid_request(reason),
|
||||
})
|
||||
}
|
||||
@@ -1,53 +1,109 @@
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCRequest;
|
||||
use tracing::debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::connection::JsonRpcConnection;
|
||||
use crate::connection::JsonRpcConnectionEvent;
|
||||
use crate::protocol::INITIALIZE_METHOD;
|
||||
use crate::protocol::INITIALIZED_METHOD;
|
||||
use crate::protocol::InitializeParams;
|
||||
use crate::server::ExecServerHandler;
|
||||
use crate::server::jsonrpc::invalid_params;
|
||||
use crate::server::jsonrpc::invalid_request_message;
|
||||
use crate::server::jsonrpc::method_not_found;
|
||||
use crate::server::jsonrpc::response_message;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::debug;
|
||||
use tracing::warn;
|
||||
|
||||
pub(crate) async fn run_connection(connection: JsonRpcConnection) {
|
||||
let (json_outgoing_tx, mut incoming_rx, _connection_tasks) = connection.into_parts();
|
||||
let handler = ExecServerHandler::new();
|
||||
use crate::connection::CHANNEL_CAPACITY;
|
||||
use crate::connection::JsonRpcConnection;
|
||||
use crate::connection::JsonRpcConnectionEvent;
|
||||
use crate::rpc::RpcNotificationSender;
|
||||
use crate::rpc::RpcServerOutboundMessage;
|
||||
use crate::rpc::encode_server_message;
|
||||
use crate::rpc::invalid_request;
|
||||
use crate::rpc::method_not_found;
|
||||
use crate::server::ExecServerHandler;
|
||||
use crate::server::registry::build_router;
|
||||
|
||||
while let Some(event) = incoming_rx.recv().await {
|
||||
match event {
|
||||
JsonRpcConnectionEvent::Message(message) => {
|
||||
let response = match handle_connection_message(&handler, message).await {
|
||||
Ok(response) => response,
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
"closing exec-server connection after protocol error: {err}"
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
let Some(response) = response else {
|
||||
continue;
|
||||
};
|
||||
if json_outgoing_tx.send(response).await.is_err() {
|
||||
pub(crate) async fn run_connection(connection: JsonRpcConnection) {
|
||||
let router = Arc::new(build_router());
|
||||
let (json_outgoing_tx, mut incoming_rx, connection_tasks) = connection.into_parts();
|
||||
let (outgoing_tx, mut outgoing_rx) =
|
||||
mpsc::channel::<RpcServerOutboundMessage>(CHANNEL_CAPACITY);
|
||||
let notifications = RpcNotificationSender::new(outgoing_tx.clone());
|
||||
let handler = Arc::new(ExecServerHandler::new(notifications));
|
||||
|
||||
let outbound_task = tokio::spawn(async move {
|
||||
while let Some(message) = outgoing_rx.recv().await {
|
||||
let json_message = match encode_server_message(message) {
|
||||
Ok(json_message) => json_message,
|
||||
Err(err) => {
|
||||
warn!("failed to serialize exec-server outbound message: {err}");
|
||||
break;
|
||||
}
|
||||
};
|
||||
if json_outgoing_tx.send(json_message).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Process inbound events sequentially to preserve initialize/initialized ordering.
|
||||
while let Some(event) = incoming_rx.recv().await {
|
||||
match event {
|
||||
JsonRpcConnectionEvent::MalformedMessage { reason } => {
|
||||
warn!("ignoring malformed exec-server message: {reason}");
|
||||
if json_outgoing_tx
|
||||
.send(invalid_request_message(reason))
|
||||
if outgoing_tx
|
||||
.send(RpcServerOutboundMessage::Error {
|
||||
request_id: codex_app_server_protocol::RequestId::Integer(-1),
|
||||
error: invalid_request(reason),
|
||||
})
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
JsonRpcConnectionEvent::Message(message) => match message {
|
||||
codex_app_server_protocol::JSONRPCMessage::Request(request) => {
|
||||
if let Some(route) = router.request_route(request.method.as_str()) {
|
||||
let message = route(handler.clone(), request).await;
|
||||
if outgoing_tx.send(message).await.is_err() {
|
||||
break;
|
||||
}
|
||||
} else if outgoing_tx
|
||||
.send(RpcServerOutboundMessage::Error {
|
||||
request_id: request.id,
|
||||
error: method_not_found(format!(
|
||||
"exec-server stub does not implement `{}` yet",
|
||||
request.method
|
||||
)),
|
||||
})
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
codex_app_server_protocol::JSONRPCMessage::Notification(notification) => {
|
||||
let Some(route) = router.notification_route(notification.method.as_str())
|
||||
else {
|
||||
warn!(
|
||||
"closing exec-server connection after unexpected notification: {}",
|
||||
notification.method
|
||||
);
|
||||
break;
|
||||
};
|
||||
if let Err(err) = route(handler.clone(), notification).await {
|
||||
warn!("closing exec-server connection after protocol error: {err}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
codex_app_server_protocol::JSONRPCMessage::Response(response) => {
|
||||
warn!(
|
||||
"closing exec-server connection after unexpected client response: {:?}",
|
||||
response.id
|
||||
);
|
||||
break;
|
||||
}
|
||||
codex_app_server_protocol::JSONRPCMessage::Error(error) => {
|
||||
warn!(
|
||||
"closing exec-server connection after unexpected client error: {:?}",
|
||||
error.id
|
||||
);
|
||||
break;
|
||||
}
|
||||
},
|
||||
JsonRpcConnectionEvent::Disconnected { reason } => {
|
||||
if let Some(reason) = reason {
|
||||
debug!("exec-server connection disconnected: {reason}");
|
||||
@@ -58,64 +114,10 @@ pub(crate) async fn run_connection(connection: JsonRpcConnection) {
|
||||
}
|
||||
|
||||
handler.shutdown().await;
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_connection_message(
|
||||
handler: &ExecServerHandler,
|
||||
message: JSONRPCMessage,
|
||||
) -> Result<Option<JSONRPCMessage>, String> {
|
||||
match message {
|
||||
JSONRPCMessage::Request(request) => Ok(Some(dispatch_request(handler, request))),
|
||||
JSONRPCMessage::Notification(notification) => {
|
||||
handle_notification(handler, notification)?;
|
||||
Ok(None)
|
||||
}
|
||||
JSONRPCMessage::Response(response) => Err(format!(
|
||||
"unexpected client response for request id {:?}",
|
||||
response.id
|
||||
)),
|
||||
JSONRPCMessage::Error(error) => Err(format!(
|
||||
"unexpected client error for request id {:?}",
|
||||
error.id
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn dispatch_request(handler: &ExecServerHandler, request: JSONRPCRequest) -> JSONRPCMessage {
|
||||
let JSONRPCRequest {
|
||||
id,
|
||||
method,
|
||||
params,
|
||||
trace: _,
|
||||
} = request;
|
||||
|
||||
match method.as_str() {
|
||||
INITIALIZE_METHOD => {
|
||||
let result = serde_json::from_value::<InitializeParams>(
|
||||
params.unwrap_or(serde_json::Value::Null),
|
||||
)
|
||||
.map_err(|err| invalid_params(err.to_string()))
|
||||
.and_then(|_params| handler.initialize())
|
||||
.and_then(|response| {
|
||||
serde_json::to_value(response).map_err(|err| invalid_params(err.to_string()))
|
||||
});
|
||||
response_message(id, result)
|
||||
}
|
||||
other => response_message(
|
||||
id,
|
||||
Err(method_not_found(format!(
|
||||
"exec-server stub does not implement `{other}` yet"
|
||||
))),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_notification(
|
||||
handler: &ExecServerHandler,
|
||||
notification: JSONRPCNotification,
|
||||
) -> Result<(), String> {
|
||||
match notification.method.as_str() {
|
||||
INITIALIZED_METHOD => handler.initialized(),
|
||||
other => Err(format!("unexpected notification method: {other}")),
|
||||
drop(outgoing_tx);
|
||||
for task in connection_tasks {
|
||||
task.abort();
|
||||
let _ = task.await;
|
||||
}
|
||||
let _ = outbound_task.await;
|
||||
}
|
||||
|
||||
110
codex-rs/exec-server/src/server/registry.rs
Normal file
110
codex-rs/exec-server/src/server/registry.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::protocol::EXEC_METHOD;
|
||||
use crate::protocol::EXEC_READ_METHOD;
|
||||
use crate::protocol::EXEC_TERMINATE_METHOD;
|
||||
use crate::protocol::EXEC_WRITE_METHOD;
|
||||
use crate::protocol::ExecParams;
|
||||
use crate::protocol::FS_COPY_METHOD;
|
||||
use crate::protocol::FS_CREATE_DIRECTORY_METHOD;
|
||||
use crate::protocol::FS_GET_METADATA_METHOD;
|
||||
use crate::protocol::FS_READ_DIRECTORY_METHOD;
|
||||
use crate::protocol::FS_READ_FILE_METHOD;
|
||||
use crate::protocol::FS_REMOVE_METHOD;
|
||||
use crate::protocol::FS_WRITE_FILE_METHOD;
|
||||
use crate::protocol::INITIALIZE_METHOD;
|
||||
use crate::protocol::INITIALIZED_METHOD;
|
||||
use crate::protocol::InitializeParams;
|
||||
use crate::protocol::ReadParams;
|
||||
use crate::protocol::TerminateParams;
|
||||
use crate::protocol::WriteParams;
|
||||
use crate::rpc::RpcRouter;
|
||||
use crate::server::ExecServerHandler;
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
use codex_app_server_protocol::FsCreateDirectoryParams;
|
||||
use codex_app_server_protocol::FsGetMetadataParams;
|
||||
use codex_app_server_protocol::FsReadDirectoryParams;
|
||||
use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
|
||||
pub(crate) fn build_router() -> RpcRouter<ExecServerHandler> {
|
||||
let mut router = RpcRouter::new();
|
||||
router.request(
|
||||
INITIALIZE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, _params: InitializeParams| async move {
|
||||
handler.initialize()
|
||||
},
|
||||
);
|
||||
router.notification(
|
||||
INITIALIZED_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, _params: serde_json::Value| async move {
|
||||
handler.initialized()
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
EXEC_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: ExecParams| async move { handler.exec(params).await },
|
||||
);
|
||||
router.request(
|
||||
EXEC_READ_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: ReadParams| async move {
|
||||
handler.exec_read(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
EXEC_WRITE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: WriteParams| async move {
|
||||
handler.exec_write(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
EXEC_TERMINATE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: TerminateParams| async move {
|
||||
handler.terminate(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_READ_FILE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsReadFileParams| async move {
|
||||
handler.fs_read_file(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_WRITE_FILE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsWriteFileParams| async move {
|
||||
handler.fs_write_file(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_CREATE_DIRECTORY_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsCreateDirectoryParams| async move {
|
||||
handler.fs_create_directory(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_GET_METADATA_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsGetMetadataParams| async move {
|
||||
handler.fs_get_metadata(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_READ_DIRECTORY_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsReadDirectoryParams| async move {
|
||||
handler.fs_read_directory(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_REMOVE_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsRemoveParams| async move {
|
||||
handler.fs_remove(params).await
|
||||
},
|
||||
);
|
||||
router.request(
|
||||
FS_COPY_METHOD,
|
||||
|handler: Arc<ExecServerHandler>, params: FsCopyParams| async move {
|
||||
handler.fs_copy(params).await
|
||||
},
|
||||
);
|
||||
router
|
||||
}
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
mod common;
|
||||
|
||||
use codex_app_server_protocol::JSONRPCError;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_exec_server::ExecResponse;
|
||||
use codex_exec_server::InitializeParams;
|
||||
use common::exec_server::exec_server;
|
||||
use pretty_assertions::assert_eq;
|
||||
@@ -24,11 +24,15 @@ async fn exec_server_stubs_process_start_over_websocket() -> anyhow::Result<()>
|
||||
.wait_for_event(|event| {
|
||||
matches!(
|
||||
event,
|
||||
JSONRPCMessage::Response(JSONRPCResponse { id, .. }) if id == &initialize_id
|
||||
JSONRPCMessage::Response(JSONRPCResponse { id, .. }) if *id == initialize_id
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
|
||||
server
|
||||
.send_notification("initialized", serde_json::json!({}))
|
||||
.await?;
|
||||
|
||||
let process_start_id = server
|
||||
.send_request(
|
||||
"process/start",
|
||||
@@ -46,18 +50,20 @@ async fn exec_server_stubs_process_start_over_websocket() -> anyhow::Result<()>
|
||||
.wait_for_event(|event| {
|
||||
matches!(
|
||||
event,
|
||||
JSONRPCMessage::Error(JSONRPCError { id, .. }) if id == &process_start_id
|
||||
JSONRPCMessage::Response(JSONRPCResponse { id, .. }) if *id == process_start_id
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
let JSONRPCMessage::Error(JSONRPCError { id, error }) = response else {
|
||||
panic!("expected process/start stub error");
|
||||
let JSONRPCMessage::Response(JSONRPCResponse { id, result }) = response else {
|
||||
panic!("expected process/start response");
|
||||
};
|
||||
assert_eq!(id, process_start_id);
|
||||
assert_eq!(error.code, -32601);
|
||||
let process_start_response: ExecResponse = serde_json::from_value(result)?;
|
||||
assert_eq!(
|
||||
error.message,
|
||||
"exec-server stub does not implement `process/start` yet"
|
||||
process_start_response,
|
||||
ExecResponse {
|
||||
process_id: "proc-1".to_string()
|
||||
}
|
||||
);
|
||||
|
||||
server.shutdown().await?;
|
||||
|
||||
@@ -44,7 +44,7 @@ async fn exec_server_reports_malformed_websocket_json_and_keeps_running() -> any
|
||||
.wait_for_event(|event| {
|
||||
matches!(
|
||||
event,
|
||||
JSONRPCMessage::Response(JSONRPCResponse { id, .. }) if id == &initialize_id
|
||||
JSONRPCMessage::Response(JSONRPCResponse { id, .. }) if *id == initialize_id
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
|
||||
Reference in New Issue
Block a user