Compare commits

..

1 Commits

Author SHA1 Message Date
Dave Aitel
b9034c8c24 exec: handle typed turn completion for fanout jobs 2026-03-16 20:48:55 -04:00
20 changed files with 458 additions and 4907 deletions

20
codex-rs/Cargo.lock generated
View File

@@ -1989,26 +1989,6 @@ dependencies = [
"wiremock",
]
[[package]]
name = "codex-exec-server"
version = "0.0.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"clap",
"codex-app-server-protocol",
"codex-utils-cargo-bin",
"codex-utils-pty",
"futures",
"pretty_assertions",
"serde",
"serde_json",
"thiserror 2.0.18",
"tokio",
"tokio-tungstenite",
"tracing",
]
[[package]]
name = "codex-execpolicy"
version = "0.0.0"

View File

@@ -25,7 +25,6 @@ members = [
"hooks",
"secrets",
"exec",
"exec-server",
"execpolicy",
"execpolicy-legacy",
"keyring-store",

View File

@@ -1,39 +0,0 @@
[package]
name = "codex-exec-server"
version.workspace = true
edition.workspace = true
license.workspace = true
[[bin]]
name = "codex-exec-server"
path = "src/bin/codex-exec-server.rs"
[lints]
workspace = true
[dependencies]
base64 = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-app-server-protocol = { workspace = true }
codex-utils-pty = { workspace = true }
futures = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
"io-util",
"macros",
"net",
"process",
"rt-multi-thread",
"sync",
"time",
] }
tokio-tungstenite = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
pretty_assertions = { workspace = true }

View File

@@ -1,242 +0,0 @@
# exec-server design notes
This document sketches a likely direction for integrating `codex-exec-server`
with unified exec without baking the full tool-call policy stack into the
server.
The goal is:
- keep exec-server generic and reusable
- keep approval, sandbox, and retry policy in `core`
- preserve the unified-exec event flow the model already depends on
- support retained output caps so polling and snapshot-style APIs do not grow
memory without bound
## Unified exec today
Today the flow for LLM-visible interactive execution is:
1. The model sees the `exec_command` and `write_stdin` tools.
2. `UnifiedExecHandler` parses the tool arguments and allocates a process id.
3. `UnifiedExecProcessManager::exec_command(...)` calls
`open_session_with_sandbox(...)`.
4. `ToolOrchestrator` drives approval, sandbox selection, managed network
approval, and sandbox-denial retry behavior.
5. `UnifiedExecRuntime` builds a `CommandSpec`, asks the current
`SandboxAttempt` to transform it into an `ExecRequest`, and passes that
resolved request back to the process manager.
6. `open_session_with_exec_env(...)` spawns the process from that resolved
`ExecRequest`.
7. Unified exec emits an `ExecCommandBegin` event.
8. Unified exec starts a background output watcher that emits
`ExecCommandOutputDelta` events.
9. The initial tool call collects output until the requested yield deadline and
returns an `ExecCommandToolOutput` snapshot to the model.
10. If the process is still running, unified exec stores it and later emits
`ExecCommandEnd` when the exit watcher fires.
11. A later `write_stdin` tool call writes to the stored process, emits a
`TerminalInteraction` event, collects another bounded snapshot, and returns
that tool response to the model.
Important observation: the 250ms / 10s yield-window behavior is not really a
process-server concern. It is a client-side convenience layer for the LLM tool
API. The server should focus on raw process lifecycle and streaming events.
## Proposed boundary
The clean split is:
- exec-server server: process lifecycle, output streaming, retained output caps
- exec-server client: `wait`, `communicate`, yield-window helpers, session
bookkeeping
- unified exec in `core`: tool parsing, event emission, approvals, sandboxing,
managed networking, retry semantics
If exec-server is used by unified exec later, the boundary should sit between
step 5 and step 6 above: after policy has produced a resolved spawn request, but
before the actual PTY or pipe spawn.
## Suggested process API
Start simple and explicit:
- `process/start`
- `process/write`
- `process/closeStdin`
- `process/resize`
- `process/terminate`
- `process/wait`
- `process/snapshot`
Server notifications:
- `process/outputDelta`
- `process/exited`
- optionally `process/started`
- optionally `process/failed`
Suggested request shapes:
```rust
enum ProcessStartRequest {
Direct(DirectExecSpec),
Prepared(PreparedExecSpec),
}
struct DirectExecSpec {
session_id: String,
argv: Vec<String>,
cwd: PathBuf,
env: HashMap<String, String>,
arg0: Option<String>,
io: ProcessIo,
}
struct PreparedExecSpec {
session_id: String,
request: PreparedExecRequest,
io: ProcessIo,
}
enum ProcessIo {
Pty { rows: u16, cols: u16 },
Pipe { stdin: StdinMode },
}
enum StdinMode {
Open,
Closed,
}
enum TerminateMode {
Graceful { timeout_ms: u64 },
Force,
}
```
Notes:
- `sessionId` remains a protocol handle, not an OS pid.
- `wait` is a good generic API because many callers want process completion
without manually wiring notifications.
- `communicate` is also a reasonable API, but it should probably start as a
client helper built on top of `write + closeStdin + wait + snapshot`.
- If an RPC form of `communicate` is added later, it should be a convenience
wrapper rather than the primitive execution model.
## Output capping
Even with event streaming, the server should retain a bounded amount of output
per process so callers can poll, wait, or reconnect without unbounded memory
growth.
Suggested behavior:
- stream every output chunk live via `process/outputDelta`
- retain capped output per process in memory
- keep stdout and stderr separately for pipe-backed processes
- for PTY-backed processes, treat retained output as a single terminal stream
- expose truncation metadata on snapshots
Suggested snapshot response:
```rust
struct ProcessSnapshot {
stdout: Vec<u8>,
stderr: Vec<u8>,
terminal: Vec<u8>,
truncated: bool,
exit_code: Option<i32>,
running: bool,
}
```
Implementation-wise, the current `HeadTailBuffer` pattern used by unified exec
is a good fit. The cap should be server config, not request config, so memory
use stays predictable.
## Sandboxing and networking
### How unified exec does it today
Unified exec does not hand raw command args directly to the PTY layer for tool
calls. Instead, it:
1. computes approval requirements
2. chooses a sandbox attempt
3. applies managed-network policy if needed
4. transforms `CommandSpec` into `ExecRequest`
5. spawns from that resolved `ExecRequest`
That split is already valuable and should be preserved.
### Recommended exec-server design
Do not put approval policy into exec-server.
Instead, support two execution modes:
- `Direct`: raw command, intended for orchestrator-side or already-trusted use
- `Prepared`: already-resolved spawn request, intended for tool-call execution
For tool calls from the LLM side:
1. `core` runs the existing approval + sandbox + managed-network flow
2. `core` produces a resolved `ExecRequest`
3. the exec-server client sends `PreparedExecSpec`
4. exec-server spawns exactly that request and streams process events
For orchestrator-side execution:
1. caller sends `DirectExecSpec`
2. exec-server spawns directly without running approval or sandbox policy
This gives one generic process API while keeping the policy-sensitive logic in
the place that already owns it.
### Why not make exec-server own sandbox selection?
That would force exec-server to understand:
- approval policy
- exec policy / prefix rules
- managed-network approval flow
- sandbox retry semantics
- guardian routing
- feature-flag-driven sandbox selection
- platform-specific sandbox helper configuration
That is too opinionated for a reusable process service.
## Optional future server config
If exec-server grows beyond the current prototype, a config object like this
would be enough:
```rust
struct ExecServerConfig {
shutdown_grace_period_ms: u64,
max_processes_per_connection: usize,
retained_output_bytes_per_process: usize,
allow_direct_exec: bool,
allow_prepared_exec: bool,
}
```
That keeps policy surface small:
- lifecycle limits live in the server
- trust and sandbox policy stay with the caller
## Mapping back to LLM-visible events
If unified exec is later backed by exec-server, the `core` client wrapper should
keep owning the translation into the existing event model:
- `process/start` success -> `ExecCommandBegin`
- `process/outputDelta` -> `ExecCommandOutputDelta`
- local `process/write` call -> `TerminalInteraction`
- `process/exited` plus retained transcript -> `ExecCommandEnd`
That preserves the current LLM-facing contract while making the process backend
swappable.

View File

@@ -1,347 +0,0 @@
# codex-exec-server
`codex-exec-server` is a small standalone JSON-RPC server for spawning and
controlling subprocesses through `codex-utils-pty`.
It currently provides:
- a standalone binary: `codex-exec-server`
- a transport-agnostic server runtime with stdio and websocket entrypoints
- a Rust client: `ExecServerClient`
- a direct in-process client mode: `ExecServerClient::connect_in_process`
- a separate local launch helper: `spawn_local_exec_server`
- a small protocol module with shared request/response types
This crate is intentionally narrow. It is not wired into the main Codex CLI or
unified-exec in this PR; it is only the standalone transport layer.
The internal shape is intentionally closer to `app-server` than the first cut:
- transport adapters are separate from the per-connection request processor
- JSON-RPC route matching is separate from the stateful exec handler
- the client only speaks the protocol; it does not spawn a server subprocess
- the client can also bypass the JSON-RPC transport/routing layer in local
in-process mode and call the typed handler directly
- local child-process launch is handled by a separate helper/factory layer
That split is meant to leave reusable seams if exec-server and app-server later
share transport or JSON-RPC connection utilities. It also keeps the core
handler testable without the RPC server implementation itself.
Design notes for a likely future integration with unified exec, including
rough call flow, buffering, and sandboxing boundaries, live in
[DESIGN.md](./DESIGN.md).
## Transport
The server speaks the same JSON-RPC message shapes over multiple transports.
The standalone binary supports:
- `stdio://` (default)
- `ws://IP:PORT`
Wire framing:
- stdio: one newline-delimited JSON-RPC message per line on stdin/stdout
- websocket: one JSON-RPC message per websocket text frame
Like the app-server transport, messages on the wire omit the `"jsonrpc":"2.0"`
field and use the shared `codex-app-server-protocol` envelope types.
The current protocol version is:
```text
exec-server.v0
```
## Lifecycle
Each connection follows this sequence:
1. Send `initialize`.
2. Wait for the `initialize` response.
3. Send `initialized`.
4. Start and manage processes with `command/exec`, `command/exec/write`, and
`command/exec/terminate`.
5. Read streaming notifications from `command/exec/outputDelta` and
`command/exec/exited`.
If the client sends exec methods before completing the `initialize` /
`initialized` handshake, the server rejects them.
If a connection closes, the server terminates any remaining managed processes
for that connection.
## API
### `initialize`
Initial handshake request.
Request params:
```json
{
"clientName": "my-client"
}
```
Response:
```json
{
"protocolVersion": "exec-server.v0"
}
```
### `initialized`
Handshake acknowledgement notification sent by the client after a successful
`initialize` response. Exec methods are rejected until this arrives.
Params are currently ignored. Sending any other client notification method is a
protocol error.
### `command/exec`
Starts a new managed process.
Request params:
```json
{
"argv": ["bash", "-lc", "printf 'hello\\n'"],
"cwd": "/absolute/working/directory",
"env": {
"PATH": "/usr/bin:/bin"
},
"tty": true,
"arg0": null
}
```
Field definitions:
- `argv`: command vector. It must be non-empty.
- `cwd`: absolute working directory used for the child process.
- `env`: environment variables passed to the child process.
- `tty`: when `true`, spawn a PTY-backed interactive process; when `false`,
spawn a pipe-backed process with closed stdin.
- `arg0`: optional argv0 override forwarded to `codex-utils-pty`.
Response:
```json
{
"sessionId": "proc-1"
}
```
Behavior notes:
- `sessionId` is assigned by the server.
- PTY-backed processes accept later writes through `command/exec/write`.
- Pipe-backed processes are launched with stdin closed and reject writes.
- Output is streamed asynchronously via `command/exec/outputDelta`.
- Exit is reported asynchronously via `command/exec/exited`.
### `command/exec/write`
Writes raw bytes to a running PTY-backed process stdin.
Request params:
```json
{
"sessionId": "proc-1",
"chunk": "aGVsbG8K"
}
```
`chunk` is base64-encoded raw bytes. In the example above it is `hello\n`.
Response:
```json
{
"accepted": true
}
```
Behavior notes:
- Writes to an unknown `sessionId` are rejected.
- Writes to a non-PTY process are rejected because stdin is already closed.
### `command/exec/terminate`
Terminates a running managed process.
Request params:
```json
{
"sessionId": "proc-1"
}
```
Response:
```json
{
"running": true
}
```
If the process is already unknown or already removed, the server responds with:
```json
{
"running": false
}
```
## Notifications
### `command/exec/outputDelta`
Streaming output chunk from a running process.
Params:
```json
{
"sessionId": "proc-1",
"stream": "stdout",
"chunk": "aGVsbG8K"
}
```
Fields:
- `sessionId`: process identifier
- `stream`: `"stdout"` or `"stderr"`
- `chunk`: base64-encoded output bytes
### `command/exec/exited`
Final process exit notification.
Params:
```json
{
"sessionId": "proc-1",
"exitCode": 0
}
```
## Errors
The server returns JSON-RPC errors with these codes:
- `-32600`: invalid request
- `-32602`: invalid params
- `-32603`: internal error
Typical error cases:
- unknown method
- malformed params
- empty `argv`
- duplicate `sessionId`
- writes to unknown processes
- writes to non-PTY processes
## Rust surface
The crate exports:
- `ExecServerClient`
- `ExecServerClientConnectOptions`
- `RemoteExecServerConnectArgs`
- `ExecServerLaunchCommand`
- `ExecServerEvent`
- `ExecServerOutput`
- `SpawnedExecServer`
- `ExecServerError`
- `ExecServerTransport`
- `spawn_local_exec_server(...)`
- protocol structs such as `ExecParams`, `ExecResponse`,
`WriteParams`, `TerminateParams`, `ExecOutputDeltaNotification`, and
`ExecExitedNotification`
- `run_main()` and `run_main_with_transport(...)`
### Binary
Run over stdio:
```text
codex-exec-server
```
Run as a websocket server:
```text
codex-exec-server --listen ws://127.0.0.1:8080
```
### Client
Connect the client to an existing server transport:
- `ExecServerClient::connect_stdio(...)`
- `ExecServerClient::connect_websocket(...)`
- `ExecServerClient::connect_in_process(...)` for a local no-transport mode
backed directly by the typed handler
Timeout behavior:
- stdio and websocket clients both enforce an initialize-handshake timeout
- websocket clients also enforce a connect timeout before the handshake begins
Events:
- `ExecServerClient::event_receiver()` yields `ExecServerEvent`
- output events include both `stream` (`stdout` or `stderr`) and raw bytes
- process lifetime is tracked by server notifications such as
`command/exec/exited`, not by a client-side process registry
Spawning a local child process is deliberately separate:
- `spawn_local_exec_server(...)`
## Example session
Initialize:
```json
{"id":1,"method":"initialize","params":{"clientName":"example-client"}}
{"id":1,"result":{"protocolVersion":"exec-server.v0"}}
{"method":"initialized","params":{}}
```
Start a process:
```json
{"id":2,"method":"command/exec","params":{"argv":["bash","-lc","printf 'ready\\n'; while IFS= read -r line; do printf 'echo:%s\\n' \"$line\"; done"],"cwd":"/tmp","env":{"PATH":"/usr/bin:/bin"},"tty":true,"arg0":null}}
{"id":2,"result":{"sessionId":"proc-1"}}
{"method":"command/exec/outputDelta","params":{"sessionId":"proc-1","stream":"stdout","chunk":"cmVhZHkK"}}
```
Write to the process:
```json
{"id":3,"method":"command/exec/write","params":{"sessionId":"proc-1","chunk":"aGVsbG8K"}}
{"id":3,"result":{"accepted":true}}
{"method":"command/exec/outputDelta","params":{"sessionId":"proc-1","stream":"stdout","chunk":"ZWNobzpoZWxsbwo="}}
```
Terminate it:
```json
{"id":4,"method":"command/exec/terminate","params":{"sessionId":"proc-1"}}
{"id":4,"result":{"running":true}}
{"method":"command/exec/exited","params":{"sessionId":"proc-1","exitCode":0}}
```

View File

@@ -1,23 +0,0 @@
use clap::Parser;
use codex_exec_server::ExecServerTransport;
/// Command-line arguments for the standalone `codex-exec-server` binary.
#[derive(Debug, Parser)]
struct ExecServerArgs {
    /// Transport endpoint URL. Supported values: `stdio://` (default),
    /// `ws://IP:PORT`.
    #[arg(
        long = "listen",
        value_name = "URL",
        default_value = ExecServerTransport::DEFAULT_LISTEN_URL
    )]
    listen: ExecServerTransport,
}
/// Entry point: parse CLI arguments and run the server on the requested
/// transport, exiting non-zero if the server returns an error.
#[tokio::main]
async fn main() {
    let args = ExecServerArgs::parse();
    match codex_exec_server::run_main_with_transport(args.listen).await {
        Ok(()) => {}
        Err(err) => {
            eprintln!("{err}");
            std::process::exit(1);
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,417 +0,0 @@
use codex_app_server_protocol::JSONRPCMessage;
use futures::SinkExt;
use futures::StreamExt;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::BufWriter;
use tokio::sync::mpsc;
use tokio_tungstenite::WebSocketStream;
use tokio_tungstenite::tungstenite::Message;
/// Bounded capacity shared by the inbound and outbound message channels.
pub(crate) const CHANNEL_CAPACITY: usize = 128;

/// Event surfaced to the connection consumer: either a parsed JSON-RPC
/// message, or a terminal disconnect carrying an optional human-readable
/// reason (`None` means a clean EOF / close).
#[derive(Debug)]
pub(crate) enum JsonRpcConnectionEvent {
    Message(JSONRPCMessage),
    Disconnected { reason: Option<String> },
}
/// Transport-agnostic JSON-RPC connection handle: a sender for outbound
/// messages and a receiver of inbound events, each backed by a background
/// reader/writer task spawned by the constructors.
pub(crate) struct JsonRpcConnection {
    outgoing_tx: mpsc::Sender<JSONRPCMessage>,
    incoming_rx: mpsc::Receiver<JsonRpcConnectionEvent>,
}
impl JsonRpcConnection {
    /// Builds a connection over a newline-delimited JSON stream (stdio-style
    /// framing: one JSON-RPC message per line).
    ///
    /// Spawns a reader task that parses lines from `reader` into messages and
    /// a writer task that serializes queued messages onto `writer`. Each task
    /// emits a `Disconnected` event and stops on the first error or EOF.
    /// `connection_label` is used only in error/disconnect reasons.
    pub(crate) fn from_stdio<R, W>(reader: R, writer: W, connection_label: String) -> Self
    where
        R: AsyncRead + Unpin + Send + 'static,
        W: AsyncWrite + Unpin + Send + 'static,
    {
        let (outgoing_tx, mut outgoing_rx) = mpsc::channel(CHANNEL_CAPACITY);
        let (incoming_tx, incoming_rx) = mpsc::channel(CHANNEL_CAPACITY);
        let reader_label = connection_label.clone();
        let incoming_tx_for_reader = incoming_tx.clone();
        // Reader task: forward each parsed line as a Message event.
        tokio::spawn(async move {
            let mut lines = BufReader::new(reader).lines();
            loop {
                match lines.next_line().await {
                    Ok(Some(line)) => {
                        // Blank lines are tolerated and skipped.
                        if line.trim().is_empty() {
                            continue;
                        }
                        match serde_json::from_str::<JSONRPCMessage>(&line) {
                            Ok(message) => {
                                if incoming_tx_for_reader
                                    .send(JsonRpcConnectionEvent::Message(message))
                                    .await
                                    .is_err()
                                {
                                    // Consumer dropped the receiver; stop reading.
                                    break;
                                }
                            }
                            Err(err) => {
                                // A malformed line is fatal for the connection.
                                send_disconnected(
                                    &incoming_tx_for_reader,
                                    Some(format!(
                                        "failed to parse JSON-RPC message from {reader_label}: {err}"
                                    )),
                                )
                                .await;
                                break;
                            }
                        }
                    }
                    Ok(None) => {
                        // EOF: report a clean disconnect with no reason.
                        send_disconnected(&incoming_tx_for_reader, None).await;
                        break;
                    }
                    Err(err) => {
                        send_disconnected(
                            &incoming_tx_for_reader,
                            Some(format!(
                                "failed to read JSON-RPC message from {reader_label}: {err}"
                            )),
                        )
                        .await;
                        break;
                    }
                }
            }
        });
        // Writer task: serialize each queued message followed by a newline.
        tokio::spawn(async move {
            let mut writer = BufWriter::new(writer);
            while let Some(message) = outgoing_rx.recv().await {
                if let Err(err) = write_jsonrpc_line_message(&mut writer, &message).await {
                    send_disconnected(
                        &incoming_tx,
                        Some(format!(
                            "failed to write JSON-RPC message to {connection_label}: {err}"
                        )),
                    )
                    .await;
                    break;
                }
            }
        });
        Self {
            outgoing_tx,
            incoming_rx,
        }
    }
    /// Builds a connection over an established websocket stream.
    ///
    /// Text and binary frames are both accepted as JSON-RPC payloads; ping,
    /// pong, and other frame types are ignored. Outbound messages are always
    /// sent as text frames. A `Close` frame or stream end produces a clean
    /// `Disconnected { reason: None }` event.
    pub(crate) fn from_websocket<S>(stream: WebSocketStream<S>, connection_label: String) -> Self
    where
        S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    {
        let (outgoing_tx, mut outgoing_rx) = mpsc::channel(CHANNEL_CAPACITY);
        let (incoming_tx, incoming_rx) = mpsc::channel(CHANNEL_CAPACITY);
        let (mut websocket_writer, mut websocket_reader) = stream.split();
        let reader_label = connection_label.clone();
        let incoming_tx_for_reader = incoming_tx.clone();
        // Reader task: decode JSON-RPC messages from websocket frames.
        tokio::spawn(async move {
            loop {
                match websocket_reader.next().await {
                    Some(Ok(Message::Text(text))) => {
                        match serde_json::from_str::<JSONRPCMessage>(text.as_ref()) {
                            Ok(message) => {
                                if incoming_tx_for_reader
                                    .send(JsonRpcConnectionEvent::Message(message))
                                    .await
                                    .is_err()
                                {
                                    break;
                                }
                            }
                            Err(err) => {
                                send_disconnected(
                                    &incoming_tx_for_reader,
                                    Some(format!(
                                        "failed to parse websocket JSON-RPC message from {reader_label}: {err}"
                                    )),
                                )
                                .await;
                                break;
                            }
                        }
                    }
                    Some(Ok(Message::Binary(bytes))) => {
                        // Binary frames carry the same JSON payload as text.
                        match serde_json::from_slice::<JSONRPCMessage>(bytes.as_ref()) {
                            Ok(message) => {
                                if incoming_tx_for_reader
                                    .send(JsonRpcConnectionEvent::Message(message))
                                    .await
                                    .is_err()
                                {
                                    break;
                                }
                            }
                            Err(err) => {
                                send_disconnected(
                                    &incoming_tx_for_reader,
                                    Some(format!(
                                        "failed to parse websocket JSON-RPC message from {reader_label}: {err}"
                                    )),
                                )
                                .await;
                                break;
                            }
                        }
                    }
                    Some(Ok(Message::Close(_))) => {
                        send_disconnected(&incoming_tx_for_reader, None).await;
                        break;
                    }
                    // Keepalive frames are handled by the websocket layer.
                    Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {}
                    Some(Ok(_)) => {}
                    Some(Err(err)) => {
                        send_disconnected(
                            &incoming_tx_for_reader,
                            Some(format!(
                                "failed to read websocket JSON-RPC message from {reader_label}: {err}"
                            )),
                        )
                        .await;
                        break;
                    }
                    None => {
                        send_disconnected(&incoming_tx_for_reader, None).await;
                        break;
                    }
                }
            }
        });
        // Writer task: serialize each queued message as a websocket text frame.
        tokio::spawn(async move {
            while let Some(message) = outgoing_rx.recv().await {
                match serialize_jsonrpc_message(&message) {
                    Ok(encoded) => {
                        if let Err(err) = websocket_writer.send(Message::Text(encoded.into())).await
                        {
                            send_disconnected(
                                &incoming_tx,
                                Some(format!(
                                    "failed to write websocket JSON-RPC message to {connection_label}: {err}"
                                )),
                            )
                            .await;
                            break;
                        }
                    }
                    Err(err) => {
                        send_disconnected(
                            &incoming_tx,
                            Some(format!(
                                "failed to serialize JSON-RPC message for {connection_label}: {err}"
                            )),
                        )
                        .await;
                        break;
                    }
                }
            }
        });
        Self {
            outgoing_tx,
            incoming_rx,
        }
    }
    /// Splits the connection into its raw channel halves for independent use.
    pub(crate) fn into_parts(
        self,
    ) -> (
        mpsc::Sender<JSONRPCMessage>,
        mpsc::Receiver<JsonRpcConnectionEvent>,
    ) {
        (self.outgoing_tx, self.incoming_rx)
    }
}
/// Best-effort delivery of a `Disconnected` event to the consumer.
/// Failure is deliberately ignored: during teardown the receiver may
/// already be gone.
async fn send_disconnected(
    incoming_tx: &mpsc::Sender<JsonRpcConnectionEvent>,
    reason: Option<String>,
) {
    let event = JsonRpcConnectionEvent::Disconnected { reason };
    if incoming_tx.send(event).await.is_err() {
        // Receiver dropped; nothing left to notify.
    }
}
/// Serializes `message` and writes it to `writer` as one newline-terminated
/// line, flushing afterwards so the peer sees it immediately.
///
/// Serialization failures are surfaced as `std::io::Error` so the caller
/// deals with a single error type.
async fn write_jsonrpc_line_message<W>(
    writer: &mut BufWriter<W>,
    message: &JSONRPCMessage,
) -> std::io::Result<()>
where
    W: AsyncWrite + Unpin,
{
    let encoded = match serialize_jsonrpc_message(message) {
        Ok(encoded) => encoded,
        Err(err) => return Err(std::io::Error::other(err.to_string())),
    };
    writer.write_all(encoded.as_bytes()).await?;
    writer.write_all(b"\n").await?;
    writer.flush().await
}
/// Serializes a JSON-RPC message to its compact JSON wire form.
fn serialize_jsonrpc_message(message: &JSONRPCMessage) -> Result<String, serde_json::Error> {
    serde_json::to_string(message)
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use codex_app_server_protocol::JSONRPCMessage;
    use codex_app_server_protocol::JSONRPCRequest;
    use codex_app_server_protocol::JSONRPCResponse;
    use codex_app_server_protocol::RequestId;
    use pretty_assertions::assert_eq;
    use tokio::io::AsyncBufReadExt;
    use tokio::io::AsyncWriteExt;
    use tokio::io::BufReader;
    use tokio::sync::mpsc;
    use tokio::time::timeout;
    use super::JsonRpcConnection;
    use super::JsonRpcConnectionEvent;
    use super::serialize_jsonrpc_message;
    /// Receives the next connection event, panicking if none arrives within
    /// one second (keeps hung tests from blocking the suite).
    async fn recv_event(
        incoming_rx: &mut mpsc::Receiver<JsonRpcConnectionEvent>,
    ) -> JsonRpcConnectionEvent {
        let recv_result = timeout(Duration::from_secs(1), incoming_rx.recv()).await;
        let maybe_event = match recv_result {
            Ok(maybe_event) => maybe_event,
            Err(err) => panic!("timed out waiting for connection event: {err}"),
        };
        match maybe_event {
            Some(event) => event,
            None => panic!("connection event stream ended unexpectedly"),
        }
    }
    /// Reads and parses one newline-delimited JSON-RPC message from `lines`,
    /// with the same one-second timeout discipline as `recv_event`.
    async fn read_jsonrpc_line<R>(lines: &mut tokio::io::Lines<BufReader<R>>) -> JSONRPCMessage
    where
        R: tokio::io::AsyncRead + Unpin,
    {
        let next_line = timeout(Duration::from_secs(1), lines.next_line()).await;
        let line_result = match next_line {
            Ok(line_result) => line_result,
            Err(err) => panic!("timed out waiting for JSON-RPC line: {err}"),
        };
        let maybe_line = match line_result {
            Ok(maybe_line) => maybe_line,
            Err(err) => panic!("failed to read JSON-RPC line: {err}"),
        };
        let line = match maybe_line {
            Some(line) => line,
            None => panic!("connection closed before JSON-RPC line arrived"),
        };
        match serde_json::from_str::<JSONRPCMessage>(&line) {
            Ok(message) => message,
            Err(err) => panic!("failed to parse JSON-RPC line: {err}"),
        }
    }
    /// Round-trip: a line written into the connection surfaces as a Message
    /// event, and a queued outgoing message appears as one JSON line.
    #[tokio::test]
    async fn stdio_connection_reads_and_writes_jsonrpc_messages() {
        // Two duplex pipes stand in for the connection's stdin/stdout.
        let (mut writer_to_connection, connection_reader) = tokio::io::duplex(1024);
        let (connection_writer, reader_from_connection) = tokio::io::duplex(1024);
        let connection =
            JsonRpcConnection::from_stdio(connection_reader, connection_writer, "test".to_string());
        let (outgoing_tx, mut incoming_rx) = connection.into_parts();
        let incoming_message = JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(7),
            method: "initialize".to_string(),
            params: Some(serde_json::json!({ "clientName": "test-client" })),
            trace: None,
        });
        let encoded = match serialize_jsonrpc_message(&incoming_message) {
            Ok(encoded) => encoded,
            Err(err) => panic!("failed to serialize incoming message: {err}"),
        };
        if let Err(err) = writer_to_connection
            .write_all(format!("{encoded}\n").as_bytes())
            .await
        {
            panic!("failed to write to connection: {err}");
        }
        let event = recv_event(&mut incoming_rx).await;
        match event {
            JsonRpcConnectionEvent::Message(message) => {
                assert_eq!(message, incoming_message);
            }
            JsonRpcConnectionEvent::Disconnected { reason } => {
                panic!("unexpected disconnect event: {reason:?}");
            }
        }
        let outgoing_message = JSONRPCMessage::Response(JSONRPCResponse {
            id: RequestId::Integer(7),
            result: serde_json::json!({ "protocolVersion": "exec-server.v0" }),
        });
        if let Err(err) = outgoing_tx.send(outgoing_message.clone()).await {
            panic!("failed to queue outgoing message: {err}");
        }
        let mut lines = BufReader::new(reader_from_connection).lines();
        let message = read_jsonrpc_line(&mut lines).await;
        assert_eq!(message, outgoing_message);
    }
    /// A malformed line must produce a Disconnected event whose reason names
    /// the connection label.
    #[tokio::test]
    async fn stdio_connection_reports_parse_errors() {
        let (mut writer_to_connection, connection_reader) = tokio::io::duplex(1024);
        let (connection_writer, _reader_from_connection) = tokio::io::duplex(1024);
        let connection =
            JsonRpcConnection::from_stdio(connection_reader, connection_writer, "test".to_string());
        let (_outgoing_tx, mut incoming_rx) = connection.into_parts();
        if let Err(err) = writer_to_connection.write_all(b"not-json\n").await {
            panic!("failed to write invalid JSON: {err}");
        }
        let event = recv_event(&mut incoming_rx).await;
        match event {
            JsonRpcConnectionEvent::Disconnected { reason } => {
                let reason = match reason {
                    Some(reason) => reason,
                    None => panic!("expected a parse error reason"),
                };
                assert!(
                    reason.contains("failed to parse JSON-RPC message from test"),
                    "unexpected disconnect reason: {reason}"
                );
            }
            JsonRpcConnectionEvent::Message(message) => {
                panic!("unexpected JSON-RPC message: {message:?}");
            }
        }
    }
    /// EOF on the read side (writer dropped) must surface as a clean
    /// disconnect with no reason.
    #[tokio::test]
    async fn stdio_connection_reports_clean_disconnect() {
        let (writer_to_connection, connection_reader) = tokio::io::duplex(1024);
        let (connection_writer, _reader_from_connection) = tokio::io::duplex(1024);
        let connection =
            JsonRpcConnection::from_stdio(connection_reader, connection_writer, "test".to_string());
        let (_outgoing_tx, mut incoming_rx) = connection.into_parts();
        drop(writer_to_connection);
        let event = recv_event(&mut incoming_rx).await;
        match event {
            JsonRpcConnectionEvent::Disconnected { reason } => {
                assert_eq!(reason, None);
            }
            JsonRpcConnectionEvent::Message(message) => {
                panic!("unexpected JSON-RPC message: {message:?}");
            }
        }
    }
}

View File

@@ -1,30 +0,0 @@
// Internal modules.
mod client;
mod connection;
mod local;
mod protocol;
mod server;

// Client surface: connecting to a server and consuming its events.
pub use client::ExecServerClient;
pub use client::ExecServerClientConnectOptions;
pub use client::ExecServerError;
pub use client::ExecServerEvent;
pub use client::ExecServerOutput;
pub use client::RemoteExecServerConnectArgs;

// Local child-process launch helpers (separate from the client itself).
pub use local::ExecServerLaunchCommand;
pub use local::SpawnedExecServer;
pub use local::spawn_local_exec_server;

// Shared wire protocol types (requests, responses, notifications).
pub use protocol::ExecExitedNotification;
pub use protocol::ExecOutputDeltaNotification;
pub use protocol::ExecOutputStream;
pub use protocol::ExecParams;
pub use protocol::ExecResponse;
pub use protocol::InitializeParams;
pub use protocol::InitializeResponse;
pub use protocol::TerminateParams;
pub use protocol::TerminateResponse;
pub use protocol::WriteParams;
pub use protocol::WriteResponse;

// Server entrypoints and transport selection.
pub use server::ExecServerTransport;
pub use server::ExecServerTransportParseError;
pub use server::run_main;
pub use server::run_main_with_transport;

View File

@@ -1,70 +0,0 @@
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Mutex as StdMutex;
use tokio::process::Child;
use tokio::process::Command;
use crate::client::ExecServerClient;
use crate::client::ExecServerClientConnectOptions;
use crate::client::ExecServerError;
/// Program path and arguments used to launch a local exec-server child
/// process.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExecServerLaunchCommand {
    pub program: PathBuf,
    pub args: Vec<String>,
}
/// A locally spawned exec-server child process together with a client
/// connected to it over the child's stdio.
pub struct SpawnedExecServer {
    client: ExecServerClient,
    // Std (not tokio) mutex so `Drop` can lock without an async context.
    child: StdMutex<Option<Child>>,
}
impl SpawnedExecServer {
    /// Borrows the client connected to this spawned server.
    pub fn client(&self) -> &ExecServerClient {
        &self.client
    }
}
impl Drop for SpawnedExecServer {
fn drop(&mut self) {
if let Ok(mut child_guard) = self.child.lock()
&& let Some(child) = child_guard.as_mut()
{
let _ = child.start_kill();
}
}
}
/// Spawns `command` as a local exec-server child process and connects an
/// `ExecServerClient` to it over the child's piped stdin/stdout.
///
/// stderr is inherited so server diagnostics stay visible. If the handshake
/// fails, the child is killed (best effort) before the error is returned;
/// `kill_on_drop` also reaps it if the handle is dropped.
pub async fn spawn_local_exec_server(
    command: ExecServerLaunchCommand,
    options: ExecServerClientConnectOptions,
) -> Result<SpawnedExecServer, ExecServerError> {
    let mut launcher = Command::new(&command.program);
    launcher
        .args(&command.args)
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit())
        .kill_on_drop(true);
    let mut child = launcher.spawn().map_err(ExecServerError::Spawn)?;
    let stdin = child
        .stdin
        .take()
        .ok_or_else(|| ExecServerError::Protocol("exec-server stdin was not captured".to_string()))?;
    let stdout = child
        .stdout
        .take()
        .ok_or_else(|| ExecServerError::Protocol("exec-server stdout was not captured".to_string()))?;
    match ExecServerClient::connect_stdio(stdin, stdout, options).await {
        Ok(client) => Ok(SpawnedExecServer {
            client,
            child: StdMutex::new(Some(child)),
        }),
        Err(err) => {
            // Handshake failed: don't leave the child running.
            let _ = child.start_kill();
            Err(err)
        }
    }
}

View File

@@ -1,133 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde::Deserialize;
use serde::Serialize;
// JSON-RPC method names for client-initiated requests/notifications.
pub const INITIALIZE_METHOD: &str = "initialize";
pub const INITIALIZED_METHOD: &str = "initialized";
pub const EXEC_METHOD: &str = "command/exec";
pub const EXEC_WRITE_METHOD: &str = "command/exec/write";
pub const EXEC_TERMINATE_METHOD: &str = "command/exec/terminate";
// JSON-RPC method names for server-to-client notifications.
pub const EXEC_OUTPUT_DELTA_METHOD: &str = "command/exec/outputDelta";
pub const EXEC_EXITED_METHOD: &str = "command/exec/exited";
// Protocol version reported in the `initialize` response.
pub const PROTOCOL_VERSION: &str = "exec-server.v0";
/// Raw bytes carried on the wire as a base64-encoded JSON string
/// (see the `base64_bytes` serde adapter below).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ByteChunk(#[serde(with = "base64_bytes")] pub Vec<u8>);

impl ByteChunk {
    /// Consumes the chunk and returns the decoded bytes.
    pub fn into_inner(self) -> Vec<u8> {
        self.0
    }
}

impl From<Vec<u8>> for ByteChunk {
    fn from(value: Vec<u8>) -> Self {
        Self(value)
    }
}
/// Parameters for the `initialize` request.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
    // Human-readable name identifying the connecting client.
    pub client_name: String,
}
/// Result of the `initialize` request.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResponse {
    // Protocol version implemented by the server (see PROTOCOL_VERSION).
    pub protocol_version: String,
}
/// Parameters for `command/exec`: spawn a new process session.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecParams {
    // Full command line; the first element is the program to run.
    pub argv: Vec<String>,
    // Working directory for the spawned process.
    pub cwd: PathBuf,
    // Environment variables passed to the spawned process.
    pub env: HashMap<String, String>,
    // When true the process is spawned under a PTY (and accepts stdin
    // writes); otherwise it runs over pipes with stdin closed.
    pub tty: bool,
    // Optional argv[0] override forwarded to the process spawner.
    pub arg0: Option<String>,
}
/// Result of `command/exec`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecResponse {
    /// Server-assigned opaque session handle. This is a protocol key, not an
    /// OS pid.
    pub session_id: String,
}
/// Parameters for `command/exec/write`: send bytes to a session's stdin.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WriteParams {
    // Session handle returned by `command/exec`.
    pub session_id: String,
    // Bytes to write, base64-encoded on the wire.
    pub chunk: ByteChunk,
}
/// Result of `command/exec/write`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WriteResponse {
    // True when the chunk was accepted for delivery to the process.
    pub accepted: bool,
}
/// Parameters for `command/exec/terminate`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TerminateParams {
    // Session handle returned by `command/exec`.
    pub session_id: String,
}
/// Result of `command/exec/terminate`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TerminateResponse {
    // True when the session was still known (and a terminate was issued);
    // false when the session id was unknown or already cleaned up.
    pub running: bool,
}
/// Which output stream a `command/exec/outputDelta` chunk came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ExecOutputStream {
    Stdout,
    Stderr,
}
/// Server notification carrying a chunk of process output.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecOutputDeltaNotification {
    pub session_id: String,
    pub stream: ExecOutputStream,
    pub chunk: ByteChunk,
}
/// Server notification emitted once when a session's process exits.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecExitedNotification {
    pub session_id: String,
    // Process exit code; -1 when the exit status could not be observed.
    pub exit_code: i32,
}
/// Serde helpers that encode a byte buffer as a base64 string on the wire.
/// Used via `#[serde(with = "base64_bytes")]` on `ByteChunk`.
mod base64_bytes {
    use super::BASE64_STANDARD;
    use base64::Engine as _;
    use serde::Deserialize;
    use serde::Deserializer;
    use serde::Serializer;

    /// Serialize `bytes` as a base64 string.
    pub fn serialize<S: Serializer>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error> {
        let encoded = BASE64_STANDARD.encode(bytes);
        serializer.serialize_str(&encoded)
    }

    /// Deserialize a base64 string back into raw bytes; a malformed
    /// encoding surfaces as a custom deserialization error.
    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Vec<u8>, D::Error> {
        String::deserialize(deserializer).and_then(|encoded| {
            BASE64_STANDARD
                .decode(encoded)
                .map_err(serde::de::Error::custom)
        })
    }
}

View File

@@ -1,24 +0,0 @@
mod handler;
mod processor;
mod routing;
mod transport;
// Crate-internal re-exports wiring the handler, routing, and processor
// modules together.
pub(crate) use handler::ExecServerHandler;
pub(crate) use routing::ExecServerClientNotification;
pub(crate) use routing::ExecServerInboundMessage;
pub(crate) use routing::ExecServerOutboundMessage;
pub(crate) use routing::ExecServerRequest;
pub(crate) use routing::ExecServerResponseMessage;
pub(crate) use routing::ExecServerServerNotification;
pub use transport::ExecServerTransport;
pub use transport::ExecServerTransportParseError;
/// Run the exec server on its default transport (stdio).
pub async fn run_main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    run_main_with_transport(ExecServerTransport::Stdio).await
}
/// Run the exec server on an explicitly chosen transport.
pub async fn run_main_with_transport(
    transport: ExecServerTransport,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    transport::run_transport(transport).await
}

View File

@@ -1,922 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use codex_utils_pty::ExecCommandSession;
use codex_utils_pty::TerminalSize;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use crate::protocol::ExecExitedNotification;
use crate::protocol::ExecOutputDeltaNotification;
use crate::protocol::ExecOutputStream;
use crate::protocol::ExecResponse;
use crate::protocol::InitializeResponse;
use crate::protocol::PROTOCOL_VERSION;
use crate::protocol::TerminateResponse;
use crate::protocol::WriteResponse;
use crate::server::routing::ExecServerClientNotification;
use crate::server::routing::ExecServerInboundMessage;
use crate::server::routing::ExecServerOutboundMessage;
use crate::server::routing::ExecServerRequest;
use crate::server::routing::ExecServerResponseMessage;
use crate::server::routing::ExecServerServerNotification;
use crate::server::routing::internal_error;
use crate::server::routing::invalid_params;
use crate::server::routing::invalid_request;
/// A live child process tracked by the handler.
struct RunningProcess {
    // Handle used to write to and terminate the process.
    session: ExecCommandSession,
    // Whether the process was spawned under a PTY; only PTY-backed sessions
    // accept stdin writes (see `handle_write_request`).
    tty: bool,
}
/// Per-connection request handler for the exec server: enforces the
/// initialize handshake, spawns and tracks processes, and emits output and
/// exit notifications through `outbound_tx`.
pub(crate) struct ExecServerHandler {
    // Channel carrying responses and notifications back to the client.
    outbound_tx: mpsc::Sender<ExecServerOutboundMessage>,
    // Keyed by server-assigned opaque `sessionId`; this is a protocol handle,
    // not an OS pid.
    processes: Arc<Mutex<HashMap<String, RunningProcess>>>,
    // Monotonic counter backing the generated session ids.
    next_session_id: AtomicU64,
    // Set once an `initialize` request has been received.
    initialize_requested: bool,
    // Set once the `initialized` notification has been received.
    initialized: bool,
}
impl ExecServerHandler {
    /// Create a handler that emits all outbound traffic on `outbound_tx`.
    pub(crate) fn new(outbound_tx: mpsc::Sender<ExecServerOutboundMessage>) -> Self {
        Self {
            outbound_tx,
            processes: Arc::new(Mutex::new(HashMap::new())),
            next_session_id: AtomicU64::new(1),
            initialize_requested: false,
            initialized: false,
        }
    }
    /// Terminate every tracked process. Sessions are drained under the lock
    /// but terminated after releasing it, so termination never blocks other
    /// map users.
    pub(crate) async fn shutdown(&self) {
        let remaining = {
            let mut processes = self.processes.lock().await;
            processes
                .drain()
                .map(|(_, process)| process)
                .collect::<Vec<_>>()
        };
        for process in remaining {
            process.session.terminate();
        }
    }
    /// Dispatch one validated inbound message. An `Err` is a fatal protocol
    /// error: the caller closes the connection.
    pub(crate) async fn handle_message(
        &mut self,
        message: ExecServerInboundMessage,
    ) -> Result<(), String> {
        match message {
            ExecServerInboundMessage::Request(request) => self.handle_request(request).await,
            ExecServerInboundMessage::Notification(notification) => {
                self.handle_notification(notification)
            }
        }
    }
    /// Route a request to its handler and send the response or error back.
    /// Every exec method is gated on the initialize handshake having
    /// completed; request failures become JSON-RPC errors, not `Err`.
    async fn handle_request(&mut self, request: ExecServerRequest) -> Result<(), String> {
        match request {
            ExecServerRequest::Initialize { request_id, .. } => {
                let result = self
                    .handle_initialize_request()
                    .map(ExecServerResponseMessage::Initialize);
                self.send_request_result(request_id, result).await;
            }
            ExecServerRequest::Exec { request_id, params } => {
                self.send_request_result(
                    request_id,
                    match self.require_initialized() {
                        Ok(()) => self
                            .handle_exec_request(params)
                            .await
                            .map(ExecServerResponseMessage::Exec),
                        Err(err) => Err(err),
                    },
                )
                .await;
            }
            ExecServerRequest::Write { request_id, params } => {
                self.send_request_result(
                    request_id,
                    match self.require_initialized() {
                        Ok(()) => self
                            .handle_write_request(params)
                            .await
                            .map(ExecServerResponseMessage::Write),
                        Err(err) => Err(err),
                    },
                )
                .await;
            }
            ExecServerRequest::Terminate { request_id, params } => {
                self.send_request_result(
                    request_id,
                    match self.require_initialized() {
                        Ok(()) => self
                            .handle_terminate_request(params)
                            .await
                            .map(ExecServerResponseMessage::Terminate),
                        Err(err) => Err(err),
                    },
                )
                .await;
            }
        }
        Ok(())
    }
    /// Handle a client notification. `initialized` before `initialize` is a
    /// fatal protocol error.
    fn handle_notification(
        &mut self,
        notification: ExecServerClientNotification,
    ) -> Result<(), String> {
        match notification {
            ExecServerClientNotification::Initialized => {
                if !self.initialize_requested {
                    return Err("received `initialized` notification before `initialize`".into());
                }
                self.initialized = true;
                Ok(())
            }
        }
    }
    /// Handle `initialize`: allowed at most once per connection; reports the
    /// protocol version on success.
    fn handle_initialize_request(
        &mut self,
    ) -> Result<InitializeResponse, codex_app_server_protocol::JSONRPCErrorError> {
        if self.initialize_requested {
            return Err(invalid_request(
                "initialize may only be sent once per connection".to_string(),
            ));
        }
        self.initialize_requested = true;
        Ok(InitializeResponse {
            protocol_version: PROTOCOL_VERSION.to_string(),
        })
    }
    /// Reject exec methods until both the `initialize` request and the
    /// `initialized` notification have been seen.
    fn require_initialized(&self) -> Result<(), codex_app_server_protocol::JSONRPCErrorError> {
        if !self.initialize_requested {
            return Err(invalid_request(
                "client must call initialize before using exec methods".to_string(),
            ));
        }
        if !self.initialized {
            return Err(invalid_request(
                "client must send initialized before using exec methods".to_string(),
            ));
        }
        Ok(())
    }
    /// Handle `command/exec`: spawn the process (PTY or pipe-backed per
    /// `params.tty`), register it under a fresh session id, and start the
    /// output-streaming and exit-watching tasks.
    async fn handle_exec_request(
        &self,
        params: crate::protocol::ExecParams,
    ) -> Result<ExecResponse, codex_app_server_protocol::JSONRPCErrorError> {
        let (program, args) = params
            .argv
            .split_first()
            .ok_or_else(|| invalid_params("argv must not be empty".to_string()))?;
        // Session ids come from an atomic counter, so they are unique per
        // handler; the collision check below is purely defensive.
        let session_id = format!(
            "session-{}",
            self.next_session_id.fetch_add(1, Ordering::SeqCst)
        );
        let spawned = if params.tty {
            codex_utils_pty::spawn_pty_process(
                program,
                args,
                params.cwd.as_path(),
                &params.env,
                &params.arg0,
                TerminalSize::default(),
            )
            .await
        } else {
            codex_utils_pty::spawn_pipe_process_no_stdin(
                program,
                args,
                params.cwd.as_path(),
                &params.env,
                &params.arg0,
            )
            .await
        }
        .map_err(|err| internal_error(err.to_string()))?;
        {
            let mut process_map = self.processes.lock().await;
            if process_map.contains_key(&session_id) {
                // Don't leak the already-spawned child on the (unexpected)
                // collision path.
                spawned.session.terminate();
                return Err(invalid_request(format!(
                    "session {session_id} already exists"
                )));
            }
            process_map.insert(
                session_id.clone(),
                RunningProcess {
                    session: spawned.session,
                    tty: params.tty,
                },
            );
        }
        // Forward stdout/stderr chunks and the eventual exit code to the
        // client as notifications; `watch_exit` also removes the map entry.
        tokio::spawn(stream_output(
            session_id.clone(),
            ExecOutputStream::Stdout,
            spawned.stdout_rx,
            self.outbound_tx.clone(),
        ));
        tokio::spawn(stream_output(
            session_id.clone(),
            ExecOutputStream::Stderr,
            spawned.stderr_rx,
            self.outbound_tx.clone(),
        ));
        tokio::spawn(watch_exit(
            session_id.clone(),
            spawned.exit_rx,
            self.outbound_tx.clone(),
            Arc::clone(&self.processes),
        ));
        Ok(ExecResponse { session_id })
    }
    /// Handle `command/exec/write`: forward bytes to a PTY-backed session's
    /// stdin. Pipe-backed sessions are spawned without stdin and reject
    /// writes. The writer sender is cloned under the lock, but the send
    /// happens after releasing it so the map lock is never held across the
    /// await.
    async fn handle_write_request(
        &self,
        params: crate::protocol::WriteParams,
    ) -> Result<WriteResponse, codex_app_server_protocol::JSONRPCErrorError> {
        let writer_tx = {
            let process_map = self.processes.lock().await;
            let process = process_map.get(&params.session_id).ok_or_else(|| {
                invalid_request(format!("unknown session id {}", params.session_id))
            })?;
            if !process.tty {
                return Err(invalid_request(format!(
                    "stdin is closed for session {}",
                    params.session_id
                )));
            }
            process.session.writer_sender()
        };
        writer_tx
            .send(params.chunk.into_inner())
            .await
            .map_err(|_| internal_error("failed to write to process stdin".to_string()))?;
        Ok(WriteResponse { accepted: true })
    }
    /// Handle `command/exec/terminate`. The map entry is intentionally NOT
    /// removed here: the id stays reserved until `watch_exit` observes the
    /// process exit and cleans it up.
    async fn handle_terminate_request(
        &self,
        params: crate::protocol::TerminateParams,
    ) -> Result<TerminateResponse, codex_app_server_protocol::JSONRPCErrorError> {
        let running = {
            let process_map = self.processes.lock().await;
            if let Some(process) = process_map.get(&params.session_id) {
                process.session.terminate();
                true
            } else {
                false
            }
        };
        Ok(TerminateResponse { running })
    }
    /// Convert a request outcome into a response or error message and emit it.
    async fn send_request_result(
        &self,
        request_id: codex_app_server_protocol::RequestId,
        result: Result<ExecServerResponseMessage, codex_app_server_protocol::JSONRPCErrorError>,
    ) {
        let outbound = match result {
            Ok(response) => ExecServerOutboundMessage::Response {
                request_id,
                response,
            },
            Err(error) => ExecServerOutboundMessage::Error { request_id, error },
        };
        self.send_outbound(outbound).await;
    }
    /// Best-effort send: a closed outbound channel means the connection is
    /// gone, so the error is deliberately ignored.
    async fn send_outbound(&self, outbound: ExecServerOutboundMessage) {
        let _ = self.outbound_tx.send(outbound).await;
    }
}
/// Forward raw output chunks from one process stream to the client as
/// `outputDelta` notifications, stopping when either the process stream or
/// the outbound channel closes.
async fn stream_output(
    session_id: String,
    stream: ExecOutputStream,
    mut receiver: tokio::sync::mpsc::Receiver<Vec<u8>>,
    outbound_tx: mpsc::Sender<ExecServerOutboundMessage>,
) {
    while let Some(chunk) = receiver.recv().await {
        let notification = ExecOutputDeltaNotification {
            session_id: session_id.clone(),
            stream,
            chunk: chunk.into(),
        };
        let message = ExecServerOutboundMessage::Notification(
            ExecServerServerNotification::OutputDelta(notification),
        );
        if outbound_tx.send(message).await.is_err() {
            // Connection is gone; stop draining this stream.
            break;
        }
    }
}
/// Wait for the process behind `session_id` to exit, release its map entry,
/// and notify the client with the exit code.
async fn watch_exit(
    session_id: String,
    exit_rx: tokio::sync::oneshot::Receiver<i32>,
    outbound_tx: mpsc::Sender<ExecServerOutboundMessage>,
    processes: Arc<Mutex<HashMap<String, RunningProcess>>>,
) {
    // A dropped exit channel means the status could not be observed;
    // report -1 in that case.
    let exit_code = match exit_rx.await {
        Ok(code) => code,
        Err(_) => -1,
    };
    // This is the single cleanup point that frees the session id.
    processes.lock().await.remove(&session_id);
    let notification = ExecServerServerNotification::Exited(ExecExitedNotification {
        session_id,
        exit_code,
    });
    // Best-effort: the connection may already be closed.
    let _ = outbound_tx
        .send(ExecServerOutboundMessage::Notification(notification))
        .await;
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::time::Duration;
    use pretty_assertions::assert_eq;
    use tokio::time::timeout;
    use super::ExecServerHandler;
    use crate::protocol::InitializeParams;
    use crate::protocol::InitializeResponse;
    use crate::protocol::PROTOCOL_VERSION;
    use crate::protocol::TerminateResponse;
    use crate::protocol::WriteParams;
    use crate::server::routing::ExecServerClientNotification;
    use crate::server::routing::ExecServerInboundMessage;
    use crate::server::routing::ExecServerOutboundMessage;
    use crate::server::routing::ExecServerRequest;
    use crate::server::routing::ExecServerResponseMessage;
    use codex_app_server_protocol::RequestId;

    /// Receive the next outbound message, failing the test if the handler
    /// stalls for more than a second or its channel closes.
    async fn recv_outbound(
        outgoing_rx: &mut tokio::sync::mpsc::Receiver<ExecServerOutboundMessage>,
    ) -> ExecServerOutboundMessage {
        let recv_result = timeout(Duration::from_secs(1), outgoing_rx.recv()).await;
        let maybe_message = match recv_result {
            Ok(maybe_message) => maybe_message,
            Err(err) => panic!("timed out waiting for handler output: {err}"),
        };
        match maybe_message {
            Some(message) => message,
            None => panic!("handler output channel closed unexpectedly"),
        }
    }

    /// Drive the `initialize` request (always request id 1) through the
    /// handler and discard its response, so each test only inspects the
    /// messages it actually cares about.
    async fn send_initialize(
        handler: &mut ExecServerHandler,
        outgoing_rx: &mut tokio::sync::mpsc::Receiver<ExecServerOutboundMessage>,
    ) {
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Initialize {
                    request_id: RequestId::Integer(1),
                    params: InitializeParams {
                        client_name: "test".to_string(),
                    },
                },
            ))
            .await
        {
            panic!("initialize should succeed: {err}");
        }
        let _ = recv_outbound(outgoing_rx).await;
    }

    /// Complete the full `initialize` + `initialized` handshake that every
    /// exec method requires.
    async fn complete_handshake(
        handler: &mut ExecServerHandler,
        outgoing_rx: &mut tokio::sync::mpsc::Receiver<ExecServerOutboundMessage>,
    ) {
        send_initialize(handler, outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Notification(
                ExecServerClientNotification::Initialized,
            ))
            .await
        {
            panic!("initialized should succeed: {err}");
        }
    }

    /// Build `ExecParams` that run `script` under `bash -lc` in the current
    /// directory with an empty environment.
    fn bash_exec_params(script: &str, tty: bool) -> crate::protocol::ExecParams {
        crate::protocol::ExecParams {
            argv: vec!["bash".to_string(), "-lc".to_string(), script.to_string()],
            cwd: std::env::current_dir().expect("cwd"),
            env: HashMap::new(),
            tty,
            arg0: None,
        }
    }

    #[tokio::test]
    async fn initialize_response_reports_protocol_version() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(1);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Initialize {
                    request_id: RequestId::Integer(1),
                    params: InitializeParams {
                        client_name: "test".to_string(),
                    },
                },
            ))
            .await
        {
            panic!("initialize should succeed: {err}");
        }
        assert_eq!(
            recv_outbound(&mut outgoing_rx).await,
            ExecServerOutboundMessage::Response {
                request_id: RequestId::Integer(1),
                response: ExecServerResponseMessage::Initialize(InitializeResponse {
                    protocol_version: PROTOCOL_VERSION.to_string(),
                }),
            }
        );
    }

    #[tokio::test]
    async fn exec_methods_require_initialize() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(1);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(ExecServerRequest::Exec {
                request_id: RequestId::Integer(7),
                params: bash_exec_params("true", true),
            }))
            .await
        {
            panic!("request handling should not fail the handler: {err}");
        }
        let ExecServerOutboundMessage::Error { request_id, error } =
            recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected invalid-request error");
        };
        assert_eq!(request_id, RequestId::Integer(7));
        assert_eq!(error.code, -32600);
        assert_eq!(
            error.message,
            "client must call initialize before using exec methods"
        );
    }

    #[tokio::test]
    async fn exec_methods_require_initialized_notification_after_initialize() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(2);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        send_initialize(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(ExecServerRequest::Exec {
                request_id: RequestId::Integer(2),
                params: bash_exec_params("true", true),
            }))
            .await
        {
            panic!("request handling should not fail the handler: {err}");
        }
        let ExecServerOutboundMessage::Error { request_id, error } =
            recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected invalid-request error");
        };
        assert_eq!(request_id, RequestId::Integer(2));
        assert_eq!(error.code, -32600);
        assert_eq!(
            error.message,
            "client must send initialized before using exec methods"
        );
    }

    #[tokio::test]
    async fn initialized_before_initialize_is_a_protocol_error() {
        let (outgoing_tx, _outgoing_rx) = tokio::sync::mpsc::channel(1);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        let result = handler
            .handle_message(ExecServerInboundMessage::Notification(
                ExecServerClientNotification::Initialized,
            ))
            .await;
        match result {
            Err(err) => {
                assert_eq!(
                    err,
                    "received `initialized` notification before `initialize`"
                );
            }
            Ok(()) => panic!("expected protocol error for early initialized notification"),
        }
    }

    #[tokio::test]
    async fn initialize_may_only_be_sent_once_per_connection() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(2);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        send_initialize(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Initialize {
                    request_id: RequestId::Integer(2),
                    params: InitializeParams {
                        client_name: "test".to_string(),
                    },
                },
            ))
            .await
        {
            panic!("duplicate initialize should not fail the handler: {err}");
        }
        let ExecServerOutboundMessage::Error { request_id, error } =
            recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected invalid-request error");
        };
        assert_eq!(request_id, RequestId::Integer(2));
        assert_eq!(error.code, -32600);
        assert_eq!(
            error.message,
            "initialize may only be sent once per connection"
        );
    }

    #[tokio::test]
    async fn exec_returns_server_generated_session_ids() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(4);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        complete_handshake(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(ExecServerRequest::Exec {
                request_id: RequestId::Integer(2),
                params: bash_exec_params("sleep 30", false),
            }))
            .await
        {
            panic!("first exec should succeed: {err}");
        }
        let ExecServerOutboundMessage::Response {
            request_id,
            response: ExecServerResponseMessage::Exec(first_exec),
        } = recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected first exec response");
        };
        assert_eq!(request_id, RequestId::Integer(2));
        assert_eq!(first_exec.session_id, "session-1");
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(ExecServerRequest::Exec {
                request_id: RequestId::Integer(3),
                params: bash_exec_params("true", false),
            }))
            .await
        {
            panic!("second exec should succeed: {err}");
        }
        let ExecServerOutboundMessage::Response {
            request_id,
            response: ExecServerResponseMessage::Exec(second_exec),
        } = recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected second exec response");
        };
        assert_eq!(request_id, RequestId::Integer(3));
        assert_eq!(second_exec.session_id, "session-2");
        handler.shutdown().await;
    }

    #[tokio::test]
    async fn writes_to_pipe_backed_processes_are_rejected() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(4);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        complete_handshake(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(ExecServerRequest::Exec {
                request_id: RequestId::Integer(2),
                params: bash_exec_params("sleep 30", false),
            }))
            .await
        {
            panic!("exec should succeed: {err}");
        }
        let ExecServerOutboundMessage::Response {
            response: ExecServerResponseMessage::Exec(exec_response),
            ..
        } = recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected exec response");
        };
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Write {
                    request_id: RequestId::Integer(3),
                    params: WriteParams {
                        session_id: exec_response.session_id,
                        chunk: b"hello\n".to_vec().into(),
                    },
                },
            ))
            .await
        {
            panic!("write should not fail the handler: {err}");
        }
        let ExecServerOutboundMessage::Error { request_id, error } =
            recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected stdin-closed error");
        };
        assert_eq!(request_id, RequestId::Integer(3));
        assert_eq!(error.code, -32600);
        assert_eq!(error.message, "stdin is closed for session session-1");
        handler.shutdown().await;
    }

    #[tokio::test]
    async fn writes_to_unknown_processes_are_rejected() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(2);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        complete_handshake(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Write {
                    request_id: RequestId::Integer(2),
                    params: WriteParams {
                        session_id: "missing".to_string(),
                        chunk: b"hello\n".to_vec().into(),
                    },
                },
            ))
            .await
        {
            panic!("write should not fail the handler: {err}");
        }
        let ExecServerOutboundMessage::Error { request_id, error } =
            recv_outbound(&mut outgoing_rx).await
        else {
            panic!("expected unknown-process error");
        };
        assert_eq!(request_id, RequestId::Integer(2));
        assert_eq!(error.code, -32600);
        assert_eq!(error.message, "unknown session id missing");
    }

    #[tokio::test]
    async fn terminate_unknown_processes_report_running_false() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(2);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        complete_handshake(&mut handler, &mut outgoing_rx).await;
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Terminate {
                    request_id: RequestId::Integer(2),
                    params: crate::protocol::TerminateParams {
                        session_id: "missing".to_string(),
                    },
                },
            ))
            .await
        {
            panic!("terminate should not fail the handler: {err}");
        }
        assert_eq!(
            recv_outbound(&mut outgoing_rx).await,
            ExecServerOutboundMessage::Response {
                request_id: RequestId::Integer(2),
                response: ExecServerResponseMessage::Terminate(TerminateResponse {
                    running: false,
                }),
            }
        );
    }

    #[tokio::test]
    async fn terminate_keeps_session_ids_reserved_until_exit_cleanup() {
        let (outgoing_tx, mut outgoing_rx) = tokio::sync::mpsc::channel(2);
        let mut handler = ExecServerHandler::new(outgoing_tx);
        complete_handshake(&mut handler, &mut outgoing_rx).await;
        // Insert a process directly so the test controls its session id.
        let spawned = codex_utils_pty::spawn_pipe_process_no_stdin(
            "bash",
            &["-lc".to_string(), "sleep 30".to_string()],
            std::env::current_dir().expect("cwd").as_path(),
            &HashMap::new(),
            &None,
        )
        .await
        .expect("spawn test process");
        {
            let mut process_map = handler.processes.lock().await;
            process_map.insert(
                "proc-1".to_string(),
                super::RunningProcess {
                    session: spawned.session,
                    tty: false,
                },
            );
        }
        if let Err(err) = handler
            .handle_message(ExecServerInboundMessage::Request(
                ExecServerRequest::Terminate {
                    request_id: RequestId::Integer(2),
                    params: crate::protocol::TerminateParams {
                        session_id: "proc-1".to_string(),
                    },
                },
            ))
            .await
        {
            panic!("terminate should not fail the handler: {err}");
        }
        assert_eq!(
            recv_outbound(&mut outgoing_rx).await,
            ExecServerOutboundMessage::Response {
                request_id: RequestId::Integer(2),
                response: ExecServerResponseMessage::Terminate(TerminateResponse { running: true }),
            }
        );
        assert!(
            handler.processes.lock().await.contains_key("proc-1"),
            "terminated ids should stay reserved until exit cleanup removes them"
        );
        handler.shutdown().await;
    }
}

View File

@@ -1,67 +0,0 @@
use tokio::sync::mpsc;
use tracing::debug;
use tracing::warn;
use crate::connection::CHANNEL_CAPACITY;
use crate::connection::JsonRpcConnection;
use crate::connection::JsonRpcConnectionEvent;
use crate::server::handler::ExecServerHandler;
use crate::server::routing::ExecServerOutboundMessage;
use crate::server::routing::RoutedExecServerMessage;
use crate::server::routing::encode_outbound_message;
use crate::server::routing::route_jsonrpc_message;
/// Drive one exec-server connection to completion.
///
/// Splits the JSON-RPC connection into an inbound event stream and a spawned
/// outbound writer task, then pumps inbound messages through an
/// `ExecServerHandler` until the peer disconnects or a protocol error occurs.
pub(crate) async fn run_connection(connection: JsonRpcConnection) {
    let (json_outgoing_tx, mut incoming_rx) = connection.into_parts();
    let (outgoing_tx, mut outgoing_rx) =
        mpsc::channel::<ExecServerOutboundMessage>(CHANNEL_CAPACITY);
    let mut handler = ExecServerHandler::new(outgoing_tx.clone());
    // Writer task: serialize typed outbound messages and push them onto the
    // JSON connection until either channel closes.
    let outbound_task = tokio::spawn(async move {
        while let Some(message) = outgoing_rx.recv().await {
            let json_message = match encode_outbound_message(message) {
                Ok(json_message) => json_message,
                Err(err) => {
                    warn!("failed to serialize exec-server outbound message: {err}");
                    break;
                }
            };
            if json_outgoing_tx.send(json_message).await.is_err() {
                break;
            }
        }
    });
    while let Some(event) = incoming_rx.recv().await {
        match event {
            JsonRpcConnectionEvent::Message(message) => match route_jsonrpc_message(message) {
                Ok(RoutedExecServerMessage::Inbound(message)) => {
                    // Handler errors are fatal protocol violations.
                    if let Err(err) = handler.handle_message(message).await {
                        warn!("closing exec-server connection after protocol error: {err}");
                        break;
                    }
                }
                // Routing already produced the reply (e.g. malformed params);
                // forward it without involving the handler.
                Ok(RoutedExecServerMessage::ImmediateOutbound(message)) => {
                    if outgoing_tx.send(message).await.is_err() {
                        break;
                    }
                }
                Err(err) => {
                    warn!("closing exec-server connection after protocol error: {err}");
                    break;
                }
            },
            JsonRpcConnectionEvent::Disconnected { reason } => {
                if let Some(reason) = reason {
                    debug!("exec-server connection disconnected: {reason}");
                }
                break;
            }
        }
    }
    // Terminate any still-running child processes before tearing down.
    handler.shutdown().await;
    // Drop every sender clone so the writer task's channel closes and it can
    // finish; only then await it.
    drop(handler);
    drop(outgoing_tx);
    let _ = outbound_task.await;
}

View File

@@ -1,441 +0,0 @@
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use serde::de::DeserializeOwned;
use crate::protocol::EXEC_EXITED_METHOD;
use crate::protocol::EXEC_METHOD;
use crate::protocol::EXEC_OUTPUT_DELTA_METHOD;
use crate::protocol::EXEC_TERMINATE_METHOD;
use crate::protocol::EXEC_WRITE_METHOD;
use crate::protocol::ExecExitedNotification;
use crate::protocol::ExecOutputDeltaNotification;
use crate::protocol::ExecParams;
use crate::protocol::ExecResponse;
use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeParams;
use crate::protocol::InitializeResponse;
use crate::protocol::TerminateParams;
use crate::protocol::TerminateResponse;
use crate::protocol::WriteParams;
use crate::protocol::WriteResponse;
/// A validated client-to-server message, ready for the handler.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum ExecServerInboundMessage {
    Request(ExecServerRequest),
    Notification(ExecServerClientNotification),
}
/// Typed client requests, each paired with the JSON-RPC id to answer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum ExecServerRequest {
    Initialize {
        request_id: RequestId,
        params: InitializeParams,
    },
    Exec {
        request_id: RequestId,
        params: ExecParams,
    },
    Write {
        request_id: RequestId,
        params: WriteParams,
    },
    Terminate {
        request_id: RequestId,
        params: TerminateParams,
    },
}
/// Notifications a client may send to the server.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum ExecServerClientNotification {
    Initialized,
}
/// Server-to-client messages, prior to JSON-RPC encoding.
#[derive(Debug, Clone, PartialEq)]
pub(crate) enum ExecServerOutboundMessage {
    // Successful reply to a client request.
    Response {
        request_id: RequestId,
        response: ExecServerResponseMessage,
    },
    // Error reply to a client request.
    Error {
        request_id: RequestId,
        error: JSONRPCErrorError,
    },
    // Unsolicited server notification.
    Notification(ExecServerServerNotification),
}
/// Typed payloads for successful responses.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum ExecServerResponseMessage {
    Initialize(InitializeResponse),
    Exec(ExecResponse),
    Write(WriteResponse),
    Terminate(TerminateResponse),
}
/// Notifications the server pushes to the client.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum ExecServerServerNotification {
    OutputDelta(ExecOutputDeltaNotification),
    Exited(ExecExitedNotification),
}
/// Outcome of routing one raw JSON-RPC message.
#[derive(Debug, Clone, PartialEq)]
pub(crate) enum RoutedExecServerMessage {
    // Message to forward to the handler.
    Inbound(ExecServerInboundMessage),
    // Reply (e.g. a parse error) to send back without involving the handler.
    ImmediateOutbound(ExecServerOutboundMessage),
}
pub(crate) fn route_jsonrpc_message(
message: JSONRPCMessage,
) -> Result<RoutedExecServerMessage, String> {
match message {
JSONRPCMessage::Request(request) => route_request(request),
JSONRPCMessage::Notification(notification) => route_notification(notification),
JSONRPCMessage::Response(response) => Err(format!(
"unexpected client response for request id {:?}",
response.id
)),
JSONRPCMessage::Error(error) => Err(format!(
"unexpected client error for request id {:?}",
error.id
)),
}
}
pub(crate) fn encode_outbound_message(
message: ExecServerOutboundMessage,
) -> Result<JSONRPCMessage, serde_json::Error> {
match message {
ExecServerOutboundMessage::Response {
request_id,
response,
} => Ok(JSONRPCMessage::Response(JSONRPCResponse {
id: request_id,
result: serialize_response(response)?,
})),
ExecServerOutboundMessage::Error { request_id, error } => {
Ok(JSONRPCMessage::Error(JSONRPCError {
id: request_id,
error,
}))
}
ExecServerOutboundMessage::Notification(notification) => Ok(JSONRPCMessage::Notification(
serialize_notification(notification)?,
)),
}
}
/// Build a JSON-RPC "Invalid Request" error (-32600).
pub(crate) fn invalid_request(message: String) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: -32600,
        data: None,
        message,
    }
}
/// Build a JSON-RPC "Invalid params" error (-32602).
pub(crate) fn invalid_params(message: String) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: -32602,
        data: None,
        message,
    }
}
/// Build a JSON-RPC "Internal error" (-32603).
pub(crate) fn internal_error(message: String) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: -32603,
        data: None,
        message,
    }
}
fn route_request(request: JSONRPCRequest) -> Result<RoutedExecServerMessage, String> {
match request.method.as_str() {
INITIALIZE_METHOD => Ok(parse_request_params(request, |request_id, params| {
ExecServerRequest::Initialize { request_id, params }
})),
EXEC_METHOD => Ok(parse_request_params(request, |request_id, params| {
ExecServerRequest::Exec { request_id, params }
})),
EXEC_WRITE_METHOD => Ok(parse_request_params(request, |request_id, params| {
ExecServerRequest::Write { request_id, params }
})),
EXEC_TERMINATE_METHOD => Ok(parse_request_params(request, |request_id, params| {
ExecServerRequest::Terminate { request_id, params }
})),
other => Ok(RoutedExecServerMessage::ImmediateOutbound(
ExecServerOutboundMessage::Error {
request_id: request.id,
error: invalid_request(format!("unknown method: {other}")),
},
)),
}
}
fn route_notification(
notification: JSONRPCNotification,
) -> Result<RoutedExecServerMessage, String> {
match notification.method.as_str() {
INITIALIZED_METHOD => Ok(RoutedExecServerMessage::Inbound(
ExecServerInboundMessage::Notification(ExecServerClientNotification::Initialized),
)),
other => Err(format!("unexpected notification method: {other}")),
}
}
/// Deserialize a request's params as `P` and build the typed request with
/// `build`. Absent params are treated as JSON null; a deserialization
/// failure becomes an immediate invalid-params reply for the same id.
fn parse_request_params<P, F>(request: JSONRPCRequest, build: F) -> RoutedExecServerMessage
where
    P: DeserializeOwned,
    F: FnOnce(RequestId, P) -> ExecServerRequest,
{
    let request_id = request.id;
    let raw_params = request.params.unwrap_or(serde_json::Value::Null);
    match serde_json::from_value::<P>(raw_params) {
        Ok(parsed) => {
            let inbound = ExecServerInboundMessage::Request(build(request_id, parsed));
            RoutedExecServerMessage::Inbound(inbound)
        }
        Err(err) => RoutedExecServerMessage::ImmediateOutbound(ExecServerOutboundMessage::Error {
            request_id,
            error: invalid_params(err.to_string()),
        }),
    }
}
fn serialize_response(
response: ExecServerResponseMessage,
) -> Result<serde_json::Value, serde_json::Error> {
match response {
ExecServerResponseMessage::Initialize(response) => serde_json::to_value(response),
ExecServerResponseMessage::Exec(response) => serde_json::to_value(response),
ExecServerResponseMessage::Write(response) => serde_json::to_value(response),
ExecServerResponseMessage::Terminate(response) => serde_json::to_value(response),
}
}
/// Serializes a typed server notification into its JSON-RPC wire form,
/// pairing each variant with its protocol method name.
fn serialize_notification(
    notification: ExecServerServerNotification,
) -> Result<JSONRPCNotification, serde_json::Error> {
    let (method, params) = match notification {
        ExecServerServerNotification::OutputDelta(payload) => {
            (EXEC_OUTPUT_DELTA_METHOD, serde_json::to_value(payload)?)
        }
        ExecServerServerNotification::Exited(payload) => {
            (EXEC_EXITED_METHOD, serde_json::to_value(payload)?)
        }
    };
    Ok(JSONRPCNotification {
        method: method.to_string(),
        params: Some(params),
    })
}
// Unit tests for the JSON-RPC <-> typed exec-server message routing layer.
#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use super::ExecServerClientNotification;
    use super::ExecServerInboundMessage;
    use super::ExecServerOutboundMessage;
    use super::ExecServerRequest;
    use super::ExecServerResponseMessage;
    use super::ExecServerServerNotification;
    use super::RoutedExecServerMessage;
    use super::encode_outbound_message;
    use super::route_jsonrpc_message;
    use crate::protocol::EXEC_EXITED_METHOD;
    use crate::protocol::EXEC_METHOD;
    use crate::protocol::ExecExitedNotification;
    use crate::protocol::ExecParams;
    use crate::protocol::ExecResponse;
    use crate::protocol::INITIALIZE_METHOD;
    use crate::protocol::INITIALIZED_METHOD;
    use crate::protocol::InitializeParams;
    use codex_app_server_protocol::JSONRPCMessage;
    use codex_app_server_protocol::JSONRPCNotification;
    use codex_app_server_protocol::JSONRPCRequest;
    use codex_app_server_protocol::JSONRPCResponse;
    use codex_app_server_protocol::RequestId;

    // Well-formed `initialize` requests become typed `Initialize` variants.
    #[test]
    fn routes_initialize_requests_to_typed_variants() {
        let routed = route_jsonrpc_message(JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(1),
            method: INITIALIZE_METHOD.to_string(),
            params: Some(json!({ "clientName": "test-client" })),
            trace: None,
        }))
        .expect("initialize request should route");
        assert_eq!(
            routed,
            RoutedExecServerMessage::Inbound(ExecServerInboundMessage::Request(
                ExecServerRequest::Initialize {
                    request_id: RequestId::Integer(1),
                    params: InitializeParams {
                        client_name: "test-client".to_string(),
                    },
                },
            ))
        );
    }

    // Missing required exec fields yield an immediate -32602 error outbound
    // instead of a typed request.
    #[test]
    fn malformed_exec_params_return_immediate_error_outbound() {
        let routed = route_jsonrpc_message(JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(2),
            method: EXEC_METHOD.to_string(),
            params: Some(json!({ "sessionId": "proc-1" })),
            trace: None,
        }))
        .expect("exec request should route");
        let RoutedExecServerMessage::ImmediateOutbound(ExecServerOutboundMessage::Error {
            request_id,
            error,
        }) = routed
        else {
            panic!("expected invalid-params error outbound");
        };
        assert_eq!(request_id, RequestId::Integer(2));
        assert_eq!(error.code, -32602);
    }

    // The `initialized` client notification routes to its typed variant.
    #[test]
    fn routes_initialized_notifications_to_typed_variants() {
        let routed = route_jsonrpc_message(JSONRPCMessage::Notification(JSONRPCNotification {
            method: INITIALIZED_METHOD.to_string(),
            params: Some(json!({})),
        }))
        .expect("initialized notification should route");
        assert_eq!(
            routed,
            RoutedExecServerMessage::Inbound(ExecServerInboundMessage::Notification(
                ExecServerClientNotification::Initialized,
            ))
        );
    }

    // Typed server notifications round-trip to camelCase JSON-RPC payloads.
    #[test]
    fn serializes_typed_notifications_back_to_jsonrpc() {
        let message = encode_outbound_message(ExecServerOutboundMessage::Notification(
            ExecServerServerNotification::Exited(ExecExitedNotification {
                session_id: "proc-1".to_string(),
                exit_code: 0,
            }),
        ))
        .expect("notification should serialize");
        assert_eq!(
            message,
            JSONRPCMessage::Notification(JSONRPCNotification {
                method: EXEC_EXITED_METHOD.to_string(),
                params: Some(json!({
                    "sessionId": "proc-1",
                    "exitCode": 0,
                })),
            })
        );
    }

    // Typed responses serialize into the JSON-RPC `result` field.
    #[test]
    fn serializes_typed_responses_back_to_jsonrpc() {
        let message = encode_outbound_message(ExecServerOutboundMessage::Response {
            request_id: RequestId::Integer(3),
            response: ExecServerResponseMessage::Exec(ExecResponse {
                session_id: "proc-1".to_string(),
            }),
        })
        .expect("response should serialize");
        assert_eq!(
            message,
            JSONRPCMessage::Response(codex_app_server_protocol::JSONRPCResponse {
                id: RequestId::Integer(3),
                result: json!({
                    "sessionId": "proc-1",
                }),
            })
        );
    }

    // Full exec params (argv/cwd/env/tty/arg0) deserialize into `ExecParams`.
    #[test]
    fn routes_exec_requests_with_typed_params() {
        let cwd = std::env::current_dir().expect("cwd");
        let routed = route_jsonrpc_message(JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(4),
            method: EXEC_METHOD.to_string(),
            params: Some(json!({
                "sessionId": "proc-1",
                "argv": ["bash", "-lc", "true"],
                "cwd": cwd,
                "env": {},
                "tty": true,
                "arg0": null,
            })),
            trace: None,
        }))
        .expect("exec request should route");
        let RoutedExecServerMessage::Inbound(ExecServerInboundMessage::Request(
            ExecServerRequest::Exec { request_id, params },
        )) = routed
        else {
            panic!("expected typed exec request");
        };
        assert_eq!(request_id, RequestId::Integer(4));
        assert_eq!(
            params,
            ExecParams {
                argv: vec!["bash".to_string(), "-lc".to_string(), "true".to_string()],
                cwd: std::env::current_dir().expect("cwd"),
                env: std::collections::HashMap::new(),
                tty: true,
                arg0: None,
            }
        );
    }

    // Unknown methods still route (as an immediate -32600 error) rather than
    // tearing down the connection.
    #[test]
    fn unknown_request_methods_return_immediate_invalid_request_errors() {
        let routed = route_jsonrpc_message(JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(5),
            method: "process/unknown".to_string(),
            params: Some(json!({})),
            trace: None,
        }))
        .expect("unknown request should still route");
        assert_eq!(
            routed,
            RoutedExecServerMessage::ImmediateOutbound(ExecServerOutboundMessage::Error {
                request_id: RequestId::Integer(5),
                error: super::invalid_request("unknown method: process/unknown".to_string()),
            })
        );
    }

    // Server-to-client notification methods coming FROM the client are a
    // protocol violation and fail routing.
    #[test]
    fn unexpected_client_notifications_are_rejected() {
        let err = route_jsonrpc_message(JSONRPCMessage::Notification(JSONRPCNotification {
            method: "process/outputDelta".to_string(),
            params: Some(json!({})),
        }))
        .expect_err("unexpected client notification should fail");
        assert_eq!(err, "unexpected notification method: process/outputDelta");
    }

    // The server never issues requests to the client, so responses from the
    // client fail routing.
    #[test]
    fn unexpected_client_responses_are_rejected() {
        let err = route_jsonrpc_message(JSONRPCMessage::Response(JSONRPCResponse {
            id: RequestId::Integer(6),
            result: json!({}),
        }))
        .expect_err("unexpected client response should fail");
        assert_eq!(err, "unexpected client response for request id Integer(6)");
    }
}

View File

@@ -1,166 +0,0 @@
use std::net::SocketAddr;
use std::str::FromStr;
use tokio::net::TcpListener;
use tokio_tungstenite::accept_async;
use tracing::warn;
use crate::connection::JsonRpcConnection;
use crate::server::processor::run_connection;
/// Transport over which the exec server accepts JSON-RPC traffic.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ExecServerTransport {
    /// Serve a single connection over the process's stdin/stdout.
    Stdio,
    /// Accept websocket connections on the given local socket address.
    WebSocket { bind_address: SocketAddr },
}
/// Error produced when a `--listen` URL cannot be parsed into a transport.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum ExecServerTransportParseError {
    /// The URL used a scheme other than `stdio://` or `ws://`.
    UnsupportedListenUrl(String),
    /// The URL was `ws://…` but the remainder was not a literal `IP:PORT`.
    InvalidWebSocketListenUrl(String),
}
impl std::fmt::Display for ExecServerTransportParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ExecServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
f,
"unsupported --listen URL `{listen_url}`; expected `stdio://` or `ws://IP:PORT`"
),
ExecServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
f,
"invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
),
}
}
}
// Marker impl so the parse error can travel as `Box<dyn std::error::Error>`.
impl std::error::Error for ExecServerTransportParseError {}
impl ExecServerTransport {
    /// Listen URL used when the caller does not override the transport.
    pub const DEFAULT_LISTEN_URL: &str = "stdio://";

    /// Parses a `--listen` URL into a transport.
    ///
    /// Accepts exactly `stdio://` or `ws://IP:PORT`. The websocket form must
    /// be a literal socket address; hostnames fail to parse and are rejected.
    pub fn from_listen_url(listen_url: &str) -> Result<Self, ExecServerTransportParseError> {
        match listen_url.strip_prefix("ws://") {
            _ if listen_url == Self::DEFAULT_LISTEN_URL => Ok(Self::Stdio),
            Some(socket_addr) => socket_addr
                .parse::<SocketAddr>()
                .map(|bind_address| Self::WebSocket { bind_address })
                .map_err(|_| {
                    ExecServerTransportParseError::InvalidWebSocketListenUrl(
                        listen_url.to_string(),
                    )
                }),
            None => Err(ExecServerTransportParseError::UnsupportedListenUrl(
                listen_url.to_string(),
            )),
        }
    }
}
// Delegates to `from_listen_url` so CLI argument parsers can consume the
// `--listen` flag directly via `str::parse`.
impl FromStr for ExecServerTransport {
    type Err = ExecServerTransportParseError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::from_listen_url(s)
    }
}
/// Serves the exec server on the selected transport.
///
/// Websocket transports delegate to the accept loop (which only returns on
/// error); stdio serves exactly one connection and then finishes cleanly.
pub(crate) async fn run_transport(
    transport: ExecServerTransport,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    if let ExecServerTransport::WebSocket { bind_address } = transport {
        return run_websocket_listener(bind_address).await;
    }
    run_connection(JsonRpcConnection::from_stdio(
        tokio::io::stdin(),
        tokio::io::stdout(),
        "exec-server stdio".to_string(),
    ))
    .await;
    Ok(())
}
/// Binds the TCP listener and accepts websocket clients forever.
///
/// Each accepted connection is served on its own spawned task; the accept
/// loop only returns if binding or accepting fails.
async fn run_websocket_listener(
    bind_address: SocketAddr,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind(bind_address).await?;
    // Re-read the address so an OS-assigned port (binding to :0) is reported.
    let local_addr = listener.local_addr()?;
    print_websocket_startup_banner(local_addr);
    loop {
        let (stream, peer_addr) = listener.accept().await?;
        tokio::spawn(async move {
            match accept_async(stream).await {
                Ok(websocket) => {
                    run_connection(JsonRpcConnection::from_websocket(
                        websocket,
                        format!("exec-server websocket {peer_addr}"),
                    ))
                    .await;
                }
                Err(err) => {
                    // A failed handshake only drops this client; the listener
                    // keeps accepting others.
                    warn!(
                        "failed to accept exec-server websocket connection from {peer_addr}: {err}"
                    );
                }
            }
        });
    }
}
/// Announces the bound websocket address on stderr.
///
/// NOTE(review): integration tests appear to scrape this banner to discover
/// the bound port, so the wire text must stay stable.
#[allow(clippy::print_stderr)]
fn print_websocket_startup_banner(addr: SocketAddr) {
    eprintln!("codex-exec-server listening on ws://{}", addr);
}
// Unit tests for `--listen` URL parsing.
#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;
    use super::ExecServerTransport;

    // The default `stdio://` URL maps to the stdio transport.
    #[test]
    fn exec_server_transport_parses_stdio_listen_url() {
        let transport =
            ExecServerTransport::from_listen_url(ExecServerTransport::DEFAULT_LISTEN_URL)
                .expect("stdio listen URL should parse");
        assert_eq!(transport, ExecServerTransport::Stdio);
    }

    // A literal IP:PORT websocket URL maps to the websocket transport.
    #[test]
    fn exec_server_transport_parses_websocket_listen_url() {
        let transport = ExecServerTransport::from_listen_url("ws://127.0.0.1:1234")
            .expect("websocket listen URL should parse");
        assert_eq!(
            transport,
            ExecServerTransport::WebSocket {
                bind_address: "127.0.0.1:1234".parse().expect("valid socket address"),
            }
        );
    }

    // Hostnames are not socket addresses, so they must be rejected.
    #[test]
    fn exec_server_transport_rejects_invalid_websocket_listen_url() {
        let err = ExecServerTransport::from_listen_url("ws://localhost:1234")
            .expect_err("hostname bind address should be rejected");
        assert_eq!(
            err.to_string(),
            "invalid websocket --listen URL `ws://localhost:1234`; expected `ws://IP:PORT`"
        );
    }

    // Any scheme other than stdio:// or ws:// is unsupported.
    #[test]
    fn exec_server_transport_rejects_unsupported_listen_url() {
        let err = ExecServerTransport::from_listen_url("http://127.0.0.1:1234")
            .expect_err("unsupported scheme should fail");
        assert_eq!(
            err.to_string(),
            "unsupported --listen URL `http://127.0.0.1:1234`; expected `stdio://` or `ws://IP:PORT`"
        );
    }
}

View File

@@ -1,295 +0,0 @@
#![cfg(unix)]
use std::process::Stdio;
use std::time::Duration;
use anyhow::Context;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_exec_server::ExecOutputStream;
use codex_exec_server::ExecParams;
use codex_exec_server::ExecServerClient;
use codex_exec_server::ExecServerClientConnectOptions;
use codex_exec_server::ExecServerEvent;
use codex_exec_server::ExecServerLaunchCommand;
use codex_exec_server::InitializeParams;
use codex_exec_server::InitializeResponse;
use codex_exec_server::RemoteExecServerConnectArgs;
use codex_exec_server::spawn_local_exec_server;
use codex_utils_cargo_bin::cargo_bin;
use pretty_assertions::assert_eq;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::process::Command;
use tokio::sync::broadcast;
use tokio::time::timeout;
// End-to-end check of the stdio transport: spawn the real binary, speak raw
// newline-delimited JSON-RPC over its stdin/stdout, and verify the
// initialize handshake completes with the expected protocol version.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_server_accepts_initialize_over_stdio() -> anyhow::Result<()> {
    let binary = cargo_bin("codex-exec-server")?;
    let mut child = Command::new(binary);
    child.stdin(Stdio::piped());
    child.stdout(Stdio::piped());
    child.stderr(Stdio::inherit());
    let mut child = child.spawn()?;
    let mut stdin = child.stdin.take().expect("stdin");
    let stdout = child.stdout.take().expect("stdout");
    let mut stdout = BufReader::new(stdout).lines();
    let initialize = JSONRPCMessage::Request(JSONRPCRequest {
        id: RequestId::Integer(1),
        method: "initialize".to_string(),
        params: Some(serde_json::to_value(InitializeParams {
            client_name: "exec-server-test".to_string(),
        })?),
        trace: None,
    });
    // Messages are newline-delimited JSON on the stdio transport.
    stdin
        .write_all(format!("{}\n", serde_json::to_string(&initialize)?).as_bytes())
        .await?;
    let response_line = timeout(Duration::from_secs(5), stdout.next_line()).await??;
    let response_line = response_line.expect("response line");
    let response: JSONRPCMessage = serde_json::from_str(&response_line)?;
    let JSONRPCMessage::Response(JSONRPCResponse { id, result }) = response else {
        panic!("expected initialize response");
    };
    assert_eq!(id, RequestId::Integer(1));
    let initialize_response: InitializeResponse = serde_json::from_value(result)?;
    assert_eq!(initialize_response.protocol_version, "exec-server.v0");
    let initialized = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: Some(serde_json::json!({})),
    });
    stdin
        .write_all(format!("{}\n", serde_json::to_string(&initialized)?).as_bytes())
        .await?;
    // Cleanup is best-effort: the child is killed, not shut down politely.
    child.start_kill()?;
    Ok(())
}
// Spawns the server via the typed client helper, runs an echoing bash loop
// under a TTY, and verifies output streaming plus stdin writes round-trip.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_server_client_streams_output_and_accepts_writes() -> anyhow::Result<()> {
    // Pass PATH through so `bash` can be found inside the managed process.
    let mut env = std::collections::HashMap::new();
    if let Some(path) = std::env::var_os("PATH") {
        env.insert("PATH".to_string(), path.to_string_lossy().into_owned());
    }
    let server = spawn_local_exec_server(
        ExecServerLaunchCommand {
            program: cargo_bin("codex-exec-server")?,
            args: Vec::new(),
        },
        ExecServerClientConnectOptions {
            client_name: "exec-server-test".to_string(),
            initialize_timeout: Duration::from_secs(5),
        },
    )
    .await?;
    let client = server.client();
    let mut events = client.event_receiver();
    // The script prints "ready" then echoes each stdin line back prefixed.
    let response = client
        .exec(ExecParams {
            argv: vec![
                "bash".to_string(),
                "-lc".to_string(),
                "printf 'ready\\n'; while IFS= read -r line; do printf 'echo:%s\\n' \"$line\"; done"
                    .to_string(),
            ],
            cwd: std::env::current_dir()?,
            env,
            tty: true,
            arg0: None,
        })
        .await?;
    let session_id = response.session_id;
    let (stream, ready_output) = recv_until_contains(&mut events, &session_id, "ready").await?;
    assert_eq!(stream, ExecOutputStream::Stdout);
    assert!(
        ready_output.contains("ready"),
        "expected initial ready output"
    );
    client.write(&session_id, b"hello\n".to_vec()).await?;
    let (stream, echoed_output) =
        recv_until_contains(&mut events, &session_id, "echo:hello").await?;
    assert_eq!(stream, ExecOutputStream::Stdout);
    assert!(
        echoed_output.contains("echo:hello"),
        "expected echoed output"
    );
    client.terminate(&session_id).await?;
    Ok(())
}
// Same streaming/write round-trip as the stdio test, but over the websocket
// transport: the bound URL is discovered from the server's stderr banner.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_server_client_connects_over_websocket() -> anyhow::Result<()> {
    // Pass PATH through so `bash` can be found inside the managed process.
    let mut env = std::collections::HashMap::new();
    if let Some(path) = std::env::var_os("PATH") {
        env.insert("PATH".to_string(), path.to_string_lossy().into_owned());
    }
    let binary = cargo_bin("codex-exec-server")?;
    let mut child = Command::new(binary);
    // Port 0 lets the OS choose; the banner reports the real port.
    child.args(["--listen", "ws://127.0.0.1:0"]);
    child.stdin(Stdio::null());
    child.stdout(Stdio::null());
    child.stderr(Stdio::piped());
    let mut child = child.spawn()?;
    let stderr = child.stderr.take().expect("stderr");
    let mut stderr_lines = BufReader::new(stderr).lines();
    let websocket_url = read_websocket_url(&mut stderr_lines).await?;
    let client = ExecServerClient::connect_websocket(RemoteExecServerConnectArgs {
        websocket_url,
        client_name: "exec-server-test".to_string(),
        connect_timeout: Duration::from_secs(5),
        initialize_timeout: Duration::from_secs(5),
    })
    .await?;
    let mut events = client.event_receiver();
    let response = client
        .exec(ExecParams {
            argv: vec![
                "bash".to_string(),
                "-lc".to_string(),
                "printf 'ready\\n'; while IFS= read -r line; do printf 'echo:%s\\n' \"$line\"; done"
                    .to_string(),
            ],
            cwd: std::env::current_dir()?,
            env,
            tty: true,
            arg0: None,
        })
        .await?;
    let session_id = response.session_id;
    let (stream, ready_output) = recv_until_contains(&mut events, &session_id, "ready").await?;
    assert_eq!(stream, ExecOutputStream::Stdout);
    assert!(
        ready_output.contains("ready"),
        "expected initial ready output"
    );
    client.write(&session_id, b"hello\n".to_vec()).await?;
    let (stream, echoed_output) =
        recv_until_contains(&mut events, &session_id, "echo:hello").await?;
    assert_eq!(stream, ExecOutputStream::Stdout);
    assert!(
        echoed_output.contains("echo:hello"),
        "expected echoed output"
    );
    client.terminate(&session_id).await?;
    child.start_kill()?;
    Ok(())
}
// Verifies connection-scoped process cleanup: a managed process started over
// a websocket connection must be killed when that client disconnects. The
// managed process would write a marker file after 2s; if the file never
// appears, the process was terminated in time.
// NOTE(review): this is a timing-based assertion (2s sleep vs 3s wait) and
// could flake on a heavily loaded machine.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_disconnect_terminates_processes_for_that_connection() -> anyhow::Result<()> {
    let mut env = std::collections::HashMap::new();
    if let Some(path) = std::env::var_os("PATH") {
        env.insert("PATH".to_string(), path.to_string_lossy().into_owned());
    }
    // Unique marker path per test run so parallel runs cannot collide.
    let marker_path = std::env::temp_dir().join(format!(
        "codex-exec-server-disconnect-{}-{}",
        std::process::id(),
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)?
            .as_nanos()
    ));
    let _ = std::fs::remove_file(&marker_path);
    let binary = cargo_bin("codex-exec-server")?;
    let mut child = Command::new(binary);
    child.args(["--listen", "ws://127.0.0.1:0"]);
    child.stdin(Stdio::null());
    child.stdout(Stdio::null());
    child.stderr(Stdio::piped());
    let mut child = child.spawn()?;
    let stderr = child.stderr.take().expect("stderr");
    let mut stderr_lines = BufReader::new(stderr).lines();
    let websocket_url = read_websocket_url(&mut stderr_lines).await?;
    // Scope the client so it drops (disconnecting) before the assertion.
    {
        let client = ExecServerClient::connect_websocket(RemoteExecServerConnectArgs {
            websocket_url,
            client_name: "exec-server-test".to_string(),
            connect_timeout: Duration::from_secs(5),
            initialize_timeout: Duration::from_secs(5),
        })
        .await?;
        let _response = client
            .exec(ExecParams {
                argv: vec![
                    "bash".to_string(),
                    "-lc".to_string(),
                    format!("sleep 2; printf disconnected > {}", marker_path.display()),
                ],
                cwd: std::env::current_dir()?,
                env,
                tty: false,
                arg0: None,
            })
            .await?;
    }
    tokio::time::sleep(Duration::from_secs(3)).await;
    assert!(
        !marker_path.exists(),
        "managed process should be terminated when the websocket client disconnects"
    );
    child.start_kill()?;
    let _ = std::fs::remove_file(&marker_path);
    Ok(())
}
/// Reads the server's startup banner from the provided line stream and
/// extracts the advertised `ws://` URL, failing after a 5s timeout.
async fn read_websocket_url<R>(lines: &mut tokio::io::Lines<BufReader<R>>) -> anyhow::Result<String>
where
    R: tokio::io::AsyncRead + Unpin,
{
    let banner = timeout(Duration::from_secs(5), lines.next_line())
        .await??
        .context("missing websocket startup banner")?;
    banner
        .split_whitespace()
        .find(|token| token.starts_with("ws://"))
        .map(str::to_string)
        .context("missing websocket URL in startup banner")
}
/// Accumulates output-delta events for `session_id` until the collected text
/// contains `needle`, or errors after an overall 5s deadline.
///
/// Returns the stream of the final matching delta plus everything collected.
async fn recv_until_contains(
    events: &mut broadcast::Receiver<ExecServerEvent>,
    session_id: &str,
    needle: &str,
) -> anyhow::Result<(ExecOutputStream, String)> {
    let deadline = tokio::time::Instant::now() + Duration::from_secs(5);
    let mut collected = String::new();
    loop {
        // Shrink the per-recv timeout so the total wait stays bounded.
        let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
        let event = timeout(remaining, events.recv()).await??;
        // Events for other sessions (and non-output events) are ignored.
        if let ExecServerEvent::OutputDelta(output_event) = event
            && output_event.session_id == session_id
        {
            collected.push_str(&String::from_utf8_lossy(&output_event.chunk.into_inner()));
            if collected.contains(needle) {
                return Ok((output_event.stream, collected));
            }
        }
    }
}

View File

@@ -36,10 +36,12 @@ use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadUnsubscribeParams;
use codex_app_server_protocol::ThreadUnsubscribeResponse;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use codex_arg0::Arg0DispatchPaths;
use codex_cloud_requirements::cloud_requirements_loader;
use codex_core::AuthManager;
@@ -70,6 +72,9 @@ use codex_protocol::protocol::ReviewRequest;
use codex_protocol::protocol::ReviewTarget;
use codex_protocol::protocol::SessionConfiguredEvent;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::TurnCompleteEvent;
use codex_protocol::user_input::UserInput;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_oss::ensure_oss_provider_ready;
@@ -782,6 +787,24 @@ async fn run_exec_session(args: ExecRunArgs) -> anyhow::Result<()> {
.await;
}
InProcessServerEvent::ServerNotification(notification) => {
if let Some((event, terminal_error)) = decode_turn_completed_notification_for_exec(
&notification,
primary_thread_id_for_requests.as_str(),
task_id.as_str(),
) {
error_seen |= terminal_error;
if handle_exec_status(
event_processor.process_event(event),
&client,
&mut request_ids,
&primary_thread_id_for_requests,
)
.await
{
break;
}
continue;
}
if let ServerNotification::Error(payload) = &notification
&& payload.thread_id == primary_thread_id_for_requests
&& payload.turn_id == task_id
@@ -850,25 +873,15 @@ async fn run_exec_session(args: ExecRunArgs) -> anyhow::Result<()> {
_ => {}
}
match event_processor.process_event(event) {
CodexStatus::Running => {}
CodexStatus::InitiateShutdown => {
if let Err(err) = request_shutdown(
&client,
&mut request_ids,
&primary_thread_id_for_requests,
)
.await
{
warn!("thread/unsubscribe failed during shutdown: {err}");
}
break;
}
CodexStatus::Shutdown => {
// `ShutdownComplete` does not identify which attached
// thread emitted it, so subagent shutdowns must not end
// the primary exec loop early.
}
if handle_exec_status(
event_processor.process_event(event),
&client,
&mut request_ids,
&primary_thread_id_for_requests,
)
.await
{
break;
}
}
InProcessServerEvent::Lagged { skipped } => {
@@ -1131,6 +1144,80 @@ fn canceled_mcp_server_elicitation_response() -> Result<Value, String> {
.map_err(|err| format!("failed to encode mcp elicitation response: {err}"))
}
/// Applies an event-processor status to the exec loop.
///
/// Returns `true` when the primary exec loop should terminate.
async fn handle_exec_status(
    status: CodexStatus,
    client: &InProcessAppServerClient,
    request_ids: &mut RequestIdSequencer,
    primary_thread_id_for_requests: &str,
) -> bool {
    if let CodexStatus::InitiateShutdown = status {
        if let Err(err) =
            request_shutdown(client, request_ids, primary_thread_id_for_requests).await
        {
            warn!("thread/unsubscribe failed during shutdown: {err}");
        }
        return true;
    }
    // `Running` keeps looping. `Shutdown` is deliberately ignored:
    // `ShutdownComplete` does not identify which attached thread emitted it,
    // so subagent shutdowns must not end the primary exec loop early.
    false
}
/// Converts a typed `turn/completed` notification into the legacy exec event
/// the event processor understands.
///
/// Returns `None` for notifications about other threads/turns and for
/// still-in-progress statuses. The boolean flags whether the turn ended in a
/// terminal error (`TurnStatus::Failed`), letting the caller set the exit
/// status.
fn decode_turn_completed_notification_for_exec(
    notification: &ServerNotification,
    primary_thread_id_for_requests: &str,
    task_id: &str,
) -> Option<(Event, bool)> {
    let ServerNotification::TurnCompleted(TurnCompletedNotification { thread_id, turn }) =
        notification
    else {
        return None;
    };
    if thread_id != primary_thread_id_for_requests || turn.id != task_id {
        return None;
    }
    match turn.status {
        // Completed and failed turns surface the same `TurnComplete` event;
        // only the terminal-error flag differs, so the two arms are merged.
        TurnStatus::Completed | TurnStatus::Failed => Some((
            Event {
                id: String::new(),
                msg: EventMsg::TurnComplete(TurnCompleteEvent {
                    turn_id: turn.id.clone(),
                    last_agent_message: None,
                }),
            },
            matches!(turn.status, TurnStatus::Failed),
        )),
        TurnStatus::Interrupted => Some((
            Event {
                id: String::new(),
                msg: EventMsg::TurnAborted(TurnAbortedEvent {
                    turn_id: Some(turn.id.clone()),
                    reason: TurnAbortReason::Interrupted,
                }),
            },
            false,
        )),
        // A completion notification carrying `InProgress` has nothing to
        // report yet.
        TurnStatus::InProgress => None,
    }
}
async fn request_shutdown(
client: &InProcessAppServerClient,
request_ids: &mut RequestIdSequencer,
@@ -1922,4 +2009,118 @@ mod tests {
ApprovalsReviewer::GuardianSubagent
);
}
// Notifications for a different thread id or turn id must decode to `None`
// so subagent turns cannot end the primary exec loop.
#[test]
fn decode_turn_completed_notification_ignores_other_threads_and_turns() {
    let thread_mismatch = ServerNotification::TurnCompleted(TurnCompletedNotification {
        thread_id: "thread-a".to_string(),
        turn: codex_app_server_protocol::Turn {
            id: "turn-a".to_string(),
            items: Vec::new(),
            status: TurnStatus::Completed,
            error: None,
        },
    });
    assert!(
        decode_turn_completed_notification_for_exec(&thread_mismatch, "thread-b", "turn-a")
            .is_none()
    );
    let turn_mismatch = ServerNotification::TurnCompleted(TurnCompletedNotification {
        thread_id: "thread-a".to_string(),
        turn: codex_app_server_protocol::Turn {
            id: "turn-a".to_string(),
            items: Vec::new(),
            status: TurnStatus::Completed,
            error: None,
        },
    });
    assert!(
        decode_turn_completed_notification_for_exec(&turn_mismatch, "thread-a", "turn-b")
            .is_none()
    );
}
// Both `Completed` and `Failed` statuses map to a `TurnComplete` event; the
// terminal-error flag distinguishes them (false/true respectively).
#[test]
fn decode_turn_completed_notification_maps_completed_and_failed_turns() {
    let completed_notification = ServerNotification::TurnCompleted(TurnCompletedNotification {
        thread_id: "thread-a".to_string(),
        turn: codex_app_server_protocol::Turn {
            id: "turn-a".to_string(),
            items: Vec::new(),
            status: TurnStatus::Completed,
            error: None,
        },
    });
    let Some((completed, completed_error)) = decode_turn_completed_notification_for_exec(
        &completed_notification,
        "thread-a",
        "turn-a",
    ) else {
        panic!("completed turn should decode");
    };
    assert!(!completed_error);
    match completed.msg {
        EventMsg::TurnComplete(event) => {
            assert_eq!(event.turn_id, "turn-a");
            assert_eq!(event.last_agent_message, None);
        }
        other => panic!("unexpected event: {other:?}"),
    }
    let failed_notification = ServerNotification::TurnCompleted(TurnCompletedNotification {
        thread_id: "thread-a".to_string(),
        turn: codex_app_server_protocol::Turn {
            id: "turn-a".to_string(),
            items: Vec::new(),
            status: TurnStatus::Failed,
            error: Some(codex_app_server_protocol::TurnError {
                message: "synthetic".to_string(),
                codex_error_info: None,
                additional_details: None,
            }),
        },
    });
    let Some((failed, failed_error)) =
        decode_turn_completed_notification_for_exec(&failed_notification, "thread-a", "turn-a")
    else {
        panic!("failed turn should decode");
    };
    assert!(failed_error);
    match failed.msg {
        EventMsg::TurnComplete(event) => {
            assert_eq!(event.turn_id, "turn-a");
            assert_eq!(event.last_agent_message, None);
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
// An `Interrupted` status maps to `TurnAborted` with reason `Interrupted`
// and is not treated as a terminal error.
#[test]
fn decode_turn_completed_notification_maps_interrupted_turns() {
    let interrupted_notification =
        ServerNotification::TurnCompleted(TurnCompletedNotification {
            thread_id: "thread-a".to_string(),
            turn: codex_app_server_protocol::Turn {
                id: "turn-a".to_string(),
                items: Vec::new(),
                status: TurnStatus::Interrupted,
                error: None,
            },
        });
    let Some((event, terminal_error)) = decode_turn_completed_notification_for_exec(
        &interrupted_notification,
        "thread-a",
        "turn-a",
    ) else {
        panic!("interrupted turn should decode");
    };
    assert!(!terminal_error);
    match event.msg {
        EventMsg::TurnAborted(event) => {
            assert_eq!(event.turn_id.as_deref(), Some("turn-a"));
            assert_eq!(event.reason, TurnAbortReason::Interrupted);
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
}

View File

@@ -0,0 +1,237 @@
#![cfg(not(target_os = "windows"))]
#![allow(clippy::expect_used, clippy::unwrap_used)]
use anyhow::Result;
use core_test_support::test_codex_exec::test_codex_exec;
use serde_json::Value;
use serde_json::json;
use std::fs;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::time::Duration;
use wiremock::Mock;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path_regex;
/// Scripted mock responder for the model `/responses` endpoint that drives a
/// fanout job: one main-turn spawn call, then one worker reply per item.
struct AgentJobsResponder {
    // JSON arguments handed to the main turn's `spawn_agents_on_csv` call.
    spawn_args_json: String,
    // Set once the main turn's spawn call has been emitted.
    seen_main: AtomicBool,
    // Monotonic counter used to mint unique worker call ids.
    call_counter: AtomicUsize,
}
impl AgentJobsResponder {
    /// Creates a responder that emits `spawn_args_json` on the first
    /// main-turn request and fresh state for both atomics.
    fn new(spawn_args_json: String) -> Self {
        Self {
            spawn_args_json,
            seen_main: AtomicBool::new(false),
            call_counter: AtomicUsize::new(0),
        }
    }
}
impl Respond for AgentJobsResponder {
    // Scripted dispatch, checked in priority order:
    //   1. tool-output follow-ups -> empty completion,
    //   2. worker-item prompts -> emit `report_agent_job_result`,
    //   3. first main-turn request -> emit `spawn_agents_on_csv`,
    //   4. anything else -> empty completion.
    fn respond(&self, request: &wiremock::Request) -> ResponseTemplate {
        let body_bytes = decode_body_bytes(request);
        // Unparseable bodies degrade to Null and fall through to the default.
        let body: Value = serde_json::from_slice(&body_bytes).unwrap_or(Value::Null);
        if has_function_call_output(&body) {
            return sse_response(sse(vec![
                ev_response_created("resp-tool"),
                ev_completed("resp-tool"),
            ]));
        }
        if let Some((job_id, item_id)) = extract_job_and_item(&body) {
            // Unique call id per worker so outputs can be correlated.
            let call_id = format!(
                "call-worker-{}",
                self.call_counter.fetch_add(1, Ordering::SeqCst)
            );
            let args = json!({
                "job_id": job_id,
                "item_id": item_id,
                "result": { "item_id": item_id }
            });
            let args_json = serde_json::to_string(&args).unwrap_or_else(|err| {
                panic!("worker args serialize: {err}");
            });
            return sse_response(sse(vec![
                ev_response_created("resp-worker"),
                ev_function_call(&call_id, "report_agent_job_result", &args_json),
                ev_completed("resp-worker"),
            ]));
        }
        // `swap` atomically marks the main turn as seen exactly once.
        if !self.seen_main.swap(true, Ordering::SeqCst) {
            return sse_response(sse(vec![
                ev_response_created("resp-main"),
                ev_function_call("call-spawn", "spawn_agents_on_csv", &self.spawn_args_json),
                ev_completed("resp-main"),
            ]));
        }
        sse_response(sse(vec![
            ev_response_created("resp-default"),
            ev_completed("resp-default"),
        ]))
    }
}
// Returns the raw request body bytes.
// NOTE(review): despite the name this performs no decoding — it is a plain
// clone; presumably kept as a hook for compressed bodies. Confirm before
// relying on it to decompress.
fn decode_body_bytes(request: &wiremock::Request) -> Vec<u8> {
    request.body.clone()
}
/// True when any item in the request's `input` array is a
/// `function_call_output`, i.e. the turn is reporting a tool result.
fn has_function_call_output(body: &Value) -> bool {
    let Some(items) = body.get("input").and_then(Value::as_array) else {
        return false;
    };
    items
        .iter()
        .any(|item| item.get("type").and_then(Value::as_str) == Some("function_call_output"))
}
/// Pulls the job and item ids out of a worker-turn prompt.
///
/// Worker prompts embed `Job ID: …` / `Item ID: …` lines somewhere in the
/// message inputs or the `instructions` field; requests that do not contain
/// the worker preamble sentence return `None`.
fn extract_job_and_item(body: &Value) -> Option<(String, String)> {
    let mut combined = message_input_texts(body).join("\n");
    if let Some(instructions) = body.get("instructions").and_then(Value::as_str) {
        combined.push('\n');
        combined.push_str(instructions);
    }
    if !combined.contains("You are processing one item for a generic agent job.") {
        return None;
    }
    // When a prefix appears on several lines, the last occurrence wins.
    let find_last = |prefix: &str| {
        combined
            .lines()
            .rev()
            .find_map(|line| line.strip_prefix(prefix))
            .map(|value| value.trim().to_string())
    };
    let job_id = find_last("Job ID: ")?;
    let item_id = find_last("Item ID: ")?;
    Some((job_id, item_id))
}
/// Collects every `input_text` span found inside `message` items of the
/// request body's `input` array, in order.
fn message_input_texts(body: &Value) -> Vec<String> {
    let Some(items) = body.get("input").and_then(Value::as_array) else {
        return Vec::new();
    };
    let mut texts = Vec::new();
    for item in items {
        if item.get("type").and_then(Value::as_str) != Some("message") {
            continue;
        }
        let Some(content) = item.get("content").and_then(Value::as_array) else {
            continue;
        };
        for span in content {
            if span.get("type").and_then(Value::as_str) != Some("input_text") {
                continue;
            }
            if let Some(text) = span.get("text").and_then(Value::as_str) {
                texts.push(text.to_string());
            }
        }
    }
    texts
}
/// Renders a list of JSON events as a server-sent-events body terminated by
/// the `[DONE]` sentinel.
fn sse(events: Vec<serde_json::Value>) -> String {
    events
        .into_iter()
        .map(|event| format!("data: {event}\n\n"))
        .chain(std::iter::once("data: [DONE]\n\n".to_string()))
        .collect()
}
// Wraps an SSE body in a 200 response with the event-stream content type.
fn sse_response(body: String) -> ResponseTemplate {
    ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_string(body)
}
// Minimal `response.created` SSE event for the given response id.
fn ev_response_created(response_id: &str) -> serde_json::Value {
    json!({
        "type": "response.created",
        "response": {
            "id": response_id,
            "model": "gpt-5",
            "output": []
        }
    })
}
// Completed `function_call` output item carrying the tool name and its
// pre-serialized JSON arguments string.
fn ev_function_call(call_id: &str, name: &str, arguments: &str) -> serde_json::Value {
    json!({
        "type": "response.output_item.done",
        "output_index": 0,
        "item": {
            "type": "function_call",
            "id": format!("item-{call_id}"),
            "call_id": call_id,
            "name": name,
            "arguments": arguments,
            "status": "completed"
        }
    })
}
// `response.completed` SSE event with token-usage stubs so accounting paths
// see non-zero counts.
fn ev_completed(response_id: &str) -> serde_json::Value {
    json!({
        "type": "response.completed",
        "response": {
            "id": response_id,
            "usage": {
                "input_tokens": 1,
                "input_tokens_details": {"cached_tokens": 0},
                "output_tokens": 1,
                "output_tokens_details": {"reasoning_tokens": 0},
                "total_tokens": 2
            }
        }
    })
}
// Drives a full fanout job through `codex exec` against the scripted mock
// model server and checks the run terminates with an output row per input.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_spawn_agents_on_csv_exits_after_mock_job_completion() -> Result<()> {
    let test = test_codex_exec();
    let server = wiremock::MockServer::start().await;
    let input_path = test.cwd_path().join("agent_jobs_input.csv");
    let output_path = test.cwd_path().join("agent_jobs_output.csv");
    // Header plus 100 data rows.
    let mut csv = String::from("name\n");
    for index in 1..=100 {
        csv.push_str(&format!("cat_{index}\n"));
    }
    fs::write(&input_path, csv)?;
    let args = json!({
        "csv_path": input_path.display().to_string(),
        "instruction": "Write a playful 2-line poem about the cat named {name}. Return a JSON object with keys name and poem. Call report_agent_job_result exactly once and then stop.",
        "output_csv_path": output_path.display().to_string(),
        "max_concurrency": 64,
    });
    let args_json = serde_json::to_string(&args)?;
    Mock::given(method("POST"))
        .and(path_regex(".*/responses$"))
        .respond_with(AgentJobsResponder::new(args_json))
        .mount(&server)
        .await;
    let mut cmd = test.cmd_with_server(&server);
    cmd.timeout(Duration::from_secs(60));
    cmd.arg("-c")
        .arg("features.enable_fanout=true")
        .arg("-c")
        .arg("agents.max_threads=64")
        .arg("--skip-git-repo-check")
        .arg("Use spawn_agents_on_csv on the provided CSV and do not do work yourself.")
        .assert()
        .success();
    let output = fs::read_to_string(&output_path)?;
    // Header + one result row per input item.
    assert_eq!(output.lines().count(), 101);
    Ok(())
}

View File

@@ -1,5 +1,6 @@
// Aggregates all former standalone integration tests as modules.
mod add_dir;
mod agent_jobs;
mod apply_patch;
mod auth_env;
mod ephemeral;