Add exec-server exec RPC implementation (#15090)

Stacked PR 2/3, based on the stub PR.

Adds the exec RPC implementation and process/event flow in exec-server
only.

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
starr-openai
2026-03-19 12:00:36 -07:00
committed by GitHub
parent b87ba0a3cc
commit 1d210f639e
16 changed files with 1891 additions and 141 deletions

View File

@@ -0,0 +1,170 @@
use std::io;
use std::sync::Arc;
use base64::Engine as _;
use base64::engine::general_purpose::STANDARD;
use codex_app_server_protocol::FsCopyParams;
use codex_app_server_protocol::FsCopyResponse;
use codex_app_server_protocol::FsCreateDirectoryParams;
use codex_app_server_protocol::FsCreateDirectoryResponse;
use codex_app_server_protocol::FsGetMetadataParams;
use codex_app_server_protocol::FsGetMetadataResponse;
use codex_app_server_protocol::FsReadDirectoryEntry;
use codex_app_server_protocol::FsReadDirectoryParams;
use codex_app_server_protocol::FsReadDirectoryResponse;
use codex_app_server_protocol::FsReadFileParams;
use codex_app_server_protocol::FsReadFileResponse;
use codex_app_server_protocol::FsRemoveParams;
use codex_app_server_protocol::FsRemoveResponse;
use codex_app_server_protocol::FsWriteFileParams;
use codex_app_server_protocol::FsWriteFileResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use crate::CopyOptions;
use crate::CreateDirectoryOptions;
use crate::Environment;
use crate::ExecutorFileSystem;
use crate::RemoveOptions;
use crate::rpc::internal_error;
use crate::rpc::invalid_request;
/// JSON-RPC-facing wrapper around the backing filesystem implementation.
/// Cheap to clone: the `ExecutorFileSystem` is shared behind an `Arc`.
#[derive(Clone)]
pub(crate) struct ExecServerFileSystem {
    // Trait object so the concrete backend is chosen by the environment.
    file_system: Arc<dyn ExecutorFileSystem>,
}
impl Default for ExecServerFileSystem {
    /// Builds the wrapper over the filesystem provided by the current
    /// process `Environment`.
    fn default() -> Self {
        Self {
            file_system: Arc::new(Environment.get_filesystem()),
        }
    }
}
impl ExecServerFileSystem {
    /// `fs/readFile`: reads the file at `params.path` and returns its
    /// contents base64-encoded.
    pub(crate) async fn read_file(
        &self,
        params: FsReadFileParams,
    ) -> Result<FsReadFileResponse, JSONRPCErrorError> {
        let read_result = self.file_system.read_file(&params.path).await;
        match read_result {
            Ok(bytes) => Ok(FsReadFileResponse {
                data_base64: STANDARD.encode(bytes),
            }),
            Err(err) => Err(map_fs_error(err)),
        }
    }
    /// `fs/writeFile`: decodes `dataBase64` and writes it to `params.path`.
    /// Malformed base64 is the caller's fault and maps to an invalid request.
    pub(crate) async fn write_file(
        &self,
        params: FsWriteFileParams,
    ) -> Result<FsWriteFileResponse, JSONRPCErrorError> {
        let decoded = match STANDARD.decode(params.data_base64) {
            Ok(decoded) => decoded,
            Err(err) => {
                return Err(invalid_request(format!(
                    "fs/writeFile requires valid base64 dataBase64: {err}"
                )));
            }
        };
        self.file_system
            .write_file(&params.path, decoded)
            .await
            .map_err(map_fs_error)?;
        Ok(FsWriteFileResponse {})
    }
    /// `fs/createDirectory`: creates the directory, recursively by default.
    pub(crate) async fn create_directory(
        &self,
        params: FsCreateDirectoryParams,
    ) -> Result<FsCreateDirectoryResponse, JSONRPCErrorError> {
        // Recursive creation unless the caller explicitly opts out.
        let options = CreateDirectoryOptions {
            recursive: params.recursive.unwrap_or(true),
        };
        self.file_system
            .create_directory(&params.path, options)
            .await
            .map_err(map_fs_error)?;
        Ok(FsCreateDirectoryResponse {})
    }
    /// `fs/getMetadata`: reports file-vs-directory and timestamps for a path.
    pub(crate) async fn get_metadata(
        &self,
        params: FsGetMetadataParams,
    ) -> Result<FsGetMetadataResponse, JSONRPCErrorError> {
        self.file_system
            .get_metadata(&params.path)
            .await
            .map(|metadata| FsGetMetadataResponse {
                is_directory: metadata.is_directory,
                is_file: metadata.is_file,
                created_at_ms: metadata.created_at_ms,
                modified_at_ms: metadata.modified_at_ms,
            })
            .map_err(map_fs_error)
    }
    /// `fs/readDirectory`: lists directory entries with their basic kinds.
    pub(crate) async fn read_directory(
        &self,
        params: FsReadDirectoryParams,
    ) -> Result<FsReadDirectoryResponse, JSONRPCErrorError> {
        let raw_entries = self
            .file_system
            .read_directory(&params.path)
            .await
            .map_err(map_fs_error)?;
        let mut entries = Vec::with_capacity(raw_entries.len());
        for raw in raw_entries {
            entries.push(FsReadDirectoryEntry {
                file_name: raw.file_name,
                is_directory: raw.is_directory,
                is_file: raw.is_file,
            });
        }
        Ok(FsReadDirectoryResponse { entries })
    }
    /// `fs/remove`: removes a path; recursive and forced by default.
    pub(crate) async fn remove(
        &self,
        params: FsRemoveParams,
    ) -> Result<FsRemoveResponse, JSONRPCErrorError> {
        let options = RemoveOptions {
            recursive: params.recursive.unwrap_or(true),
            force: params.force.unwrap_or(true),
        };
        self.file_system
            .remove(&params.path, options)
            .await
            .map_err(map_fs_error)?;
        Ok(FsRemoveResponse {})
    }
    /// `fs/copy`: copies `sourcePath` to `destinationPath`.
    pub(crate) async fn copy(
        &self,
        params: FsCopyParams,
    ) -> Result<FsCopyResponse, JSONRPCErrorError> {
        let options = CopyOptions {
            recursive: params.recursive,
        };
        self.file_system
            .copy(&params.source_path, &params.destination_path, options)
            .await
            .map_err(map_fs_error)?;
        Ok(FsCopyResponse {})
    }
}
/// Translates an I/O failure into the JSON-RPC error space: `InvalidInput`
/// is treated as the caller's mistake (invalid request), everything else as
/// a server-side internal error.
fn map_fs_error(err: io::Error) -> JSONRPCErrorError {
    match err.kind() {
        io::ErrorKind::InvalidInput => invalid_request(err.to_string()),
        _ => internal_error(err.to_string()),
    }
}

View File

@@ -1,25 +1,112 @@
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::time::Duration;
use codex_app_server_protocol::FsCopyParams;
use codex_app_server_protocol::FsCopyResponse;
use codex_app_server_protocol::FsCreateDirectoryParams;
use codex_app_server_protocol::FsCreateDirectoryResponse;
use codex_app_server_protocol::FsGetMetadataParams;
use codex_app_server_protocol::FsGetMetadataResponse;
use codex_app_server_protocol::FsReadDirectoryParams;
use codex_app_server_protocol::FsReadDirectoryResponse;
use codex_app_server_protocol::FsReadFileParams;
use codex_app_server_protocol::FsReadFileResponse;
use codex_app_server_protocol::FsRemoveParams;
use codex_app_server_protocol::FsRemoveResponse;
use codex_app_server_protocol::FsWriteFileParams;
use codex_app_server_protocol::FsWriteFileResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_utils_pty::ExecCommandSession;
use codex_utils_pty::TerminalSize;
use tokio::sync::Mutex;
use tokio::sync::Notify;
use tracing::warn;
use crate::protocol::ExecExitedNotification;
use crate::protocol::ExecOutputDeltaNotification;
use crate::protocol::ExecOutputStream;
use crate::protocol::ExecParams;
use crate::protocol::ExecResponse;
use crate::protocol::InitializeResponse;
use crate::server::jsonrpc::invalid_request;
use crate::protocol::ProcessOutputChunk;
use crate::protocol::ReadParams;
use crate::protocol::ReadResponse;
use crate::protocol::TerminateParams;
use crate::protocol::TerminateResponse;
use crate::protocol::WriteParams;
use crate::protocol::WriteResponse;
use crate::rpc::RpcNotificationSender;
use crate::rpc::internal_error;
use crate::rpc::invalid_params;
use crate::rpc::invalid_request;
use crate::server::filesystem::ExecServerFileSystem;
/// Per-process cap (1 MiB) on output retained for replay via `exec/read`;
/// oldest chunks are evicted once the cap is exceeded.
const RETAINED_OUTPUT_BYTES_PER_PROCESS: usize = 1024 * 1024;
/// How long an exited process stays in the table so clients can still read
/// its buffered output and exit code. Short under test to keep tests fast.
#[cfg(test)]
const EXITED_PROCESS_RETENTION: Duration = Duration::from_millis(25);
#[cfg(not(test))]
const EXITED_PROCESS_RETENTION: Duration = Duration::from_secs(30);
/// One chunk of process output buffered for replay through `exec/read`.
#[derive(Clone)]
struct RetainedOutputChunk {
    // Per-process sequence number; numbering starts at 1 (see `RunningProcess::next_seq`).
    seq: u64,
    // Which stream produced the chunk (stdout / stderr / merged pty).
    stream: ExecOutputStream,
    chunk: Vec<u8>,
}
/// Book-keeping for a spawned child: its session handle plus the replayable
/// output buffer consumed by `exec/read`.
struct RunningProcess {
    session: ExecCommandSession,
    // True when spawned under a PTY; gates stdin writes in `exec_write`.
    tty: bool,
    // Retained output, oldest first; capped by RETAINED_OUTPUT_BYTES_PER_PROCESS.
    output: VecDeque<RetainedOutputChunk>,
    // Byte total of `output`, maintained incrementally for cheap cap checks.
    retained_bytes: usize,
    // Sequence number the next output chunk will receive.
    next_seq: u64,
    // Set by `watch_exit` once the process terminates.
    exit_code: Option<i32>,
    // Wakes `exec_read` waiters on new output or exit.
    output_notify: Arc<Notify>,
}
/// Lifecycle of a process-table slot. `Starting` reserves the id while the
/// spawn is in flight so a concurrent `exec` with the same id fails fast.
enum ProcessEntry {
    Starting,
    // Boxed to keep the enum small; RunningProcess is comparatively large.
    Running(Box<RunningProcess>),
}
/// Per-connection RPC handler: owns the process table, the filesystem
/// facade, and the initialize-handshake state.
pub(crate) struct ExecServerHandler {
    // Outbound channel for exec output/exit notifications.
    notifications: RpcNotificationSender,
    file_system: ExecServerFileSystem,
    // Shared with the background stream/exit tasks spawned by `exec`.
    processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
    // Set when the `initialize` request is received.
    initialize_requested: AtomicBool,
    // Set when the `initialized` notification is received.
    initialized: AtomicBool,
}
impl ExecServerHandler {
pub(crate) fn new() -> Self {
/// Creates a handler that publishes exec/exit notifications through
/// `notifications`. Starts uninitialized: callers must complete the
/// initialize handshake before other methods succeed.
pub(crate) fn new(notifications: RpcNotificationSender) -> Self {
    Self {
        notifications,
        file_system: ExecServerFileSystem::default(),
        processes: Arc::new(Mutex::new(HashMap::new())),
        initialize_requested: AtomicBool::new(false),
        initialized: AtomicBool::new(false),
    }
}
pub(crate) async fn shutdown(&self) {}
/// Terminates every running child and clears the process table.
pub(crate) async fn shutdown(&self) {
    // Drain the table under the lock, but terminate the sessions only
    // after releasing it.
    let mut running = Vec::new();
    {
        let mut table = self.processes.lock().await;
        for (_, entry) in table.drain() {
            if let ProcessEntry::Running(process) = entry {
                running.push(process);
            }
        }
    }
    for process in running {
        process.session.terminate();
    }
}
pub(crate) fn initialize(&self) -> Result<InitializeResponse, JSONRPCErrorError> {
if self.initialize_requested.swap(true, Ordering::SeqCst) {
@@ -37,4 +124,394 @@ impl ExecServerHandler {
self.initialized.store(true, Ordering::SeqCst);
Ok(())
}
/// Handshake guard shared by every method family: both the `initialize`
/// request and the `initialized` notification must have been observed.
fn require_initialized_for(&self, method_family: &str) -> Result<(), JSONRPCErrorError> {
    let requested = self.initialize_requested.load(Ordering::SeqCst);
    let acknowledged = self.initialized.load(Ordering::SeqCst);
    match (requested, acknowledged) {
        (false, _) => Err(invalid_request(format!(
            "client must call initialize before using {method_family} methods"
        ))),
        (true, false) => Err(invalid_request(format!(
            "client must send initialized before using {method_family} methods"
        ))),
        (true, true) => Ok(()),
    }
}
/// Spawns a child process identified by `params.process_id` and starts the
/// background tasks that stream its output and watch for its exit.
///
/// The id is reserved with a `Starting` placeholder before spawning so a
/// concurrent `exec` with the same id is rejected instead of racing.
pub(crate) async fn exec(&self, params: ExecParams) -> Result<ExecResponse, JSONRPCErrorError> {
    self.require_initialized_for("exec")?;
    let process_id = params.process_id.clone();
    let (program, args) = params
        .argv
        .split_first()
        .ok_or_else(|| invalid_params("argv must not be empty".to_string()))?;
    // Reserve the id first; duplicates fail without spawning anything.
    {
        let mut process_map = self.processes.lock().await;
        if process_map.contains_key(&process_id) {
            return Err(invalid_request(format!(
                "process {process_id} already exists"
            )));
        }
        process_map.insert(process_id.clone(), ProcessEntry::Starting);
    }
    // tty => interactive PTY session; otherwise plain pipes with no stdin.
    let spawned_result = if params.tty {
        codex_utils_pty::spawn_pty_process(
            program,
            args,
            params.cwd.as_path(),
            &params.env,
            &params.arg0,
            TerminalSize::default(),
        )
        .await
    } else {
        codex_utils_pty::spawn_pipe_process_no_stdin(
            program,
            args,
            params.cwd.as_path(),
            &params.env,
            &params.arg0,
        )
        .await
    };
    let spawned = match spawned_result {
        Ok(spawned) => spawned,
        Err(err) => {
            // Roll back the reservation, but only if it is still the
            // `Starting` placeholder inserted above (e.g. shutdown may
            // have drained the map in the meantime).
            let mut process_map = self.processes.lock().await;
            if matches!(process_map.get(&process_id), Some(ProcessEntry::Starting)) {
                process_map.remove(&process_id);
            }
            return Err(internal_error(err.to_string()));
        }
    };
    let output_notify = Arc::new(Notify::new());
    {
        // Promote the reservation to a live entry; seq numbering starts at 1.
        let mut process_map = self.processes.lock().await;
        process_map.insert(
            process_id.clone(),
            ProcessEntry::Running(Box::new(RunningProcess {
                session: spawned.session,
                tty: params.tty,
                output: VecDeque::new(),
                retained_bytes: 0,
                next_seq: 1,
                exit_code: None,
                output_notify: Arc::clone(&output_notify),
            })),
        );
    }
    // Under a PTY both receivers are labelled `Pty`; otherwise they map to
    // stdout and stderr respectively.
    tokio::spawn(stream_output(
        process_id.clone(),
        if params.tty {
            ExecOutputStream::Pty
        } else {
            ExecOutputStream::Stdout
        },
        spawned.stdout_rx,
        self.notifications.clone(),
        Arc::clone(&self.processes),
        Arc::clone(&output_notify),
    ));
    tokio::spawn(stream_output(
        process_id.clone(),
        if params.tty {
            ExecOutputStream::Pty
        } else {
            ExecOutputStream::Stderr
        },
        spawned.stderr_rx,
        self.notifications.clone(),
        Arc::clone(&self.processes),
        Arc::clone(&output_notify),
    ));
    tokio::spawn(watch_exit(
        process_id.clone(),
        spawned.exit_rx,
        self.notifications.clone(),
        Arc::clone(&self.processes),
        output_notify,
    ));
    Ok(ExecResponse { process_id })
}
/// Returns retained output with `seq > after_seq`, waiting up to `wait_ms`
/// for new output when none is immediately available.
///
/// `max_bytes` is a soft cap: the first eligible chunk is always included
/// even if it alone exceeds the cap. `next_seq` in the response is the
/// cursor to pass as `after_seq` on the next call.
pub(crate) async fn exec_read(
    &self,
    params: ReadParams,
) -> Result<ReadResponse, JSONRPCErrorError> {
    self.require_initialized_for("exec")?;
    let after_seq = params.after_seq.unwrap_or(0);
    let max_bytes = params.max_bytes.unwrap_or(usize::MAX);
    let wait = Duration::from_millis(params.wait_ms.unwrap_or(0));
    let deadline = tokio::time::Instant::now() + wait;
    loop {
        // Snapshot under the lock; the cloned notify handle lets us wait
        // for more output after releasing it.
        let (response, output_notify) = {
            let process_map = self.processes.lock().await;
            let process = process_map.get(&params.process_id).ok_or_else(|| {
                invalid_request(format!("unknown process id {}", params.process_id))
            })?;
            let ProcessEntry::Running(process) = process else {
                return Err(invalid_request(format!(
                    "process id {} is starting",
                    params.process_id
                )));
            };
            let mut chunks = Vec::new();
            let mut total_bytes = 0;
            // Cursor when nothing qualifies: the process's own next seq.
            let mut next_seq = process.next_seq;
            for retained in process.output.iter().filter(|chunk| chunk.seq > after_seq) {
                let chunk_len = retained.chunk.len();
                // Never return an empty batch just because the first
                // eligible chunk exceeds max_bytes on its own.
                if !chunks.is_empty() && total_bytes + chunk_len > max_bytes {
                    break;
                }
                total_bytes += chunk_len;
                chunks.push(ProcessOutputChunk {
                    seq: retained.seq,
                    stream: retained.stream,
                    chunk: retained.chunk.clone().into(),
                });
                next_seq = retained.seq + 1;
                if total_bytes >= max_bytes {
                    break;
                }
            }
            (
                ReadResponse {
                    chunks,
                    next_seq,
                    exited: process.exit_code.is_some(),
                    exit_code: process.exit_code,
                },
                Arc::clone(&process.output_notify),
            )
        };
        // Return immediately on data, on exit, or once the deadline passed.
        if !response.chunks.is_empty()
            || response.exited
            || tokio::time::Instant::now() >= deadline
        {
            return Ok(response);
        }
        let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
        if remaining.is_zero() {
            return Ok(response);
        }
        // Woken by new output or exit; a plain timeout just loops once more
        // and the deadline check above returns the (empty) response.
        let _ = tokio::time::timeout(remaining, output_notify.notified()).await;
    }
}
/// Writes `params.chunk` to the process's stdin. Only PTY processes accept
/// input; pipe processes are spawned without stdin.
pub(crate) async fn exec_write(
    &self,
    params: WriteParams,
) -> Result<WriteResponse, JSONRPCErrorError> {
    self.require_initialized_for("exec")?;
    // Resolve the stdin sender while holding the lock, then release the
    // lock before awaiting the channel send.
    let sender = {
        let table = self.processes.lock().await;
        match table.get(&params.process_id) {
            None => {
                return Err(invalid_request(format!(
                    "unknown process id {}",
                    params.process_id
                )));
            }
            Some(ProcessEntry::Starting) => {
                return Err(invalid_request(format!(
                    "process id {} is starting",
                    params.process_id
                )));
            }
            Some(ProcessEntry::Running(process)) if !process.tty => {
                return Err(invalid_request(format!(
                    "stdin is closed for process {}",
                    params.process_id
                )));
            }
            Some(ProcessEntry::Running(process)) => process.session.writer_sender(),
        }
    };
    sender
        .send(params.chunk.into_inner())
        .await
        .map_err(|_| internal_error("failed to write to process stdin".to_string()))?;
    Ok(WriteResponse { accepted: true })
}
/// Requests termination of a process. `running: true` means a live process
/// was signalled; unknown, still-starting, or already-exited processes
/// report `running: false`.
pub(crate) async fn terminate(
    &self,
    params: TerminateParams,
) -> Result<TerminateResponse, JSONRPCErrorError> {
    self.require_initialized_for("exec")?;
    let table = self.processes.lock().await;
    let running = match table.get(&params.process_id) {
        // Only signal processes that have not already exited.
        Some(ProcessEntry::Running(process)) if process.exit_code.is_none() => {
            process.session.terminate();
            true
        }
        Some(ProcessEntry::Running(_)) | Some(ProcessEntry::Starting) | None => false,
    };
    Ok(TerminateResponse { running })
}
/// `fs/readFile`: delegation to the filesystem facade, gated on the
/// initialize handshake (as are all fs_* methods below).
pub(crate) async fn fs_read_file(
    &self,
    params: FsReadFileParams,
) -> Result<FsReadFileResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.read_file(params).await
}
/// `fs/writeFile` delegation.
pub(crate) async fn fs_write_file(
    &self,
    params: FsWriteFileParams,
) -> Result<FsWriteFileResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.write_file(params).await
}
/// `fs/createDirectory` delegation.
pub(crate) async fn fs_create_directory(
    &self,
    params: FsCreateDirectoryParams,
) -> Result<FsCreateDirectoryResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.create_directory(params).await
}
/// `fs/getMetadata` delegation.
pub(crate) async fn fs_get_metadata(
    &self,
    params: FsGetMetadataParams,
) -> Result<FsGetMetadataResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.get_metadata(params).await
}
/// `fs/readDirectory` delegation.
pub(crate) async fn fs_read_directory(
    &self,
    params: FsReadDirectoryParams,
) -> Result<FsReadDirectoryResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.read_directory(params).await
}
/// `fs/remove` delegation.
pub(crate) async fn fs_remove(
    &self,
    params: FsRemoveParams,
) -> Result<FsRemoveResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.remove(params).await
}
/// `fs/copy` delegation.
pub(crate) async fn fs_copy(
    &self,
    params: FsCopyParams,
) -> Result<FsCopyResponse, JSONRPCErrorError> {
    self.require_initialized_for("filesystem")?;
    self.file_system.copy(params).await
}
}
/// Background pump for one output stream of a process.
///
/// For every received chunk it (under the process-table lock) assigns the
/// next sequence number and appends the chunk to the retained buffer —
/// evicting oldest chunks once past `RETAINED_OUTPUT_BYTES_PER_PROCESS` —
/// then wakes `exec_read` waiters and forwards the chunk to the client.
/// Ends when the channel closes, the table entry disappears, or a
/// notification send fails.
async fn stream_output(
    process_id: String,
    stream: ExecOutputStream,
    mut receiver: tokio::sync::mpsc::Receiver<Vec<u8>>,
    notifications: RpcNotificationSender,
    processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
    output_notify: Arc<Notify>,
) {
    while let Some(chunk) = receiver.recv().await {
        // Build the notification inside the lock; send it outside so the
        // table is never held across the await.
        let notification = {
            let mut processes = processes.lock().await;
            let Some(entry) = processes.get_mut(&process_id) else {
                break;
            };
            let ProcessEntry::Running(process) = entry else {
                break;
            };
            let seq = process.next_seq;
            process.next_seq += 1;
            process.retained_bytes += chunk.len();
            process.output.push_back(RetainedOutputChunk {
                seq,
                stream,
                chunk: chunk.clone(),
            });
            // Evict oldest chunks until back under the cap. NOTE(review):
            // this warns once per evicted chunk, which can be noisy for
            // long-running chatty processes.
            while process.retained_bytes > RETAINED_OUTPUT_BYTES_PER_PROCESS {
                let Some(evicted) = process.output.pop_front() else {
                    break;
                };
                process.retained_bytes = process.retained_bytes.saturating_sub(evicted.chunk.len());
                warn!(
                    "retained output cap exceeded for process {process_id}; dropping oldest output"
                );
            }
            ExecOutputDeltaNotification {
                process_id: process_id.clone(),
                stream,
                chunk: chunk.into(),
            }
        };
        // Wake blocked exec_read calls before notifying the client.
        output_notify.notify_waiters();
        if notifications
            .notify(crate::protocol::EXEC_OUTPUT_DELTA_METHOD, &notification)
            .await
            .is_err()
        {
            break;
        }
    }
}
/// Waits for the process to exit, records the exit code, wakes readers and
/// notifies the client, then — after `EXITED_PROCESS_RETENTION` — removes
/// the table entry, but only if it still holds this same exit code (i.e.
/// the id has not been replaced in the meantime).
async fn watch_exit(
    process_id: String,
    exit_rx: tokio::sync::oneshot::Receiver<i32>,
    notifications: RpcNotificationSender,
    processes: Arc<Mutex<HashMap<String, ProcessEntry>>>,
    output_notify: Arc<Notify>,
) {
    // -1 stands in for an exit channel dropped without a code.
    let exit_code = exit_rx.await.unwrap_or(-1);
    {
        let mut processes = processes.lock().await;
        if let Some(ProcessEntry::Running(process)) = processes.get_mut(&process_id) {
            process.exit_code = Some(exit_code);
        }
    }
    // Wake exec_read waiters so they observe `exited` promptly.
    output_notify.notify_waiters();
    if notifications
        .notify(
            crate::protocol::EXEC_EXITED_METHOD,
            &ExecExitedNotification {
                process_id: process_id.clone(),
                exit_code,
            },
        )
        .await
        .is_err()
    {
        return;
    }
    // Keep the exited entry around briefly so clients can drain output.
    tokio::time::sleep(EXITED_PROCESS_RETENTION).await;
    let mut processes = processes.lock().await;
    if matches!(
        processes.get(&process_id),
        Some(ProcessEntry::Running(process)) if process.exit_code == Some(exit_code)
    ) {
        processes.remove(&process_id);
    }
}
#[cfg(test)]
mod tests;

View File

@@ -0,0 +1,102 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use pretty_assertions::assert_eq;
use tokio::sync::mpsc;
use super::ExecServerHandler;
use crate::protocol::ExecParams;
use crate::protocol::InitializeResponse;
use crate::protocol::TerminateParams;
use crate::protocol::TerminateResponse;
use crate::rpc::RpcNotificationSender;
/// Builds `ExecParams` for a short-lived non-tty `bash` child used by these
/// tests. Only `PATH` is forwarded so `bash` can be located.
fn exec_params(process_id: &str) -> ExecParams {
    let env: HashMap<String, String> = std::env::var_os("PATH")
        .map(|path| ("PATH".to_string(), path.to_string_lossy().into_owned()))
        .into_iter()
        .collect();
    ExecParams {
        process_id: process_id.to_string(),
        argv: ["bash", "-lc", "sleep 0.1"]
            .iter()
            .map(|arg| arg.to_string())
            .collect(),
        cwd: std::env::current_dir().expect("cwd"),
        env,
        tty: false,
        arg0: None,
    }
}
/// Spawns a handler with a throwaway notification channel and drives it
/// through the initialize handshake.
async fn initialized_handler() -> Arc<ExecServerHandler> {
    let (tx, _rx) = mpsc::channel(16);
    let handler = Arc::new(ExecServerHandler::new(RpcNotificationSender::new(tx)));
    let response = handler.initialize().expect("initialize");
    assert_eq!(response, InitializeResponse {});
    handler.initialized().expect("initialized");
    handler
}
/// Two concurrent `exec` calls with the same process id: exactly one wins
/// the `Starting` reservation; the loser gets an invalid-request error.
#[tokio::test]
async fn duplicate_process_ids_allow_only_one_successful_start() {
    let handler = initialized_handler().await;
    let first_handler = Arc::clone(&handler);
    let second_handler = Arc::clone(&handler);
    let (first, second) = tokio::join!(
        first_handler.exec(exec_params("proc-1")),
        second_handler.exec(exec_params("proc-1")),
    );
    let (successes, failures): (Vec<_>, Vec<_>) =
        [first, second].into_iter().partition(Result::is_ok);
    assert_eq!(successes.len(), 1);
    assert_eq!(failures.len(), 1);
    let error = failures
        .into_iter()
        .next()
        .expect("one failed request")
        .expect_err("expected duplicate process error");
    // -32600 is the JSON-RPC "invalid request" code.
    assert_eq!(error.code, -32600);
    assert_eq!(error.message, "process proc-1 already exists");
    // Give the spawned `sleep 0.1` child time to finish before shutdown.
    tokio::time::sleep(Duration::from_millis(150)).await;
    handler.shutdown().await;
}
/// Polls `terminate` until it reports `running: false`, showing that an
/// exited child (the `sleep 0.1` process) is observed within one second.
#[tokio::test]
async fn terminate_reports_false_after_process_exit() {
    let handler = initialized_handler().await;
    handler
        .exec(exec_params("proc-1"))
        .await
        .expect("start process");
    let deadline = tokio::time::Instant::now() + Duration::from_secs(1);
    loop {
        let response = handler
            .terminate(TerminateParams {
                process_id: "proc-1".to_string(),
            })
            .await
            .expect("terminate response");
        if response == (TerminateResponse { running: false }) {
            break;
        }
        assert!(
            tokio::time::Instant::now() < deadline,
            "process should have exited within 1s"
        );
        tokio::time::sleep(Duration::from_millis(25)).await;
    }
    handler.shutdown().await;
}

View File

@@ -1,53 +1,109 @@
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use tracing::debug;
use std::sync::Arc;
use crate::connection::JsonRpcConnection;
use crate::connection::JsonRpcConnectionEvent;
use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeParams;
use crate::server::ExecServerHandler;
use crate::server::jsonrpc::invalid_params;
use crate::server::jsonrpc::invalid_request_message;
use crate::server::jsonrpc::method_not_found;
use crate::server::jsonrpc::response_message;
use tokio::sync::mpsc;
use tracing::debug;
use tracing::warn;
pub(crate) async fn run_connection(connection: JsonRpcConnection) {
let (json_outgoing_tx, mut incoming_rx, _connection_tasks) = connection.into_parts();
let handler = ExecServerHandler::new();
use crate::connection::CHANNEL_CAPACITY;
use crate::connection::JsonRpcConnection;
use crate::connection::JsonRpcConnectionEvent;
use crate::rpc::RpcNotificationSender;
use crate::rpc::RpcServerOutboundMessage;
use crate::rpc::encode_server_message;
use crate::rpc::invalid_request;
use crate::rpc::method_not_found;
use crate::server::ExecServerHandler;
use crate::server::registry::build_router;
while let Some(event) = incoming_rx.recv().await {
match event {
JsonRpcConnectionEvent::Message(message) => {
let response = match handle_connection_message(&handler, message).await {
Ok(response) => response,
Err(err) => {
tracing::warn!(
"closing exec-server connection after protocol error: {err}"
);
break;
}
};
let Some(response) = response else {
continue;
};
if json_outgoing_tx.send(response).await.is_err() {
pub(crate) async fn run_connection(connection: JsonRpcConnection) {
let router = Arc::new(build_router());
let (json_outgoing_tx, mut incoming_rx, connection_tasks) = connection.into_parts();
let (outgoing_tx, mut outgoing_rx) =
mpsc::channel::<RpcServerOutboundMessage>(CHANNEL_CAPACITY);
let notifications = RpcNotificationSender::new(outgoing_tx.clone());
let handler = Arc::new(ExecServerHandler::new(notifications));
let outbound_task = tokio::spawn(async move {
while let Some(message) = outgoing_rx.recv().await {
let json_message = match encode_server_message(message) {
Ok(json_message) => json_message,
Err(err) => {
warn!("failed to serialize exec-server outbound message: {err}");
break;
}
};
if json_outgoing_tx.send(json_message).await.is_err() {
break;
}
}
});
// Process inbound events sequentially to preserve initialize/initialized ordering.
while let Some(event) = incoming_rx.recv().await {
match event {
JsonRpcConnectionEvent::MalformedMessage { reason } => {
warn!("ignoring malformed exec-server message: {reason}");
if json_outgoing_tx
.send(invalid_request_message(reason))
if outgoing_tx
.send(RpcServerOutboundMessage::Error {
request_id: codex_app_server_protocol::RequestId::Integer(-1),
error: invalid_request(reason),
})
.await
.is_err()
{
break;
}
}
JsonRpcConnectionEvent::Message(message) => match message {
codex_app_server_protocol::JSONRPCMessage::Request(request) => {
if let Some(route) = router.request_route(request.method.as_str()) {
let message = route(handler.clone(), request).await;
if outgoing_tx.send(message).await.is_err() {
break;
}
} else if outgoing_tx
.send(RpcServerOutboundMessage::Error {
request_id: request.id,
error: method_not_found(format!(
"exec-server stub does not implement `{}` yet",
request.method
)),
})
.await
.is_err()
{
break;
}
}
codex_app_server_protocol::JSONRPCMessage::Notification(notification) => {
let Some(route) = router.notification_route(notification.method.as_str())
else {
warn!(
"closing exec-server connection after unexpected notification: {}",
notification.method
);
break;
};
if let Err(err) = route(handler.clone(), notification).await {
warn!("closing exec-server connection after protocol error: {err}");
break;
}
}
codex_app_server_protocol::JSONRPCMessage::Response(response) => {
warn!(
"closing exec-server connection after unexpected client response: {:?}",
response.id
);
break;
}
codex_app_server_protocol::JSONRPCMessage::Error(error) => {
warn!(
"closing exec-server connection after unexpected client error: {:?}",
error.id
);
break;
}
},
JsonRpcConnectionEvent::Disconnected { reason } => {
if let Some(reason) = reason {
debug!("exec-server connection disconnected: {reason}");
@@ -58,64 +114,10 @@ pub(crate) async fn run_connection(connection: JsonRpcConnection) {
}
handler.shutdown().await;
}
pub(crate) async fn handle_connection_message(
handler: &ExecServerHandler,
message: JSONRPCMessage,
) -> Result<Option<JSONRPCMessage>, String> {
match message {
JSONRPCMessage::Request(request) => Ok(Some(dispatch_request(handler, request))),
JSONRPCMessage::Notification(notification) => {
handle_notification(handler, notification)?;
Ok(None)
}
JSONRPCMessage::Response(response) => Err(format!(
"unexpected client response for request id {:?}",
response.id
)),
JSONRPCMessage::Error(error) => Err(format!(
"unexpected client error for request id {:?}",
error.id
)),
}
}
fn dispatch_request(handler: &ExecServerHandler, request: JSONRPCRequest) -> JSONRPCMessage {
let JSONRPCRequest {
id,
method,
params,
trace: _,
} = request;
match method.as_str() {
INITIALIZE_METHOD => {
let result = serde_json::from_value::<InitializeParams>(
params.unwrap_or(serde_json::Value::Null),
)
.map_err(|err| invalid_params(err.to_string()))
.and_then(|_params| handler.initialize())
.and_then(|response| {
serde_json::to_value(response).map_err(|err| invalid_params(err.to_string()))
});
response_message(id, result)
}
other => response_message(
id,
Err(method_not_found(format!(
"exec-server stub does not implement `{other}` yet"
))),
),
}
}
fn handle_notification(
handler: &ExecServerHandler,
notification: JSONRPCNotification,
) -> Result<(), String> {
match notification.method.as_str() {
INITIALIZED_METHOD => handler.initialized(),
other => Err(format!("unexpected notification method: {other}")),
drop(outgoing_tx);
for task in connection_tasks {
task.abort();
let _ = task.await;
}
let _ = outbound_task.await;
}

View File

@@ -0,0 +1,110 @@
use std::sync::Arc;
use crate::protocol::EXEC_METHOD;
use crate::protocol::EXEC_READ_METHOD;
use crate::protocol::EXEC_TERMINATE_METHOD;
use crate::protocol::EXEC_WRITE_METHOD;
use crate::protocol::ExecParams;
use crate::protocol::FS_COPY_METHOD;
use crate::protocol::FS_CREATE_DIRECTORY_METHOD;
use crate::protocol::FS_GET_METADATA_METHOD;
use crate::protocol::FS_READ_DIRECTORY_METHOD;
use crate::protocol::FS_READ_FILE_METHOD;
use crate::protocol::FS_REMOVE_METHOD;
use crate::protocol::FS_WRITE_FILE_METHOD;
use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeParams;
use crate::protocol::ReadParams;
use crate::protocol::TerminateParams;
use crate::protocol::WriteParams;
use crate::rpc::RpcRouter;
use crate::server::ExecServerHandler;
use codex_app_server_protocol::FsCopyParams;
use codex_app_server_protocol::FsCreateDirectoryParams;
use codex_app_server_protocol::FsGetMetadataParams;
use codex_app_server_protocol::FsReadDirectoryParams;
use codex_app_server_protocol::FsReadFileParams;
use codex_app_server_protocol::FsRemoveParams;
use codex_app_server_protocol::FsWriteFileParams;
/// Wires every exec-server JSON-RPC method to its `ExecServerHandler` entry
/// point: lifecycle handshake first, then the exec process methods, then
/// the filesystem methods.
pub(crate) fn build_router() -> RpcRouter<ExecServerHandler> {
    let mut router = RpcRouter::new();
    // Lifecycle handshake.
    router.request(
        INITIALIZE_METHOD,
        |h: Arc<ExecServerHandler>, _p: InitializeParams| async move { h.initialize() },
    );
    router.notification(
        INITIALIZED_METHOD,
        |h: Arc<ExecServerHandler>, _p: serde_json::Value| async move { h.initialized() },
    );
    // Process execution.
    router.request(
        EXEC_METHOD,
        |h: Arc<ExecServerHandler>, p: ExecParams| async move { h.exec(p).await },
    );
    router.request(
        EXEC_READ_METHOD,
        |h: Arc<ExecServerHandler>, p: ReadParams| async move { h.exec_read(p).await },
    );
    router.request(
        EXEC_WRITE_METHOD,
        |h: Arc<ExecServerHandler>, p: WriteParams| async move { h.exec_write(p).await },
    );
    router.request(
        EXEC_TERMINATE_METHOD,
        |h: Arc<ExecServerHandler>, p: TerminateParams| async move { h.terminate(p).await },
    );
    // Filesystem access.
    router.request(
        FS_READ_FILE_METHOD,
        |h: Arc<ExecServerHandler>, p: FsReadFileParams| async move { h.fs_read_file(p).await },
    );
    router.request(
        FS_WRITE_FILE_METHOD,
        |h: Arc<ExecServerHandler>, p: FsWriteFileParams| async move { h.fs_write_file(p).await },
    );
    router.request(
        FS_CREATE_DIRECTORY_METHOD,
        |h: Arc<ExecServerHandler>, p: FsCreateDirectoryParams| async move {
            h.fs_create_directory(p).await
        },
    );
    router.request(
        FS_GET_METADATA_METHOD,
        |h: Arc<ExecServerHandler>, p: FsGetMetadataParams| async move {
            h.fs_get_metadata(p).await
        },
    );
    router.request(
        FS_READ_DIRECTORY_METHOD,
        |h: Arc<ExecServerHandler>, p: FsReadDirectoryParams| async move {
            h.fs_read_directory(p).await
        },
    );
    router.request(
        FS_REMOVE_METHOD,
        |h: Arc<ExecServerHandler>, p: FsRemoveParams| async move { h.fs_remove(p).await },
    );
    router.request(
        FS_COPY_METHOD,
        |h: Arc<ExecServerHandler>, p: FsCopyParams| async move { h.fs_copy(p).await },
    );
    router
}