[3/6] Add pushed exec process events (#18020)

## Summary
- Add a pushed `ExecProcessEvent` stream alongside retained
`process/read` output.
- Publish local and remote output, exit, close, and failure events.
- Cover the event stream with shared local/remote exec process tests.

## Testing
- `cargo check -p codex-exec-server`
- `cargo check -p codex-rmcp-client`
- Not run: `cargo test` per repo instruction; CI will cover.

## Stack
```text
o  #18027 [6/6] Fail exec client operations after disconnect
│
o  #18212 [5/6] Wire executor-backed MCP stdio
│
o  #18087 [4/6] Abstract MCP stdio server launching
│
@  #18020 [3/6] Add pushed exec process events
│
o  #18086 [2/6] Support piped stdin in exec process API
│
o  #18085 [1/6] Add MCP server environment config
│
o  main
```

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
Author: Ahmed Ibrahim
Date: 2026-04-17 12:07:43 -07:00
Committed by: GitHub
parent eaf78e43f2
commit 9d3a5cf05e
7 changed files with 703 additions and 14 deletions

View File

@@ -1,5 +1,7 @@
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::time::Duration;
use arc_swap::ArcSwap;
@@ -16,6 +18,9 @@ use crate::ProcessId;
use crate::client_api::ExecServerClientConnectOptions;
use crate::client_api::RemoteExecServerConnectArgs;
use crate::connection::JsonRpcConnection;
use crate::process::ExecProcessEvent;
use crate::process::ExecProcessEventLog;
use crate::process::ExecProcessEventReceiver;
use crate::protocol::EXEC_CLOSED_METHOD;
use crate::protocol::EXEC_EXITED_METHOD;
use crate::protocol::EXEC_METHOD;
@@ -53,6 +58,7 @@ use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeParams;
use crate::protocol::InitializeResponse;
use crate::protocol::ProcessOutputChunk;
use crate::protocol::ReadParams;
use crate::protocol::ReadResponse;
use crate::protocol::TerminateParams;
@@ -65,6 +71,8 @@ use crate::rpc::RpcClientEvent;
const CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
const INITIALIZE_TIMEOUT: Duration = Duration::from_secs(10);
const PROCESS_EVENT_CHANNEL_CAPACITY: usize = 256;
const PROCESS_EVENT_RETAINED_BYTES: usize = 1024 * 1024;
impl Default for ExecServerClientConnectOptions {
fn default() -> Self {
@@ -100,9 +108,20 @@ impl RemoteExecServerConnectArgs {
pub(crate) struct SessionState {
    // Watch channel bumped with the latest observed sequence number to wake
    // blocked readers.
    wake_tx: watch::Sender<u64>,
    // Replay buffer plus live fan-out for pushed process events.
    events: ExecProcessEventLog,
    // Re-sequencing state for out-of-order server notifications; see
    // `publish_ordered_event`.
    ordered_events: StdMutex<OrderedSessionEvents>,
    // First fatal session error, if any; set at most once by `set_failure`.
    failure: Mutex<Option<String>>,
}
/// Tracks which sequenced events have already been published for one session.
#[derive(Default)]
struct OrderedSessionEvents {
    // Highest sequence number already forwarded to subscribers (0 initially).
    last_published_seq: u64,
    // Server-side output, exit, and closed notifications are emitted by
    // different tasks and can reach the client out of order. Keep future events
    // here until all lower sequence numbers have been published.
    pending: BTreeMap<u64, ExecProcessEvent>,
}
#[derive(Clone)]
pub(crate) struct Session {
client: ExecServerClient,
@@ -452,6 +471,11 @@ impl SessionState {
let (wake_tx, _wake_rx) = watch::channel(0);
Self {
wake_tx,
events: ExecProcessEventLog::new(
PROCESS_EVENT_CHANNEL_CAPACITY,
PROCESS_EVENT_RETAINED_BYTES,
),
ordered_events: StdMutex::new(OrderedSessionEvents::default()),
failure: Mutex::new(None),
}
}
@@ -460,19 +484,71 @@ impl SessionState {
self.wake_tx.subscribe()
}
/// Returns a receiver for this session's pushed process events.
pub(crate) fn subscribe_events(&self) -> ExecProcessEventReceiver {
    self.events.subscribe()
}
/// Advances the wake watermark to `seq` when it is ahead of the current value.
fn note_change(&self, seq: u64) {
    let current: u64 = *self.wake_tx.borrow();
    // A send error only means no receiver is currently subscribed; ignore it.
    let _ = self.wake_tx.send(if seq > current { seq } else { current });
}
/// Publishes a process event only when all earlier sequenced events have
/// already been published.
///
/// Returns `true` only when this call actually publishes the ordered
/// `Closed` event. The caller uses that signal to remove the session route
/// after the terminal event is visible to subscribers, rather than when a
/// possibly-early closed notification first arrives.
fn publish_ordered_event(&self, event: ExecProcessEvent) -> bool {
    let Some(seq) = event.seq() else {
        // Unsequenced events (`Failed`) bypass ordering entirely.
        self.events.publish(event);
        return false;
    };
    let mut ready = Vec::new();
    {
        let mut ordered_events = self
            .ordered_events
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        // We have already delivered this sequence number or moved past it,
        // so accepting it again would duplicate output or lifecycle events.
        if seq <= ordered_events.last_published_seq {
            return false;
        }
        ordered_events.pending.entry(seq).or_insert(event);
        // Drain every consecutively numbered event that is now unblocked.
        loop {
            let next_seq = ordered_events.last_published_seq + 1;
            let Some(event) = ordered_events.pending.remove(&next_seq) else {
                break;
            };
            ordered_events.last_published_seq += 1;
            ready.push(event);
        }
    }
    // Publish outside the ordering lock; `ready` preserves sequence order.
    let mut published_closed = false;
    for event in ready {
        published_closed |= matches!(&event, ExecProcessEvent::Closed { .. });
        self.events.publish(event);
    }
    published_closed
}
/// Records the first fatal session error and publishes a `Failed` event.
///
/// Later calls keep the original message and publish nothing, but still bump
/// the wake channel so pending readers re-check the session state.
async fn set_failure(&self, message: String) {
    let mut failure = self.failure.lock().await;
    // Only the first failure wins; remember the decision past the point
    // where the lock is released.
    let should_publish = failure.is_none();
    if should_publish {
        *failure = Some(message.clone());
    }
    drop(failure);
    // Wake readers unconditionally so they observe the failed state.
    let next = (*self.wake_tx.borrow()).saturating_add(1);
    let _ = self.wake_tx.send(next);
    if should_publish {
        // `Failed` carries no sequence number, so this can never report a
        // published `Closed`; the return value is irrelevant here.
        let _ = self.publish_ordered_event(ExecProcessEvent::Failed(message));
    }
}
async fn failed_response(&self) -> Option<ReadResponse> {
@@ -505,6 +581,10 @@ impl Session {
self.state.subscribe()
}
/// Exposes the underlying session state's pushed process event stream.
pub(crate) fn subscribe_events(&self) -> ExecProcessEventReceiver {
    self.state.subscribe_events()
}
pub(crate) async fn read(
&self,
after_seq: Option<u64>,
@@ -628,6 +708,15 @@ async fn handle_server_notification(
serde_json::from_value(notification.params.unwrap_or(Value::Null))?;
if let Some(session) = inner.get_session(&params.process_id) {
session.note_change(params.seq);
let published_closed =
session.publish_ordered_event(ExecProcessEvent::Output(ProcessOutputChunk {
seq: params.seq,
stream: params.stream,
chunk: params.chunk,
}));
if published_closed {
inner.remove_session(&params.process_id).await;
}
}
}
EXEC_EXITED_METHOD => {
@@ -635,16 +724,28 @@ async fn handle_server_notification(
serde_json::from_value(notification.params.unwrap_or(Value::Null))?;
if let Some(session) = inner.get_session(&params.process_id) {
session.note_change(params.seq);
let published_closed = session.publish_ordered_event(ExecProcessEvent::Exited {
seq: params.seq,
exit_code: params.exit_code,
});
if published_closed {
inner.remove_session(&params.process_id).await;
}
}
}
EXEC_CLOSED_METHOD => {
let params: ExecClosedNotification =
serde_json::from_value(notification.params.unwrap_or(Value::Null))?;
// Closed is the terminal lifecycle event for this process, so drop
// the routing entry before forwarding it.
let session = inner.remove_session(&params.process_id).await;
if let Some(session) = session {
if let Some(session) = inner.get_session(&params.process_id) {
session.note_change(params.seq);
// Closed is terminal, but it can arrive before tail output or
// exited. Keep routing this process until the ordered publisher
// says Closed has actually been delivered.
let published_closed =
session.publish_ordered_event(ExecProcessEvent::Closed { seq: params.seq });
if published_closed {
inner.remove_session(&params.process_id).await;
}
}
}
other => {
@@ -673,14 +774,18 @@ mod tests {
use super::ExecServerClientConnectOptions;
use crate::ProcessId;
use crate::connection::JsonRpcConnection;
use crate::process::ExecProcessEvent;
use crate::protocol::EXEC_CLOSED_METHOD;
use crate::protocol::EXEC_EXITED_METHOD;
use crate::protocol::EXEC_OUTPUT_DELTA_METHOD;
use crate::protocol::ExecClosedNotification;
use crate::protocol::ExecExitedNotification;
use crate::protocol::ExecOutputDeltaNotification;
use crate::protocol::ExecOutputStream;
use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeResponse;
use crate::protocol::ProcessOutputChunk;
async fn read_jsonrpc_line<R>(lines: &mut tokio::io::Lines<BufReader<R>>) -> JSONRPCMessage
where
@@ -705,6 +810,149 @@ mod tests {
.expect("json-rpc line should write");
}
#[tokio::test]
async fn process_events_are_delivered_in_seq_order_when_notifications_are_reordered() {
    let (client_stdin, server_reader) = duplex(1 << 20);
    let (mut server_writer, client_stdout) = duplex(1 << 20);
    let (notifications_tx, mut notifications_rx) = mpsc::channel(16);
    // Fake server task: answer the initialize handshake, then forward queued
    // notifications to the client verbatim, in the order they were queued.
    let server = tokio::spawn(async move {
        let mut lines = BufReader::new(server_reader).lines();
        let initialize = read_jsonrpc_line(&mut lines).await;
        let request = match initialize {
            JSONRPCMessage::Request(request) if request.method == INITIALIZE_METHOD => request,
            other => panic!("expected initialize request, got {other:?}"),
        };
        write_jsonrpc_line(
            &mut server_writer,
            JSONRPCMessage::Response(JSONRPCResponse {
                id: request.id,
                result: serde_json::to_value(InitializeResponse {
                    session_id: "session-1".to_string(),
                })
                .expect("initialize response should serialize"),
            }),
        )
        .await;
        let initialized = read_jsonrpc_line(&mut lines).await;
        match initialized {
            JSONRPCMessage::Notification(notification)
                if notification.method == INITIALIZED_METHOD => {}
            other => panic!("expected initialized notification, got {other:?}"),
        }
        while let Some(message) = notifications_rx.recv().await {
            write_jsonrpc_line(&mut server_writer, message).await;
        }
    });
    let client = ExecServerClient::connect(
        JsonRpcConnection::from_stdio(
            client_stdout,
            client_stdin,
            "test-exec-server-client".to_string(),
        ),
        ExecServerClientConnectOptions::default(),
    )
    .await
    .expect("client should connect");
    let process_id = ProcessId::from("reordered");
    let session = client
        .register_session(&process_id)
        .await
        .expect("session should register");
    let mut events = session.subscribe_events();
    // Queue notifications deliberately out of order: seq 4 (closed) first,
    // then 1, 3, 2. The client must re-sequence them before publishing.
    for message in [
        JSONRPCMessage::Notification(JSONRPCNotification {
            method: EXEC_CLOSED_METHOD.to_string(),
            params: Some(
                serde_json::to_value(ExecClosedNotification {
                    process_id: process_id.clone(),
                    seq: 4,
                })
                .expect("closed notification should serialize"),
            ),
        }),
        JSONRPCMessage::Notification(JSONRPCNotification {
            method: EXEC_OUTPUT_DELTA_METHOD.to_string(),
            params: Some(
                serde_json::to_value(ExecOutputDeltaNotification {
                    process_id: process_id.clone(),
                    seq: 1,
                    stream: ExecOutputStream::Stdout,
                    chunk: b"one".to_vec().into(),
                })
                .expect("output notification should serialize"),
            ),
        }),
        JSONRPCMessage::Notification(JSONRPCNotification {
            method: EXEC_EXITED_METHOD.to_string(),
            params: Some(
                serde_json::to_value(ExecExitedNotification {
                    process_id: process_id.clone(),
                    seq: 3,
                    exit_code: 0,
                })
                .expect("exit notification should serialize"),
            ),
        }),
        JSONRPCMessage::Notification(JSONRPCNotification {
            method: EXEC_OUTPUT_DELTA_METHOD.to_string(),
            params: Some(
                serde_json::to_value(ExecOutputDeltaNotification {
                    process_id: process_id.clone(),
                    seq: 2,
                    stream: ExecOutputStream::Stderr,
                    chunk: b"two".to_vec().into(),
                })
                .expect("output notification should serialize"),
            ),
        }),
    ] {
        notifications_tx
            .send(message)
            .await
            .expect("notification should queue");
    }
    let mut delivered = Vec::new();
    for _ in 0..4 {
        delivered.push(
            timeout(Duration::from_secs(1), events.recv())
                .await
                .expect("process event should not time out")
                .expect("process event stream should stay open"),
        );
    }
    // Events must come back in strict sequence order regardless of arrival.
    assert_eq!(
        delivered,
        vec![
            ExecProcessEvent::Output(ProcessOutputChunk {
                seq: 1,
                stream: ExecOutputStream::Stdout,
                chunk: b"one".to_vec().into(),
            }),
            ExecProcessEvent::Output(ProcessOutputChunk {
                seq: 2,
                stream: ExecOutputStream::Stderr,
                chunk: b"two".to_vec().into(),
            }),
            ExecProcessEvent::Exited {
                seq: 3,
                exit_code: 0,
            },
            ExecProcessEvent::Closed { seq: 4 },
        ]
    );
    drop(notifications_tx);
    drop(client);
    server.await.expect("server task should finish");
}
#[tokio::test]
async fn wake_notifications_do_not_block_other_sessions() {
let (client_stdin, server_reader) = duplex(1 << 20);

View File

@@ -39,6 +39,8 @@ pub use local_file_system::LOCAL_FS;
pub use local_file_system::LocalFileSystem;
pub use process::ExecBackend;
pub use process::ExecProcess;
pub use process::ExecProcessEvent;
pub use process::ExecProcessEventReceiver;
pub use process::StartedExecProcess;
pub use process_id::ProcessId;
pub use protocol::ExecClosedNotification;
@@ -65,6 +67,7 @@ pub use protocol::FsWriteFileParams;
pub use protocol::FsWriteFileResponse;
pub use protocol::InitializeParams;
pub use protocol::InitializeResponse;
pub use protocol::ProcessOutputChunk;
pub use protocol::ReadParams;
pub use protocol::ReadResponse;
pub use protocol::TerminateParams;

View File

@@ -17,9 +17,12 @@ use tokio::sync::watch;
use crate::ExecBackend;
use crate::ExecProcess;
use crate::ExecProcessEvent;
use crate::ExecProcessEventReceiver;
use crate::ExecServerError;
use crate::ProcessId;
use crate::StartedExecProcess;
use crate::process::ExecProcessEventLog;
use crate::protocol::EXEC_CLOSED_METHOD;
use crate::protocol::ExecClosedNotification;
use crate::protocol::ExecEnvPolicy;
@@ -44,6 +47,7 @@ use crate::rpc::invalid_request;
const RETAINED_OUTPUT_BYTES_PER_PROCESS: usize = 1024 * 1024;
const NOTIFICATION_CHANNEL_CAPACITY: usize = 256;
const PROCESS_EVENT_CHANNEL_CAPACITY: usize = 256;
#[cfg(test)]
const EXITED_PROCESS_RETENTION: Duration = Duration::from_millis(25);
#[cfg(not(test))]
@@ -65,6 +69,7 @@ struct RunningProcess {
next_seq: u64,
exit_code: Option<i32>,
wake_tx: watch::Sender<u64>,
events: ExecProcessEventLog,
output_notify: Arc<Notify>,
open_streams: usize,
closed: bool,
@@ -89,6 +94,7 @@ struct LocalExecProcess {
process_id: ProcessId,
backend: LocalProcess,
wake_tx: watch::Sender<u64>,
events: ExecProcessEventLog,
}
impl Default for LocalProcess {
@@ -138,7 +144,7 @@ impl LocalProcess {
async fn start_process(
&self,
params: ExecParams,
) -> Result<(ExecResponse, watch::Sender<u64>), JSONRPCErrorError> {
) -> Result<(ExecResponse, watch::Sender<u64>, ExecProcessEventLog), JSONRPCErrorError> {
let process_id = params.process_id.clone();
let (program, args) = params
.argv
@@ -198,6 +204,10 @@ impl LocalProcess {
let output_notify = Arc::new(Notify::new());
let (wake_tx, _wake_rx) = watch::channel(0);
let events = ExecProcessEventLog::new(
PROCESS_EVENT_CHANNEL_CAPACITY,
RETAINED_OUTPUT_BYTES_PER_PROCESS,
);
{
let mut process_map = self.inner.processes.lock().await;
process_map.insert(
@@ -211,6 +221,7 @@ impl LocalProcess {
next_seq: 1,
exit_code: None,
wake_tx: wake_tx.clone(),
events: events.clone(),
output_notify: Arc::clone(&output_notify),
open_streams: 2,
closed: false,
@@ -247,13 +258,13 @@ impl LocalProcess {
output_notify,
));
Ok((ExecResponse { process_id }, wake_tx))
Ok((ExecResponse { process_id }, wake_tx, events))
}
pub(crate) async fn exec(&self, params: ExecParams) -> Result<ExecResponse, JSONRPCErrorError> {
self.start_process(params)
.await
.map(|(response, _)| response)
.map(|(response, _, _)| response)
}
pub(crate) async fn exec_read(
@@ -424,7 +435,7 @@ fn shell_environment_policy(env_policy: &ExecEnvPolicy) -> ShellEnvironmentPolic
#[async_trait]
impl ExecBackend for LocalProcess {
async fn start(&self, params: ExecParams) -> Result<StartedExecProcess, ExecServerError> {
let (response, wake_tx) = self
let (response, wake_tx, events) = self
.start_process(params)
.await
.map_err(map_handler_error)?;
@@ -433,6 +444,7 @@ impl ExecBackend for LocalProcess {
process_id: response.process_id,
backend: self.clone(),
wake_tx,
events,
}),
})
}
@@ -448,6 +460,10 @@ impl ExecProcess for LocalExecProcess {
self.wake_tx.subscribe()
}
fn subscribe_events(&self) -> ExecProcessEventReceiver {
self.events.subscribe()
}
async fn read(
&self,
after_seq: Option<u64>,
@@ -548,11 +564,19 @@ async fn stream_output(
process.retained_bytes = process.retained_bytes.saturating_sub(evicted.chunk.len());
}
let _ = process.wake_tx.send(seq);
let output = ProcessOutputChunk {
seq,
stream,
chunk: chunk.into(),
};
process
.events
.publish(ExecProcessEvent::Output(output.clone()));
ExecOutputDeltaNotification {
process_id: process_id.clone(),
seq,
stream,
chunk: chunk.into(),
chunk: output.chunk,
}
};
output_notify.notify_waiters();
@@ -580,6 +604,9 @@ async fn watch_exit(
process.next_seq += 1;
process.exit_code = Some(exit_code);
let _ = process.wake_tx.send(seq);
process
.events
.publish(ExecProcessEvent::Exited { seq, exit_code });
Some(ExecExitedNotification {
process_id: process_id.clone(),
seq,
@@ -640,6 +667,7 @@ async fn maybe_emit_closed(process_id: ProcessId, inner: Arc<Inner>) {
let seq = process.next_seq;
process.next_seq += 1;
let _ = process.wake_tx.send(seq);
process.events.publish(ExecProcessEvent::Closed { seq });
Some(ExecClosedNotification {
process_id: process_id.clone(),
seq,

View File

@@ -1,11 +1,15 @@
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use async_trait::async_trait;
use tokio::sync::broadcast;
use tokio::sync::watch;
use crate::ExecServerError;
use crate::ProcessId;
use crate::protocol::ExecParams;
use crate::protocol::ProcessOutputChunk;
use crate::protocol::ReadResponse;
use crate::protocol::WriteResponse;
@@ -13,12 +17,158 @@ pub struct StartedExecProcess {
pub process: Arc<dyn ExecProcess>,
}
/// Pushed process events for consumers that want to follow process output as it
/// arrives instead of polling retained output with [`ExecProcess::read`].
///
/// The stream is scoped to one [`ExecProcess`] handle. `Output` events carry
/// stdout, stderr, or pty bytes. `Exited` reports the process exit status, while
/// `Closed` means all output streams have ended and no more output events will
/// arrive. `Failed` is used when the process session cannot continue, for
/// example because the remote executor connection disconnected.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExecProcessEvent {
    /// One sequenced chunk of process output.
    Output(ProcessOutputChunk),
    /// The process exited with `exit_code`.
    Exited { seq: u64, exit_code: i32 },
    /// Terminal lifecycle event: every output stream has ended.
    Closed { seq: u64 },
    /// Unsequenced, client-synthesized failure (e.g. transport disconnect).
    Failed(String),
}
/// Replay buffer plus live fan-out for pushed process events.
///
/// New subscribers first drain a bounded replay history, then continue on the
/// live broadcast channel. The history is bounded by event count and retained
/// output bytes: count protects against many tiny events, while bytes protects
/// against a few very large output chunks.
#[derive(Clone)]
pub(crate) struct ExecProcessEventLog {
    inner: Arc<ExecProcessEventLogInner>,
}

// Shared state behind cloned `ExecProcessEventLog` handles.
struct ExecProcessEventLogInner {
    // Bounded replay history; the lock also keeps publish/subscribe atomic.
    history: StdMutex<ExecProcessEventHistory>,
    // Live channel fanning events out to already-connected subscribers.
    live_tx: broadcast::Sender<ExecProcessEvent>,
    // Maximum number of events kept for replay.
    event_capacity: usize,
    // Maximum retained payload bytes kept for replay.
    byte_capacity: usize,
}

// Events retained for replay plus a running total of their payload sizes.
#[derive(Default)]
struct ExecProcessEventHistory {
    events: VecDeque<ExecProcessEvent>,
    // Sum of `retained_len()` over `events`; drives byte-based eviction.
    retained_bytes: usize,
}
impl ExecProcessEvent {
/// Sequence number used to order process-owned events.
///
/// `Failed` is intentionally unsequenced because it is synthesized by the
/// client when the session or transport fails, not emitted by the process.
pub(crate) fn seq(&self) -> Option<u64> {
match self {
ExecProcessEvent::Output(chunk) => Some(chunk.seq),
ExecProcessEvent::Exited { seq, .. } | ExecProcessEvent::Closed { seq } => Some(*seq),
ExecProcessEvent::Failed(_) => None,
}
}
fn retained_len(&self) -> usize {
match self {
ExecProcessEvent::Output(chunk) => chunk.chunk.0.len(),
ExecProcessEvent::Failed(message) => message.len(),
ExecProcessEvent::Exited { .. } | ExecProcessEvent::Closed { .. } => 0,
}
}
}
impl ExecProcessEventLog {
    /// Creates a log bounded by `event_capacity` replayed events and
    /// `byte_capacity` retained payload bytes; the live broadcast channel
    /// shares the event capacity.
    pub(crate) fn new(event_capacity: usize, byte_capacity: usize) -> Self {
        let (live_tx, _live_rx) = broadcast::channel(event_capacity);
        Self {
            inner: Arc::new(ExecProcessEventLogInner {
                history: StdMutex::new(ExecProcessEventHistory::default()),
                live_tx,
                event_capacity,
                byte_capacity,
            }),
        }
    }

    /// Appends `event` to the replay history, evicts oldest entries until both
    /// bounds hold again, and broadcasts the event to live subscribers.
    pub(crate) fn publish(&self, event: ExecProcessEvent) {
        let mut history = self
            .inner
            .history
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        history.retained_bytes += event.retained_len();
        history.events.push_back(event.clone());
        // Evict from the front. A single event larger than `byte_capacity`
        // evicts everything, including itself, leaving the history empty.
        while history.events.len() > self.inner.event_capacity
            || history.retained_bytes > self.inner.byte_capacity
        {
            let Some(evicted) = history.events.pop_front() else {
                break;
            };
            history.retained_bytes = history
                .retained_bytes
                .saturating_sub(evicted.retained_len());
        }
        // Send while still holding the history lock so a concurrent
        // `subscribe` either replays this event or receives it live —
        // never both and never neither.
        let _ = self.inner.live_tx.send(event);
    }

    /// Snapshots the replay history and attaches a live receiver atomically
    /// with respect to `publish` (both hold the history lock), so no event is
    /// duplicated or dropped across the replay/live boundary.
    pub(crate) fn subscribe(&self) -> ExecProcessEventReceiver {
        let history = self
            .inner
            .history
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let live_rx = self.inner.live_tx.subscribe();
        let replay = history.events.iter().cloned().collect();
        ExecProcessEventReceiver { replay, live_rx }
    }
}
/// Receiver of pushed process events: drains a replay snapshot first, then
/// yields live broadcast events.
pub struct ExecProcessEventReceiver {
    // Events captured at subscribe time, delivered before any live event.
    replay: VecDeque<ExecProcessEvent>,
    live_rx: broadcast::Receiver<ExecProcessEvent>,
}

impl ExecProcessEventReceiver {
    /// Returns a receiver that yields no events: its backing sender is dropped
    /// immediately, so `recv` reports the channel closed once the (empty)
    /// replay queue drains.
    pub fn empty() -> Self {
        let (_live_tx, live_rx) = broadcast::channel(1);
        Self {
            replay: VecDeque::new(),
            live_rx,
        }
    }

    /// Returns the next replayed or live event.
    ///
    /// `Lagged` means this receiver fell behind the bounded live channel. The
    /// caller should recover through [`ExecProcess::read`] using the last
    /// delivered sequence number, then continue receiving pushed events.
    pub async fn recv(&mut self) -> Result<ExecProcessEvent, broadcast::error::RecvError> {
        if let Some(event) = self.replay.pop_front() {
            return Ok(event);
        }
        self.live_rx.recv().await
    }
}
/// Handle for an executor-managed process.
///
/// Implementations must support both retained-output reads and pushed events:
/// `read` is the request/response API for callers that want to page through
/// buffered output, while `subscribe_events` is the streaming API for callers
/// that want output and lifecycle changes delivered as they happen.
#[async_trait]
pub trait ExecProcess: Send + Sync {
fn process_id(&self) -> &ProcessId;
fn subscribe_wake(&self) -> watch::Receiver<u64>;
fn subscribe_events(&self) -> ExecProcessEventReceiver;
async fn read(
&self,
after_seq: Option<u64>,
@@ -35,3 +185,54 @@ pub trait ExecProcess: Send + Sync {
pub trait ExecBackend: Send + Sync {
async fn start(&self, params: ExecParams) -> Result<StartedExecProcess, ExecServerError>;
}
#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;
    use tokio::time::Duration;
    use tokio::time::timeout;

    use super::ExecProcessEvent;
    use super::ExecProcessEventLog;
    use crate::protocol::ExecOutputStream;
    use crate::protocol::ProcessOutputChunk;

    #[tokio::test]
    async fn event_history_replay_is_bounded_by_retained_bytes() {
        // The byte capacity (3) is smaller than the 5-byte "large" chunk, so
        // the output event is evicted from the replay history as soon as it is
        // published; the zero-byte lifecycle events survive.
        let log = ExecProcessEventLog::new(/*event_capacity*/ 8, /*byte_capacity*/ 3);
        log.publish(ExecProcessEvent::Output(ProcessOutputChunk {
            seq: 1,
            stream: ExecOutputStream::Stdout,
            chunk: b"large".to_vec().into(),
        }));
        log.publish(ExecProcessEvent::Exited {
            seq: 2,
            exit_code: 0,
        });
        log.publish(ExecProcessEvent::Closed { seq: 3 });
        let mut events = log.subscribe();
        let replay = vec![
            timeout(Duration::from_secs(1), events.recv())
                .await
                .expect("exit event replay should not time out")
                .expect("exit event replay should be available"),
            timeout(Duration::from_secs(1), events.recv())
                .await
                .expect("closed event replay should not time out")
                .expect("closed event replay should be available"),
        ];
        // Only the cheap lifecycle events remain for replay.
        assert_eq!(
            replay,
            vec![
                ExecProcessEvent::Exited {
                    seq: 2,
                    exit_code: 0,
                },
                ExecProcessEvent::Closed { seq: 3 },
            ]
        );
    }
}

View File

@@ -6,6 +6,7 @@ use tracing::trace;
use crate::ExecBackend;
use crate::ExecProcess;
use crate::ExecProcessEventReceiver;
use crate::ExecServerError;
use crate::StartedExecProcess;
use crate::client::ExecServerClient;
@@ -56,6 +57,10 @@ impl ExecProcess for RemoteExecProcess {
self.session.subscribe_wake()
}
// Pushed process events for this remote process, served by the client session.
fn subscribe_events(&self) -> ExecProcessEventReceiver {
    self.session.subscribe_events()
}
async fn read(
&self,
after_seq: Option<u64>,