mirror of
https://github.com/openai/codex.git
synced 2026-05-04 21:32:21 +03:00
## Why Once the repo-local lint exists, `codex-rs` needs to follow the checked-in convention and CI needs to keep it from drifting. This commit applies the fallback `/*param*/` style consistently across existing positional literal call sites without changing those APIs. The longer-term preference is still to avoid APIs that require comments by choosing clearer parameter types and call shapes. This PR is intentionally the mechanical follow-through for the places where the existing signatures stay in place. After rebasing onto newer `main`, the rollout also had to cover newly introduced `tui_app_server` call sites. That made it clear the first cut of the CI job was too expensive for the common path: it was spending almost as much time installing `cargo-dylint` and re-testing the lint crate as a representative test job spends running product tests. The CI update keeps the full workspace enforcement but trims that extra overhead from ordinary `codex-rs` PRs. ## What changed - keep a dedicated `argument_comment_lint` job in `rust-ci` - mechanically annotate remaining opaque positional literals across `codex-rs` with exact `/*param*/` comments, including the rebased `tui_app_server` call sites that now fall under the lint - keep the checked-in style aligned with the lint policy by using `/*param*/` and leaving string and char literals uncommented - cache `cargo-dylint`, `dylint-link`, and the relevant Cargo registry/git metadata in the lint job - split changed-path detection so the lint crate's own `cargo test` step runs only when `tools/argument-comment-lint/*` or `rust-ci.yml` changes - continue to run the repo wrapper over the `codex-rs` workspace, so product-code enforcement is unchanged Most of the code changes in this commit are intentionally mechanical comment rewrites or insertions driven by the lint itself. 
## Verification - `./tools/argument-comment-lint/run.sh --workspace` - `cargo test -p codex-tui-app-server -p codex-tui` - parsed `.github/workflows/rust-ci.yml` locally with PyYAML --- * -> #14652 * #14651
150 lines
5.5 KiB
Rust
150 lines
5.5 KiB
Rust
use std::sync::Arc;
|
|
|
|
use codex_core::CodexThread;
|
|
use codex_core::NewThread;
|
|
use codex_core::ThreadManager;
|
|
use codex_core::config::Config;
|
|
use codex_protocol::protocol::Event;
|
|
use codex_protocol::protocol::EventMsg;
|
|
use codex_protocol::protocol::Op;
|
|
use tokio::sync::mpsc::UnboundedSender;
|
|
use tokio::sync::mpsc::unbounded_channel;
|
|
|
|
use crate::app_event::AppEvent;
|
|
use crate::app_event_sender::AppEventSender;
|
|
|
|
/// Client name reported to the app server so notifications/events can be
/// attributed to the TUI frontend (see `initialize_app_server_client_name`).
const TUI_NOTIFY_CLIENT: &str = "codex-tui";
|
|
|
|
async fn initialize_app_server_client_name(thread: &CodexThread) {
|
|
if let Err(err) = thread
|
|
.set_app_server_client_name(Some(TUI_NOTIFY_CLIENT.to_string()))
|
|
.await
|
|
{
|
|
tracing::error!("failed to set app server client name: {err}");
|
|
}
|
|
}
|
|
|
|
/// Spawn the agent bootstrapper and op forwarding loop, returning the
|
|
/// `UnboundedSender<Op>` used by the UI to submit operations.
|
|
pub(crate) fn spawn_agent(
|
|
config: Config,
|
|
app_event_tx: AppEventSender,
|
|
server: Arc<ThreadManager>,
|
|
) -> UnboundedSender<Op> {
|
|
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();
|
|
|
|
let app_event_tx_clone = app_event_tx;
|
|
tokio::spawn(async move {
|
|
let NewThread {
|
|
thread,
|
|
session_configured,
|
|
..
|
|
} = match server.start_thread(config).await {
|
|
Ok(v) => v,
|
|
Err(err) => {
|
|
let message = format!("Failed to initialize codex: {err}");
|
|
tracing::error!("{message}");
|
|
app_event_tx_clone.send(AppEvent::CodexEvent(Event {
|
|
id: "".to_string(),
|
|
msg: EventMsg::Error(err.to_error_event(/*message_prefix*/ None)),
|
|
}));
|
|
app_event_tx_clone.send(AppEvent::FatalExitRequest(message));
|
|
tracing::error!("failed to initialize codex: {err}");
|
|
return;
|
|
}
|
|
};
|
|
initialize_app_server_client_name(thread.as_ref()).await;
|
|
|
|
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
|
|
let ev = codex_protocol::protocol::Event {
|
|
// The `id` does not matter for rendering, so we can use a fake value.
|
|
id: "".to_string(),
|
|
msg: codex_protocol::protocol::EventMsg::SessionConfigured(session_configured),
|
|
};
|
|
app_event_tx_clone.send(AppEvent::CodexEvent(ev));
|
|
|
|
let thread_clone = thread.clone();
|
|
tokio::spawn(async move {
|
|
while let Some(op) = codex_op_rx.recv().await {
|
|
let id = thread_clone.submit(op).await;
|
|
if let Err(e) = id {
|
|
tracing::error!("failed to submit op: {e}");
|
|
}
|
|
}
|
|
});
|
|
|
|
while let Ok(event) = thread.next_event().await {
|
|
let is_shutdown_complete = matches!(event.msg, EventMsg::ShutdownComplete);
|
|
app_event_tx_clone.send(AppEvent::CodexEvent(event));
|
|
if is_shutdown_complete {
|
|
// ShutdownComplete is terminal for a thread; drop this receiver task so
|
|
// the Arc<CodexThread> can be released and thread resources can clean up.
|
|
break;
|
|
}
|
|
}
|
|
});
|
|
|
|
codex_op_tx
|
|
}
|
|
|
|
/// Spawn agent loops for an existing thread (e.g., a forked thread).
|
|
/// Sends the provided `SessionConfiguredEvent` immediately, then forwards subsequent
|
|
/// events and accepts Ops for submission.
|
|
pub(crate) fn spawn_agent_from_existing(
|
|
thread: std::sync::Arc<CodexThread>,
|
|
session_configured: codex_protocol::protocol::SessionConfiguredEvent,
|
|
app_event_tx: AppEventSender,
|
|
) -> UnboundedSender<Op> {
|
|
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();
|
|
|
|
let app_event_tx_clone = app_event_tx;
|
|
tokio::spawn(async move {
|
|
initialize_app_server_client_name(thread.as_ref()).await;
|
|
|
|
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
|
|
let ev = codex_protocol::protocol::Event {
|
|
id: "".to_string(),
|
|
msg: codex_protocol::protocol::EventMsg::SessionConfigured(session_configured),
|
|
};
|
|
app_event_tx_clone.send(AppEvent::CodexEvent(ev));
|
|
|
|
let thread_clone = thread.clone();
|
|
tokio::spawn(async move {
|
|
while let Some(op) = codex_op_rx.recv().await {
|
|
let id = thread_clone.submit(op).await;
|
|
if let Err(e) = id {
|
|
tracing::error!("failed to submit op: {e}");
|
|
}
|
|
}
|
|
});
|
|
|
|
while let Ok(event) = thread.next_event().await {
|
|
let is_shutdown_complete = matches!(event.msg, EventMsg::ShutdownComplete);
|
|
app_event_tx_clone.send(AppEvent::CodexEvent(event));
|
|
if is_shutdown_complete {
|
|
// ShutdownComplete is terminal for a thread; drop this receiver task so
|
|
// the Arc<CodexThread> can be released and thread resources can clean up.
|
|
break;
|
|
}
|
|
}
|
|
});
|
|
|
|
codex_op_tx
|
|
}
|
|
|
|
/// Spawn an op-forwarding loop for an existing thread without subscribing to events.
|
|
pub(crate) fn spawn_op_forwarder(thread: std::sync::Arc<CodexThread>) -> UnboundedSender<Op> {
|
|
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();
|
|
|
|
tokio::spawn(async move {
|
|
initialize_app_server_client_name(thread.as_ref()).await;
|
|
while let Some(op) = codex_op_rx.recv().await {
|
|
if let Err(e) = thread.submit(op).await {
|
|
tracing::error!("failed to submit op: {e}");
|
|
}
|
|
}
|
|
});
|
|
|
|
codex_op_tx
|
|
}
|