This commit is contained in:
jif-oai
2025-10-14 12:12:25 +01:00
parent a8278b5423
commit f7d8e12ae0
19 changed files with 2186 additions and 2155 deletions

View File

@@ -1,836 +0,0 @@
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use chrono::SecondsFormat;
use chrono::Utc;
use clap::Parser;
use clap::Subcommand;
use codex_common::CliConfigOverrides;
use codex_common::elapsed::format_duration;
use codex_core::CodexAuth;
use codex_core::auth::read_codex_api_key_from_env;
use codex_core::auth::read_openai_api_key_from_env;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;
use codex_infty::AggregatedVerifierVerdict;
use codex_infty::DirectiveResponse;
use codex_infty::InftyOrchestrator;
use codex_infty::ProgressReporter;
use codex_infty::ResumeParams;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use codex_infty::RunStore;
use codex_infty::VerifierDecision;
use codex_infty::VerifierVerdict;
use crossterm::terminal;
use owo_colors::OwoColorize;
use serde::Serialize;
use supports_color::Stream;
use textwrap::Options as WrapOptions;
use textwrap::wrap;
/// Default wait (seconds) for a role reply when `--timeout-secs` is omitted.
const DEFAULT_TIMEOUT_SECS: u64 = 60;
/// Top-level CLI for Codex Infty: global flags plus a required subcommand.
#[derive(Debug, Parser)]
pub struct InftyCli {
    #[clap(flatten)]
    pub config_overrides: CliConfigOverrides,
    /// Override the default runs root (`~/.codex/infty`).
    #[arg(long = "runs-root", value_name = "DIR")]
    pub runs_root: Option<PathBuf>,
    // The subcommand to execute; dispatched in `InftyCli::run`.
    #[command(subcommand)]
    command: InftyCommand,
}
/// Subcommands supported by the Infty CLI.
#[derive(Debug, Subcommand)]
enum InftyCommand {
    /// Create a new run store and spawn solver/director sessions.
    Create(CreateArgs),
    /// List stored runs.
    List(ListArgs),
    /// Show metadata for a stored run.
    Show(ShowArgs),
    /// Send a message to a role within a run and print the first reply.
    Drive(DriveArgs),
}
/// Arguments for `infty create`.
#[derive(Debug, Parser)]
struct CreateArgs {
    /// Explicit run id. If omitted, a timestamp-based id is generated.
    #[arg(long = "run-id", value_name = "RUN_ID")]
    run_id: Option<String>,
    /// Optional objective to send to the solver immediately after creation.
    #[arg(long)]
    objective: Option<String>,
    /// Timeout in seconds when waiting for the solver reply to --objective.
    #[arg(long = "timeout-secs", default_value_t = DEFAULT_TIMEOUT_SECS)]
    timeout_secs: u64,
}
/// Arguments for `infty list`.
#[derive(Debug, Parser)]
struct ListArgs {
    /// Emit JSON describing the stored runs.
    #[arg(long)]
    json: bool,
}
/// Arguments for `infty show`.
#[derive(Debug, Parser)]
struct ShowArgs {
    /// Run id to display.
    run_id: String,
    /// Emit JSON metadata instead of human-readable text.
    #[arg(long)]
    json: bool,
}
/// Arguments for `infty drive`.
#[derive(Debug, Parser)]
struct DriveArgs {
    /// Run id to resume.
    run_id: String,
    /// Role to address (e.g. solver, director).
    role: String,
    /// Message to send to the role.
    message: String,
    /// Timeout in seconds to await the first assistant message.
    #[arg(long = "timeout-secs", default_value_t = DEFAULT_TIMEOUT_SECS)]
    timeout_secs: u64,
}
/// Serializable snapshot of a stored run, used by `list` and `show` output.
#[derive(Debug, Serialize)]
struct RunSummary {
    // Run identifier (directory name under the runs root).
    run_id: String,
    // Absolute/display path of the run directory.
    path: String,
    // RFC 3339 timestamps (second precision, UTC "Z" suffix).
    created_at: String,
    updated_at: String,
    // Role names recorded in the run metadata (solver, director, verifiers).
    roles: Vec<String>,
}
/// Progress reporter that prints run events to stdout, optionally with ANSI color.
struct TerminalProgressReporter {
    // When true, output is styled with owo-colors; otherwise plain text.
    color_enabled: bool,
}
impl TerminalProgressReporter {
    /// Construct a reporter, styling output with ANSI colors when enabled.
    fn with_color(color_enabled: bool) -> Self {
        Self { color_enabled }
    }
    /// Short human-readable label for a verifier decision.
    fn decision_label(decision: VerifierDecision) -> &'static str {
        if matches!(decision, VerifierDecision::Pass) {
            "pass"
        } else {
            "fail"
        }
    }
    /// Render a decision label, bold green/red when colors are enabled.
    fn format_decision(&self, decision: VerifierDecision) -> String {
        let label = Self::decision_label(decision);
        if self.color_enabled {
            match decision {
                VerifierDecision::Pass => label.green().bold().to_string(),
                VerifierDecision::Fail => label.red().bold().to_string(),
            }
        } else {
            label.to_string()
        }
    }
}
impl ProgressReporter for TerminalProgressReporter {
    /// Announce that the objective was forwarded to the solver.
    fn objective_posted(&self, objective: &str) {
        let line = format!("→ objective sent to solver: {objective}");
        if self.color_enabled {
            println!("{}", line.cyan());
        } else {
            println!("{line}");
        }
    }
    /// Print a waiting notice while the solver composes its reply.
    fn waiting_for_solver(&self) {
        if self.color_enabled {
            println!("{}", "Waiting for solver response...".dimmed());
        } else {
            println!("Waiting for solver response...");
        }
    }
    /// Log raw solver events at trace level (pretty JSON when serializable).
    fn solver_event(&self, event: &EventMsg) {
        match serde_json::to_string_pretty(event) {
            Ok(json) => {
                tracing::trace!("[solver:event]\n{json}");
            }
            Err(err) => {
                // Fall back to Debug formatting if serialization fails.
                tracing::warn!("[solver:event] (failed to serialize: {err}) {event:?}");
            }
        }
    }
    /// Echo an assistant message from the solver with a `[solver]` prefix.
    fn solver_agent_message(&self, agent_msg: &AgentMessageEvent) {
        let prefix = if self.color_enabled {
            format!("{}", "[solver]".magenta().bold())
        } else {
            "[solver]".to_string()
        };
        println!("{prefix} {}", agent_msg.message);
    }
    /// Show that the solver asked the director for direction.
    fn direction_request(&self, prompt: &str) {
        let line = format!("→ solver requested direction: {prompt}");
        if self.color_enabled {
            println!("{}", line.yellow().bold());
        } else {
            println!("{line}");
        }
    }
    /// Print the director's directive, including the rationale when present.
    fn director_response(&self, directive: &DirectiveResponse) {
        match directive.rationale.as_deref() {
            // Non-empty rationale: include it inline.
            Some(rationale) if !rationale.is_empty() => {
                let line = format!(
                    "[director] directive: {} (rationale: {rationale})",
                    directive.directive
                );
                if self.color_enabled {
                    println!("{}", line.blue());
                } else {
                    println!("{line}");
                }
            }
            // Missing or empty rationale: directive only.
            _ => {
                let line = format!("[director] directive: {}", directive.directive);
                if self.color_enabled {
                    println!("{}", line.blue());
                } else {
                    println!("{line}");
                }
            }
        }
    }
    /// Show a verification request for a claim, with optional notes.
    fn verification_request(&self, claim_path: &str, notes: Option<&str>) {
        let line = format!("→ solver requested verification for {claim_path}");
        if self.color_enabled {
            println!("{}", line.yellow().bold());
        } else {
            println!("{line}");
        }
        if let Some(notes) = notes {
            if !notes.is_empty() {
                let notes_line = format!(" notes: {notes}");
                if self.color_enabled {
                    println!("{}", notes_line.dimmed());
                } else {
                    println!("{notes_line}");
                }
            }
        }
    }
    /// Print a single verifier's verdict plus any reasons/suggestions.
    fn verifier_verdict(&self, role: &str, verdict: &VerifierVerdict) {
        let decision = self.format_decision(verdict.verdict);
        let prefix = if self.color_enabled {
            format!("{}", format!("[{role}]").magenta().bold())
        } else {
            format!("[{role}]")
        };
        println!("{prefix} verdict: {decision}");
        if !verdict.reasons.is_empty() {
            let reasons = verdict.reasons.join("; ");
            let line = format!(" reasons: {reasons}");
            if self.color_enabled {
                println!("{}", line.dimmed());
            } else {
                println!("{line}");
            }
        }
        if !verdict.suggestions.is_empty() {
            let suggestions = verdict.suggestions.join("; ");
            let line = format!(" suggestions: {suggestions}");
            if self.color_enabled {
                println!("{}", line.dimmed());
            } else {
                println!("{line}");
            }
        }
    }
    /// Print the aggregated verdict followed by each per-role report.
    fn verification_summary(&self, summary: &AggregatedVerifierVerdict) {
        println!();
        let decision = self.format_decision(summary.overall);
        let heading = if self.color_enabled {
            format!("{}", "Verification summary".bold())
        } else {
            "Verification summary".to_string()
        };
        println!("{heading}: {decision}");
        for report in &summary.verdicts {
            let report_decision = self.format_decision(report.verdict);
            let line = format!(" {}{report_decision}", report.role);
            println!("{line}");
            if !report.reasons.is_empty() {
                let reasons = report.reasons.join("; ");
                let reason_line = format!(" reasons: {reasons}");
                if self.color_enabled {
                    println!("{}", reason_line.dimmed());
                } else {
                    println!("{reason_line}");
                }
            }
            if !report.suggestions.is_empty() {
                let suggestions = report.suggestions.join("; ");
                let suggestion_line = format!(" suggestions: {suggestions}");
                if self.color_enabled {
                    println!("{}", suggestion_line.dimmed());
                } else {
                    println!("{suggestion_line}");
                }
            }
        }
    }
    /// Announce the solver's final deliverable; the summary itself is printed later.
    fn final_delivery(&self, deliverable_path: &Path, summary: Option<&str>) {
        println!();
        let line = format!(
            "✓ solver reported final delivery at {}",
            deliverable_path.display()
        );
        if self.color_enabled {
            println!("{}", line.green().bold());
        } else {
            println!("{line}");
        }
        if let Some(summary) = summary {
            if !summary.is_empty() {
                let hint = " (final summary will be shown below)";
                if self.color_enabled {
                    println!("{}", hint.dimmed());
                } else {
                    println!("{hint}");
                }
            }
        }
    }
    /// Notify the user that Ctrl+C interrupted the run.
    fn run_interrupted(&self) {
        let line = "Run interrupted by Ctrl+C. Shutting down sessions…";
        if self.color_enabled {
            println!("{}", line.red().bold());
        } else {
            println!("{line}");
        }
    }
}
impl InftyCli {
    /// CLI entry point: dispatch the parsed subcommand to its implementation.
    pub async fn run(self) -> Result<()> {
        // Destructure so each handler takes ownership of exactly what it needs.
        let InftyCli {
            config_overrides,
            runs_root,
            command,
        } = self;
        match command {
            InftyCommand::Create(args) => {
                run_create(config_overrides, runs_root, args).await?;
            }
            InftyCommand::List(args) => run_list(runs_root, args)?,
            InftyCommand::Show(args) => run_show(runs_root, args)?,
            InftyCommand::Drive(args) => {
                run_drive(config_overrides, runs_root, args).await?;
            }
        }
        Ok(())
    }
}
async fn run_create(
config_overrides: CliConfigOverrides,
runs_root_override: Option<PathBuf>,
args: CreateArgs,
) -> Result<()> {
let config = load_config(config_overrides).await?;
let auth = load_auth(&config)?;
let runs_root = resolve_runs_root(runs_root_override)?;
let color_enabled = supports_color::on(Stream::Stdout).is_some();
let mut run_id = if let Some(id) = args.run_id {
id
} else {
generate_run_id()
};
run_id = run_id.trim().to_string();
validate_run_id(&run_id)?;
let run_path = runs_root.join(&run_id);
if run_path.exists() {
bail!("run {run_id} already exists at {}", run_path.display());
}
let orchestrator = InftyOrchestrator::with_runs_root(auth, runs_root.clone()).with_progress(
Arc::new(TerminalProgressReporter::with_color(color_enabled)),
);
let run_params = RunParams {
run_id: run_id.clone(),
run_root: Some(run_path.clone()),
solver: RoleConfig::new("solver", config.clone()),
director: RoleConfig::new("director", config.clone()),
verifiers: Vec::new(),
};
if let Some(objective) = args.objective {
let mut options = RunExecutionOptions::default();
options.objective = Some(objective);
let timeout = Duration::from_secs(args.timeout_secs);
options.director_timeout = timeout;
options.verifier_timeout = timeout;
let start = Instant::now();
let start_header = format!("Starting run {run_id}");
if color_enabled {
println!("{}", start_header.blue().bold());
} else {
println!("{start_header}");
}
let location_line = format!(" run directory: {}", run_path.display());
if color_enabled {
println!("{}", location_line.dimmed());
} else {
println!("{location_line}");
}
if let Some(objective_text) = options.objective.as_deref() {
if !objective_text.trim().is_empty() {
let objective_line = format!(" objective: {objective_text}");
if color_enabled {
println!("{}", objective_line.dimmed());
} else {
println!("{objective_line}");
}
}
}
println!();
let objective_snapshot = options.objective.clone();
let outcome = orchestrator
.execute_new_run(run_params, options)
.await
.with_context(|| format!("failed to execute run {run_id}"))?;
let duration = start.elapsed();
print_run_summary_box(
color_enabled,
&run_id,
&run_path,
&outcome.deliverable_path,
outcome.summary.as_deref(),
objective_snapshot.as_deref(),
duration,
);
} else {
let sessions = orchestrator
.spawn_run(run_params)
.await
.with_context(|| format!("failed to create run {run_id}"))?;
println!(
"Created run {run_id} at {}",
sessions.store.path().display()
);
}
Ok(())
}
/// List stored runs under the resolved runs root, as JSON (`--json`) or as
/// tab-separated `run_id  updated_at  path` lines.
fn run_list(runs_root_override: Option<PathBuf>, args: ListArgs) -> Result<()> {
    let runs_root = resolve_runs_root(runs_root_override)?;
    let listings = collect_run_summaries(&runs_root)?;
    if args.json {
        println!("{}", serde_json::to_string_pretty(&listings)?);
    } else if listings.is_empty() {
        println!("No runs found under {}", runs_root.display());
    } else {
        println!("Runs in {}", runs_root.display());
        for summary in listings {
            println!(
                "{}\t{}\t{}",
                summary.run_id, summary.updated_at, summary.path
            );
        }
    }
    Ok(())
}
/// Print metadata for one stored run, as JSON (`--json`) or as labeled lines.
fn run_show(runs_root_override: Option<PathBuf>, args: ShowArgs) -> Result<()> {
    validate_run_id(&args.run_id)?;
    let runs_root = resolve_runs_root(runs_root_override)?;
    let run_path = runs_root.join(&args.run_id);
    let store =
        RunStore::load(&run_path).with_context(|| format!("failed to load run {}", args.run_id))?;
    let metadata = store.metadata();
    // Flatten store metadata into the serializable summary shape.
    let summary = RunSummary {
        run_id: metadata.run_id.clone(),
        path: run_path.display().to_string(),
        created_at: metadata
            .created_at
            .to_rfc3339_opts(SecondsFormat::Secs, true),
        updated_at: metadata
            .updated_at
            .to_rfc3339_opts(SecondsFormat::Secs, true),
        roles: metadata
            .roles
            .iter()
            .map(|role| role.role.clone())
            .collect(),
    };
    if args.json {
        println!("{}", serde_json::to_string_pretty(&summary)?);
        return Ok(());
    }
    println!("Run: {}", summary.run_id);
    println!("Path: {}", summary.path);
    println!("Created: {}", summary.created_at);
    println!("Updated: {}", summary.updated_at);
    println!("Roles: {}", summary.roles.join(", "));
    Ok(())
}
/// Resume a stored run, deliver `args.message` to the named role, and print
/// the first assistant reply.
async fn run_drive(
    config_overrides: CliConfigOverrides,
    runs_root_override: Option<PathBuf>,
    args: DriveArgs,
) -> Result<()> {
    validate_run_id(&args.run_id)?;
    let config = load_config(config_overrides).await?;
    let auth = load_auth(&config)?;
    let runs_root = resolve_runs_root(runs_root_override)?;
    let run_path = runs_root.join(&args.run_id);
    let store =
        RunStore::load(&run_path).with_context(|| format!("failed to load run {}", args.run_id))?;
    // Solver and director are mandatory roles; their absence is an error.
    let solver_role = store
        .role_metadata("solver")
        .ok_or_else(|| anyhow!("run {} is missing solver role", args.run_id))?;
    let director_role = store
        .role_metadata("director")
        .ok_or_else(|| anyhow!("run {} is missing director role", args.run_id))?;
    // Every remaining recorded role is treated as a verifier.
    let verifiers: Vec<_> = store
        .metadata()
        .roles
        .iter()
        .filter(|role| role.role != solver_role.role && role.role != director_role.role)
        .map(|role| RoleConfig::new(role.role.clone(), config.clone()))
        .collect();
    let orchestrator = InftyOrchestrator::with_runs_root(auth, runs_root)
        .with_progress(Arc::new(TerminalProgressReporter::default()));
    let sessions = orchestrator
        .resume_run(ResumeParams {
            run_path: run_path.clone(),
            solver: RoleConfig::new(solver_role.role.clone(), config.clone()),
            director: RoleConfig::new(director_role.role.clone(), config.clone()),
            verifiers,
        })
        .await
        .with_context(|| format!("failed to resume run {}", args.run_id))?;
    let timeout = Duration::from_secs(args.timeout_secs);
    let reply = orchestrator
        .call_role(&sessions.run_id, &args.role, args.message, timeout, None)
        .await
        .with_context(|| {
            format!(
                "failed to deliver message to role {} in run {}",
                args.role, sessions.run_id
            )
        })?;
    println!("{}", reply.message.message);
    Ok(())
}
/// Validate a run id: non-empty, no leading/trailing '.', and only ASCII
/// alphanumerics plus '-', '_', '.' (keeps ids safe as directory names).
fn validate_run_id(run_id: &str) -> Result<()> {
    if run_id.is_empty() {
        bail!("run id must not be empty");
    }
    if run_id.starts_with('.') || run_id.ends_with('.') {
        bail!("run id must not begin or end with '.'");
    }
    let allowed = |c: char| c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.');
    if !run_id.chars().all(allowed) {
        bail!("run id may only contain ASCII alphanumerics, '-', '_', or '.'");
    }
    Ok(())
}
/// Generate a timestamp-based run id in UTC, e.g. `run-20241030-123000`.
fn generate_run_id() -> String {
    // `format` returns a lazy Display adapter; render it directly rather
    // than routing through `format!("{...}")`.
    Utc::now().format("run-%Y%m%d-%H%M%S").to_string()
}
/// Load the Codex configuration, applying any `-c key=value` CLI overrides.
async fn load_config(cli_overrides: CliConfigOverrides) -> Result<Config> {
    let overrides = cli_overrides
        .parse_overrides()
        .map_err(|err| anyhow!("failed to parse -c overrides: {err}"))?;
    Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .await
        .context("failed to load Codex configuration")
}
/// Resolve authentication in priority order: `auth.json` in the Codex home,
/// then the Codex API key env var, then the OpenAI API key env var.
fn load_auth(config: &Config) -> Result<CodexAuth> {
    if let Some(auth) =
        CodexAuth::from_codex_home(&config.codex_home).context("failed to read auth.json")?
    {
        return Ok(auth);
    }
    if let Some(api_key) = read_codex_api_key_from_env() {
        return Ok(CodexAuth::from_api_key(&api_key));
    }
    if let Some(api_key) = read_openai_api_key_from_env() {
        return Ok(CodexAuth::from_api_key(&api_key));
    }
    bail!("no Codex authentication found. Run `codex login` or set OPENAI_API_KEY.");
}
/// Resolve the runs root: an explicit override wins, otherwise fall back to
/// the crate default (`~/.codex/infty`).
fn resolve_runs_root(override_path: Option<PathBuf>) -> Result<PathBuf> {
    match override_path {
        Some(path) => Ok(path),
        // `?` converts the crate error into `anyhow::Error`.
        None => Ok(codex_infty::default_runs_root()?),
    }
}
/// Scan `root` for run directories and build summaries, most recently
/// updated first.
///
/// A missing root yields an empty list; directories whose metadata cannot be
/// loaded are skipped with a warning on stderr rather than failing the scan.
fn collect_run_summaries(root: &Path) -> Result<Vec<RunSummary>> {
    let mut summaries = Vec::new();
    let iter = match fs::read_dir(root) {
        Ok(read_dir) => read_dir,
        // No runs root yet is not an error — there are simply no runs.
        Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(summaries),
        Err(err) => {
            return Err(
                anyhow!(err).context(format!("failed to read runs root {}", root.display()))
            );
        }
    };
    for entry in iter {
        let entry = entry?;
        // Only directories can be run stores.
        if !entry.file_type()?.is_dir() {
            continue;
        }
        let run_path = entry.path();
        let store = match RunStore::load(&run_path) {
            Ok(store) => store,
            Err(err) => {
                // Best-effort listing: report and keep going.
                eprintln!(
                    "Skipping {}: failed to load run metadata: {err}",
                    run_path.display()
                );
                continue;
            }
        };
        let metadata = store.metadata();
        summaries.push(RunSummary {
            run_id: metadata.run_id.clone(),
            path: run_path.display().to_string(),
            created_at: metadata
                .created_at
                .to_rfc3339_opts(SecondsFormat::Secs, true),
            updated_at: metadata
                .updated_at
                .to_rfc3339_opts(SecondsFormat::Secs, true),
            roles: metadata
                .roles
                .iter()
                .map(|role| role.role.clone())
                .collect(),
        });
    }
    // RFC 3339 strings sort chronologically; reverse order puts newest first.
    summaries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
    Ok(summaries)
}
impl Default for TerminalProgressReporter {
    /// Enable color based on whether stdout supports it.
    fn default() -> Self {
        Self::with_color(supports_color::on(Stream::Stdout).is_some())
    }
}
/// Render an ASCII box summarizing a completed run: id, directory, optional
/// objective, deliverable path, elapsed time, and optional summary text.
///
/// The box width adapts to the terminal (capped at 84 columns) and values are
/// word-wrapped into the value column.
fn print_run_summary_box(
    color_enabled: bool,
    run_id: &str,
    run_path: &Path,
    deliverable_path: &Path,
    summary: Option<&str>,
    objective: Option<&str>,
    duration: Duration,
) {
    // Build the (label, value) rows; optional rows are skipped when blank.
    let mut items = Vec::new();
    items.push(("Run ID".to_string(), run_id.to_string()));
    items.push(("Run Directory".to_string(), run_path.display().to_string()));
    if let Some(objective) = objective {
        if !objective.trim().is_empty() {
            items.push(("Objective".to_string(), objective.trim().to_string()));
        }
    }
    items.push((
        "Deliverable".to_string(),
        deliverable_path.display().to_string(),
    ));
    items.push(("Total Time".to_string(), format_duration(duration)));
    if let Some(summary) = summary {
        let trimmed = summary.trim();
        if !trimmed.is_empty() {
            items.push(("Summary".to_string(), trimmed.to_string()));
        }
    }
    // Label column is as wide as the longest label, with a floor of 12.
    let label_width = items
        .iter()
        .map(|(label, _)| label.len())
        .max()
        .unwrap_or(0)
        .max(12);
    const DEFAULT_MAX_WIDTH: usize = 84;
    const MIN_VALUE_WIDTH: usize = 20;
    // 7 accounts for the border/padding characters around the two cells.
    let label_padding = label_width + 7;
    let min_total_width = label_padding + MIN_VALUE_WIDTH;
    // Fit within the terminal (minus a 2-column margin); fall back to the
    // default width when the size can't be queried.
    let available_width = terminal::size()
        .ok()
        .map(|(cols, _)| usize::from(cols).saturating_sub(2))
        .unwrap_or(DEFAULT_MAX_WIDTH);
    let max_width = available_width.min(DEFAULT_MAX_WIDTH);
    let lower_bound = min_total_width.min(available_width);
    let mut total_width = max_width.max(lower_bound).max(label_padding + 1);
    let mut value_width = total_width.saturating_sub(label_padding);
    // Guarantee a usable value column even on very narrow terminals,
    // widening the box if necessary.
    if value_width < MIN_VALUE_WIDTH {
        value_width = MIN_VALUE_WIDTH;
        total_width = label_padding + value_width;
    }
    let inner_width = total_width.saturating_sub(4);
    let top_border = format!("+{}+", "=".repeat(total_width.saturating_sub(2)));
    let separator = format!("+{}+", "-".repeat(total_width.saturating_sub(2)));
    // Centered title row.
    let title_line = format!(
        "| {:^inner_width$} |",
        "Run Summary",
        inner_width = inner_width
    );
    println!();
    println!("{top_border}");
    if color_enabled {
        println!("{}", title_line.bold());
    } else {
        println!("{title_line}");
    }
    println!("{separator}");
    for (index, (label, value)) in items.iter().enumerate() {
        // Wrap each newline-separated paragraph of the value into rows that
        // fit the value column; blank interior paragraphs become blank rows.
        let mut rows = Vec::new();
        for (idx, paragraph) in value.split('\n').enumerate() {
            let trimmed = paragraph.trim();
            if trimmed.is_empty() {
                if idx > 0 {
                    rows.push(String::new());
                }
                continue;
            }
            let wrapped = wrap(trimmed, WrapOptions::new(value_width).break_words(false));
            if wrapped.is_empty() {
                rows.push(String::new());
            } else {
                rows.extend(wrapped.into_iter().map(|line| line.into_owned()));
            }
        }
        if rows.is_empty() {
            rows.push(String::new());
        }
        for (line_idx, line) in rows.iter().enumerate() {
            // Only the first wrapped row carries the label.
            let label_cell = if line_idx == 0 { label.as_str() } else { "" };
            let row_line = format!(
                "| {label_cell:<label_width$} | {line:<value_width$} |",
                label_cell = label_cell,
                line = line,
                label_width = label_width,
                value_width = value_width
            );
            if color_enabled {
                match label.as_str() {
                    "Deliverable" => println!("{}", row_line.green()),
                    "Summary" => println!("{}", row_line.bold()),
                    _ => println!("{row_line}"),
                }
            } else {
                println!("{row_line}");
            }
        }
        if index + 1 < items.len() {
            println!("{separator}");
        }
    }
    println!("{top_border}");
    println!();
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    // Accepts the documented character set, rejects empty/dotted/slashed ids.
    #[test]
    fn validates_run_ids() {
        assert!(validate_run_id("run-20241030-123000").is_ok());
        assert!(validate_run_id("run.alpha").is_ok());
        assert!(validate_run_id("").is_err());
        assert!(validate_run_id("..bad").is_err());
        assert!(validate_run_id("bad/value").is_err());
    }
    // Generated ids follow the `run-YYYYMMDD-HHMMSS` shape.
    #[test]
    fn generates_timestamped_run_id() {
        let run_id = generate_run_id();
        assert!(run_id.starts_with("run-"));
        assert_eq!(run_id.len(), "run-YYYYMMDD-HHMMSS".len());
    }
    // A non-existent runs root is treated as "no runs", not an error.
    #[test]
    fn collect_summaries_returns_empty_for_missing_root() {
        let temp = TempDir::new().expect("temp dir");
        let missing = temp.path().join("not-present");
        let summaries = collect_run_summaries(&missing).expect("collect");
        assert!(summaries.is_empty());
    }
}

View File

@@ -0,0 +1,108 @@
use std::path::PathBuf;
use anyhow::Result;
use clap::Parser;
use clap::Subcommand;
use codex_common::CliConfigOverrides;
use super::commands;
/// Top-level CLI for Codex Infty: global flags plus a required subcommand.
#[derive(Debug, Parser)]
pub struct InftyCli {
    #[clap(flatten)]
    pub config_overrides: CliConfigOverrides,
    /// Override the default runs root (`~/.codex/infty`).
    #[arg(long = "runs-root", value_name = "DIR")]
    pub runs_root: Option<PathBuf>,
    // The subcommand to execute; dispatched in `InftyCli::run`.
    #[command(subcommand)]
    command: InftyCommand,
}
/// Subcommands supported by the Infty CLI.
#[derive(Debug, Subcommand)]
enum InftyCommand {
    /// Create a new run store and spawn solver/director sessions.
    Create(CreateArgs),
    /// List stored runs.
    List(ListArgs),
    /// Show metadata for a stored run.
    Show(ShowArgs),
    /// Send a message to a role within a run and print the first reply.
    Drive(DriveArgs),
}
/// Arguments for `infty create`.
#[derive(Debug, Parser)]
pub(crate) struct CreateArgs {
    /// Explicit run id. If omitted, a timestamp-based id is generated.
    #[arg(long = "run-id", value_name = "RUN_ID")]
    pub run_id: Option<String>,
    /// Optional objective to send to the solver immediately after creation.
    #[arg(long)]
    pub objective: Option<String>,
    /// Timeout in seconds when waiting for the solver reply to --objective.
    #[arg(long = "timeout-secs", default_value_t = super::commands::DEFAULT_TIMEOUT_SECS)]
    pub timeout_secs: u64,
}
/// Arguments for `infty list`.
#[derive(Debug, Parser)]
pub(crate) struct ListArgs {
    /// Emit JSON describing the stored runs.
    #[arg(long)]
    pub json: bool,
}
/// Arguments for `infty show`.
#[derive(Debug, Parser)]
pub(crate) struct ShowArgs {
    /// Run id to display.
    #[arg(value_name = "RUN_ID")]
    pub run_id: String,
    /// Emit JSON metadata instead of human-readable text.
    #[arg(long)]
    pub json: bool,
}
/// Arguments for `infty drive`.
#[derive(Debug, Parser)]
pub(crate) struct DriveArgs {
    /// Run id to resume.
    // `value_name` matches the convention used by `ShowArgs` so help output
    // shows consistent placeholder names.
    #[arg(value_name = "RUN_ID")]
    pub run_id: String,
    /// Role to address (e.g. solver, director).
    #[arg(value_name = "ROLE")]
    pub role: String,
    /// Message to send to the role.
    #[arg(value_name = "MESSAGE")]
    pub message: String,
    /// Timeout in seconds to await the first assistant message.
    #[arg(long = "timeout-secs", default_value_t = super::commands::DEFAULT_TIMEOUT_SECS)]
    pub timeout_secs: u64,
}
impl InftyCli {
    /// CLI entry point: dispatch the parsed subcommand to its implementation
    /// in the `commands` module.
    pub async fn run(self) -> Result<()> {
        // Destructure so each handler takes ownership of exactly what it needs.
        let InftyCli {
            config_overrides,
            runs_root,
            command,
        } = self;
        match command {
            InftyCommand::Create(args) => {
                commands::run_create(config_overrides, runs_root, args).await?;
            }
            InftyCommand::List(args) => commands::run_list(runs_root, args)?,
            InftyCommand::Show(args) => commands::run_show(runs_root, args)?,
            InftyCommand::Drive(args) => {
                commands::run_drive(config_overrides, runs_root, args).await?;
            }
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,393 @@
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use chrono::SecondsFormat;
use chrono::Utc;
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::read_codex_api_key_from_env;
use codex_core::auth::read_openai_api_key_from_env;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_infty::InftyOrchestrator;
use codex_infty::ResumeParams;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use codex_infty::RunStore;
use owo_colors::OwoColorize;
use serde::Serialize;
use supports_color::Stream;
use super::args::CreateArgs;
use super::args::DriveArgs;
use super::args::ListArgs;
use super::args::ShowArgs;
use super::progress::TerminalProgressReporter;
use super::summary::print_run_summary_box;
/// Default wait (seconds) for a role reply when `--timeout-secs` is omitted.
pub(crate) const DEFAULT_TIMEOUT_SECS: u64 = 60;
/// Serializable snapshot of a stored run, used by `list` and `show` output.
#[derive(Debug, Serialize)]
struct RunSummary {
    // Run identifier (directory name under the runs root).
    run_id: String,
    // Absolute/display path of the run directory.
    path: String,
    // RFC 3339 timestamps (second precision, UTC "Z" suffix).
    created_at: String,
    updated_at: String,
    // Role names recorded in the run metadata (solver, director, verifiers).
    roles: Vec<String>,
}
pub(crate) async fn run_create(
config_overrides: CliConfigOverrides,
runs_root_override: Option<PathBuf>,
args: CreateArgs,
) -> Result<()> {
let config = load_config(config_overrides).await?;
let auth = load_auth(&config)?;
let runs_root = resolve_runs_root(runs_root_override)?;
let color_enabled = supports_color::on(Stream::Stdout).is_some();
let mut run_id = if let Some(id) = args.run_id {
id
} else {
generate_run_id()
};
run_id = run_id.trim().to_string();
validate_run_id(&run_id)?;
let run_path = runs_root.join(&run_id);
if run_path.exists() {
bail!("run {run_id} already exists at {}", run_path.display());
}
let orchestrator = InftyOrchestrator::with_runs_root(auth, runs_root.clone()).with_progress(
Arc::new(TerminalProgressReporter::with_color(color_enabled)),
);
let run_params = RunParams {
run_id: run_id.clone(),
run_root: Some(run_path.clone()),
solver: RoleConfig::new("solver", config.clone()),
director: RoleConfig::new("director", config.clone()),
verifiers: Vec::new(),
};
if let Some(objective) = args.objective {
let mut options = RunExecutionOptions::default();
options.objective = Some(objective);
let timeout = Duration::from_secs(args.timeout_secs);
options.director_timeout = timeout;
options.verifier_timeout = timeout;
let start = Instant::now();
let start_header = format!("Starting run {run_id}");
if color_enabled {
println!("{}", start_header.blue().bold());
} else {
println!("{start_header}");
}
let location_line = format!(" run directory: {}", run_path.display());
if color_enabled {
println!("{}", location_line.dimmed());
} else {
println!("{location_line}");
}
if let Some(objective_text) = options.objective.as_deref() {
if !objective_text.trim().is_empty() {
let objective_line = format!(" objective: {objective_text}");
if color_enabled {
println!("{}", objective_line.cyan());
} else {
println!("{objective_line}");
}
}
}
println!();
let objective_snapshot = options.objective.clone();
let outcome = orchestrator
.execute_new_run(run_params, options)
.await
.with_context(|| format!("failed to execute run {run_id}"))?;
let duration = start.elapsed();
print_run_summary_box(
color_enabled,
&run_id,
&run_path,
&outcome.deliverable_path,
outcome.summary.as_deref(),
objective_snapshot.as_deref(),
duration,
);
} else {
let sessions = orchestrator
.spawn_run(run_params)
.await
.with_context(|| format!("failed to create run {run_id}"))?;
println!(
"Created run {run_id} at {}",
sessions.store.path().display()
);
}
Ok(())
}
/// List stored runs under the resolved runs root, as JSON (`--json`) or as
/// tab-separated `run_id  updated_at  path` lines.
pub(crate) fn run_list(runs_root_override: Option<PathBuf>, args: ListArgs) -> Result<()> {
    let runs_root = resolve_runs_root(runs_root_override)?;
    let listings = collect_run_summaries(&runs_root)?;
    if args.json {
        println!("{}", serde_json::to_string_pretty(&listings)?);
    } else if listings.is_empty() {
        println!("No runs found under {}", runs_root.display());
    } else {
        println!("Runs in {}", runs_root.display());
        for summary in listings {
            println!(
                "{}\t{}\t{}",
                summary.run_id, summary.updated_at, summary.path
            );
        }
    }
    Ok(())
}
/// Print metadata for one stored run, as JSON (`--json`) or as labeled lines.
pub(crate) fn run_show(runs_root_override: Option<PathBuf>, args: ShowArgs) -> Result<()> {
    validate_run_id(&args.run_id)?;
    let runs_root = resolve_runs_root(runs_root_override)?;
    let run_path = runs_root.join(&args.run_id);
    let store =
        RunStore::load(&run_path).with_context(|| format!("failed to load run {}", args.run_id))?;
    let metadata = store.metadata();
    // Flatten store metadata into the serializable summary shape.
    let summary = RunSummary {
        run_id: metadata.run_id.clone(),
        path: run_path.display().to_string(),
        created_at: metadata
            .created_at
            .to_rfc3339_opts(SecondsFormat::Secs, true),
        updated_at: metadata
            .updated_at
            .to_rfc3339_opts(SecondsFormat::Secs, true),
        roles: metadata
            .roles
            .iter()
            .map(|role| role.role.clone())
            .collect(),
    };
    if args.json {
        println!("{}", serde_json::to_string_pretty(&summary)?);
        return Ok(());
    }
    println!("Run: {}", summary.run_id);
    println!("Path: {}", summary.path);
    println!("Created: {}", summary.created_at);
    println!("Updated: {}", summary.updated_at);
    println!("Roles: {}", summary.roles.join(", "));
    Ok(())
}
/// Resume a stored run, deliver `args.message` to the named role, and print
/// the first assistant reply.
pub(crate) async fn run_drive(
    config_overrides: CliConfigOverrides,
    runs_root_override: Option<PathBuf>,
    args: DriveArgs,
) -> Result<()> {
    validate_run_id(&args.run_id)?;
    let config = load_config(config_overrides).await?;
    let auth = load_auth(&config)?;
    let runs_root = resolve_runs_root(runs_root_override)?;
    let run_path = runs_root.join(&args.run_id);
    let store =
        RunStore::load(&run_path).with_context(|| format!("failed to load run {}", args.run_id))?;
    // Solver and director are mandatory roles; their absence is an error.
    let solver_role = store
        .role_metadata("solver")
        .ok_or_else(|| anyhow!("run {} is missing solver role", args.run_id))?;
    let director_role = store
        .role_metadata("director")
        .ok_or_else(|| anyhow!("run {} is missing director role", args.run_id))?;
    // Every remaining recorded role is treated as a verifier.
    let verifiers: Vec<_> = store
        .metadata()
        .roles
        .iter()
        .filter(|role| role.role != solver_role.role && role.role != director_role.role)
        .map(|role| RoleConfig::new(role.role.clone(), config.clone()))
        .collect();
    let orchestrator = InftyOrchestrator::with_runs_root(auth, runs_root)
        .with_progress(Arc::new(TerminalProgressReporter::default()));
    let sessions = orchestrator
        .resume_run(ResumeParams {
            run_path: run_path.clone(),
            solver: RoleConfig::new(solver_role.role.clone(), config.clone()),
            director: RoleConfig::new(director_role.role.clone(), config.clone()),
            verifiers,
        })
        .await
        .with_context(|| format!("failed to resume run {}", args.run_id))?;
    let timeout = Duration::from_secs(args.timeout_secs);
    let reply = orchestrator
        .call_role(&sessions.run_id, &args.role, args.message, timeout, None)
        .await
        .with_context(|| {
            format!(
                "failed to deliver message to role {} in run {}",
                args.role, sessions.run_id
            )
        })?;
    println!("{}", reply.message.message);
    Ok(())
}
/// Generate a timestamp-based run id in UTC, e.g. `run-20241030-123000`.
fn generate_run_id() -> String {
    // `format` returns a lazy Display adapter; render it directly rather
    // than routing through `format!("{...}")`.
    Utc::now().format("run-%Y%m%d-%H%M%S").to_string()
}
/// Validate a run id: non-empty, no leading/trailing '.', and only ASCII
/// alphanumerics plus '-', '_', '.' (keeps ids safe as directory names).
pub(crate) fn validate_run_id(run_id: &str) -> Result<()> {
    if run_id.is_empty() {
        bail!("run id must not be empty");
    }
    if run_id.starts_with('.') || run_id.ends_with('.') {
        bail!("run id must not begin or end with '.'");
    }
    let allowed = |c: char| c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.');
    if !run_id.chars().all(allowed) {
        bail!("run id may only contain ASCII alphanumerics, '-', '_', or '.'");
    }
    Ok(())
}
/// Load the Codex configuration, applying any `-c key=value` CLI overrides.
async fn load_config(cli_overrides: CliConfigOverrides) -> Result<Config> {
    let overrides = cli_overrides
        .parse_overrides()
        .map_err(|err| anyhow!("failed to parse -c overrides: {err}"))?;
    Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .await
        .context("failed to load Codex configuration")
}
/// Resolve authentication in priority order: `auth.json` in the Codex home,
/// then the Codex API key env var, then the OpenAI API key env var.
fn load_auth(config: &Config) -> Result<CodexAuth> {
    if let Some(auth) =
        CodexAuth::from_codex_home(&config.codex_home).context("failed to read auth.json")?
    {
        return Ok(auth);
    }
    if let Some(api_key) = read_codex_api_key_from_env() {
        return Ok(CodexAuth::from_api_key(&api_key));
    }
    if let Some(api_key) = read_openai_api_key_from_env() {
        return Ok(CodexAuth::from_api_key(&api_key));
    }
    bail!("no Codex authentication found. Run `codex login` or set OPENAI_API_KEY.");
}
/// Resolve the runs root: an explicit override wins, otherwise fall back to
/// the crate default (`~/.codex/infty`).
fn resolve_runs_root(override_path: Option<PathBuf>) -> Result<PathBuf> {
    match override_path {
        Some(path) => Ok(path),
        // `?` converts the crate error into `anyhow::Error`.
        None => Ok(codex_infty::default_runs_root()?),
    }
}
/// Scan `root` for run directories and build summaries, most recently
/// updated first.
///
/// A missing root yields an empty list; directories whose metadata cannot be
/// loaded are skipped with a warning on stderr rather than failing the scan.
fn collect_run_summaries(root: &Path) -> Result<Vec<RunSummary>> {
    let mut summaries = Vec::new();
    let iter = match fs::read_dir(root) {
        Ok(read_dir) => read_dir,
        // No runs root yet is not an error — there are simply no runs.
        Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(summaries),
        Err(err) => {
            return Err(
                anyhow!(err).context(format!("failed to read runs root {}", root.display()))
            );
        }
    };
    for entry in iter {
        let entry = entry?;
        // Only directories can be run stores.
        if !entry.file_type()?.is_dir() {
            continue;
        }
        let run_path = entry.path();
        let store = match RunStore::load(&run_path) {
            Ok(store) => store,
            Err(err) => {
                // Best-effort listing: report and keep going.
                eprintln!(
                    "Skipping {}: failed to load run metadata: {err}",
                    run_path.display()
                );
                continue;
            }
        };
        let metadata = store.metadata();
        summaries.push(RunSummary {
            run_id: metadata.run_id.clone(),
            path: run_path.display().to_string(),
            created_at: metadata
                .created_at
                .to_rfc3339_opts(SecondsFormat::Secs, true),
            updated_at: metadata
                .updated_at
                .to_rfc3339_opts(SecondsFormat::Secs, true),
            roles: metadata
                .roles
                .iter()
                .map(|role| role.role.clone())
                .collect(),
        });
    }
    // RFC 3339 strings sort chronologically; reverse order puts newest first.
    summaries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
    Ok(summaries)
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Well-formed ids pass; empty, dot-prefixed, or slashed ids fail.
    #[test]
    fn validates_run_ids() {
        for valid in ["run-20241030-123000", "run.alpha"] {
            assert!(validate_run_id(valid).is_ok());
        }
        for invalid in ["", "..bad", "bad/value"] {
            assert!(validate_run_id(invalid).is_err());
        }
    }

    /// Generated ids carry the `run-` prefix and the fixed timestamp width.
    #[test]
    fn generates_timestamped_run_id() {
        let run_id = generate_run_id();
        let expected_len = "run-YYYYMMDD-HHMMSS".len();
        assert!(run_id.starts_with("run-"));
        assert_eq!(run_id.len(), expected_len);
    }

    /// A nonexistent runs root is not an error — it simply has no runs.
    #[test]
    fn collect_summaries_returns_empty_for_missing_root() {
        let workspace = TempDir::new().expect("temp dir");
        let missing_root = workspace.path().join("not-present");
        let summaries = collect_run_summaries(&missing_root).expect("collect");
        assert!(summaries.is_empty());
    }
}

View File

@@ -0,0 +1,6 @@
mod args;
mod commands;
mod progress;
mod summary;
pub use args::InftyCli;

View File

@@ -0,0 +1,226 @@
use std::path::Path;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;
use codex_infty::AggregatedVerifierVerdict;
use codex_infty::DirectiveResponse;
use codex_infty::ProgressReporter;
use codex_infty::VerifierDecision;
use codex_infty::VerifierVerdict;
use owo_colors::OwoColorize;
use supports_color::Stream;
/// Progress reporter that writes run events to stdout, optionally colorized.
pub(crate) struct TerminalProgressReporter {
    // When true, output lines are styled with ANSI colors via owo-colors.
    color_enabled: bool,
}
impl TerminalProgressReporter {
pub(crate) fn with_color(color_enabled: bool) -> Self {
Self { color_enabled }
}
fn format_decision(&self, decision: VerifierDecision) -> String {
let label = match decision {
VerifierDecision::Pass => "pass",
VerifierDecision::Fail => "fail",
};
if !self.color_enabled {
return label.to_string();
}
match decision {
VerifierDecision::Pass => format!("{}", label.green().bold()),
VerifierDecision::Fail => format!("{}", label.red().bold()),
}
}
}
impl Default for TerminalProgressReporter {
    /// Enable color only when stdout reports color support.
    fn default() -> Self {
        Self::with_color(supports_color::on(Stream::Stdout).is_some())
    }
}
/// Print `text` to stdout, passing it through `colorize` only when color
/// output is enabled. Factors out the color/no-color branch that was
/// repeated in every reporter callback below.
fn emit(color_enabled: bool, text: &str, colorize: impl Fn(&str) -> String) {
    if color_enabled {
        println!("{}", colorize(text));
    } else {
        println!("{text}");
    }
}

impl ProgressReporter for TerminalProgressReporter {
    /// Announce that the objective was delivered to the solver.
    fn objective_posted(&self, objective: &str) {
        let line = format!("→ objective sent to solver: {objective}");
        emit(self.color_enabled, &line, |s| s.cyan().to_string());
    }

    fn waiting_for_solver(&self) {
        emit(self.color_enabled, "Waiting for solver response...", |s| {
            s.dimmed().to_string()
        });
    }

    /// Raw solver protocol events go to tracing, not stdout.
    fn solver_event(&self, event: &EventMsg) {
        match serde_json::to_string_pretty(event) {
            Ok(json) => {
                tracing::trace!("[solver:event]\n{json}");
            }
            Err(err) => {
                tracing::warn!("[solver:event] (failed to serialize: {err}) {event:?}");
            }
        }
    }

    /// Echo a solver assistant message with a `[solver]` prefix.
    fn solver_agent_message(&self, agent_msg: &AgentMessageEvent) {
        let prefix = if self.color_enabled {
            "[solver]".magenta().bold().to_string()
        } else {
            "[solver]".to_string()
        };
        println!("{prefix} {}", agent_msg.message);
    }

    fn direction_request(&self, prompt: &str) {
        let line = format!("→ solver requested direction: {prompt}");
        emit(self.color_enabled, &line, |s| s.yellow().bold().to_string());
    }

    fn director_response(&self, directive: &DirectiveResponse) {
        // Append the rationale suffix only when one is present and non-empty.
        let line = match directive.rationale.as_deref() {
            Some(rationale) if !rationale.is_empty() => format!(
                "[director] directive: {} (rationale: {rationale})",
                directive.directive
            ),
            _ => format!("[director] directive: {}", directive.directive),
        };
        emit(self.color_enabled, &line, |s| s.blue().to_string());
    }

    fn verification_request(&self, claim_path: &str, notes: Option<&str>) {
        let line = format!("→ solver requested verification for {claim_path}");
        emit(self.color_enabled, &line, |s| s.yellow().bold().to_string());
        if let Some(notes) = notes {
            if !notes.is_empty() {
                let notes_line = format!("  notes: {notes}");
                emit(self.color_enabled, &notes_line, |s| s.dimmed().to_string());
            }
        }
    }

    /// Report one verifier's verdict plus its optional reasons/suggestions.
    fn verifier_verdict(&self, role: &str, verdict: &VerifierVerdict) {
        let decision = self.format_decision(verdict.verdict);
        let prefix = if self.color_enabled {
            format!("{}", format!("[{role}]").magenta().bold())
        } else {
            format!("[{role}]")
        };
        println!("{prefix} verdict: {decision}");
        if !verdict.reasons.is_empty() {
            let line = format!("  reasons: {}", verdict.reasons.join("; "));
            emit(self.color_enabled, &line, |s| s.dimmed().to_string());
        }
        if !verdict.suggestions.is_empty() {
            let line = format!("  suggestions: {}", verdict.suggestions.join("; "));
            emit(self.color_enabled, &line, |s| s.dimmed().to_string());
        }
    }

    /// Print the aggregated verdict followed by one entry per verifier.
    fn verification_summary(&self, summary: &AggregatedVerifierVerdict) {
        println!();
        let decision = self.format_decision(summary.overall);
        let heading = if self.color_enabled {
            "Verification summary".bold().to_string()
        } else {
            "Verification summary".to_string()
        };
        println!("{heading}: {decision}");
        for report in &summary.verdicts {
            let report_decision = self.format_decision(report.verdict);
            println!("  {}{report_decision}", report.role);
            if !report.reasons.is_empty() {
                let line = format!("    reasons: {}", report.reasons.join("; "));
                emit(self.color_enabled, &line, |s| s.dimmed().to_string());
            }
            if !report.suggestions.is_empty() {
                let line = format!("    suggestions: {}", report.suggestions.join("; "));
                emit(self.color_enabled, &line, |s| s.dimmed().to_string());
            }
        }
    }

    fn final_delivery(&self, deliverable_path: &Path, summary: Option<&str>) {
        println!();
        let line = format!(
            "✓ solver reported final delivery at {}",
            deliverable_path.display()
        );
        emit(self.color_enabled, &line, |s| s.green().bold().to_string());
        // Only hint at a summary when the solver actually provided one.
        if let Some(summary) = summary {
            if !summary.is_empty() {
                emit(
                    self.color_enabled,
                    "  (final summary will be shown below)",
                    |s| s.dimmed().to_string(),
                );
            }
        }
    }

    fn run_interrupted(&self) {
        emit(
            self.color_enabled,
            "Run interrupted by Ctrl+C. Shutting down sessions…",
            |s| s.red().bold().to_string(),
        );
    }
}

View File

@@ -0,0 +1,129 @@
use std::path::Path;
use std::time::Duration;
use codex_common::elapsed::format_duration;
use crossterm::terminal;
use owo_colors::OwoColorize;
use textwrap::Options as WrapOptions;
use textwrap::wrap;
/// Print a boxed "Run Summary" table to stdout.
///
/// Rows are label/value pairs (run id, directory, optional objective,
/// deliverable path, total time, optional summary); empty optional fields
/// are skipped. Values wrap to the box width, which adapts to the terminal
/// size and is capped at 84 columns. `color_enabled` applies bold/green
/// styling to selected rows.
pub(crate) fn print_run_summary_box(
    color_enabled: bool,
    run_id: &str,
    run_path: &Path,
    deliverable_path: &Path,
    summary: Option<&str>,
    objective: Option<&str>,
    duration: Duration,
) {
    // Collect (label, value) rows in display order.
    let mut items = Vec::new();
    items.push(("Run ID".to_string(), run_id.to_string()));
    items.push(("Run Directory".to_string(), run_path.display().to_string()));
    if let Some(objective) = objective {
        if !objective.trim().is_empty() {
            items.push(("Objective".to_string(), objective.trim().to_string()));
        }
    }
    items.push((
        "Deliverable".to_string(),
        deliverable_path.display().to_string(),
    ));
    items.push(("Total Time".to_string(), format_duration(duration)));
    if let Some(summary) = summary {
        let trimmed = summary.trim();
        if !trimmed.is_empty() {
            items.push(("Summary".to_string(), trimmed.to_string()));
        }
    }
    // Label column is as wide as the longest label, but at least 12 chars.
    let label_width = items
        .iter()
        .map(|(label, _)| label.len())
        .max()
        .unwrap_or(0)
        .max(12);
    const DEFAULT_MAX_WIDTH: usize = 84;
    const MIN_VALUE_WIDTH: usize = 20;
    // 7 = fixed border/padding characters per row: "| ", " | ", and " |".
    let label_padding = label_width + 7;
    let min_total_width = label_padding + MIN_VALUE_WIDTH;
    // Fit the box within the terminal (minus a 2-column margin), falling back
    // to the default width when the terminal size cannot be queried.
    let available_width = terminal::size()
        .ok()
        .map(|(cols, _)| usize::from(cols).saturating_sub(2))
        .unwrap_or(DEFAULT_MAX_WIDTH);
    let max_width = available_width.min(DEFAULT_MAX_WIDTH);
    let lower_bound = min_total_width.min(available_width);
    let mut total_width = max_width.max(lower_bound).max(label_padding + 1);
    let mut value_width = total_width.saturating_sub(label_padding);
    // Guarantee a minimum value column even if that exceeds the terminal width.
    if value_width < MIN_VALUE_WIDTH {
        value_width = MIN_VALUE_WIDTH;
        total_width = label_padding + value_width;
    }
    let inner_width = total_width.saturating_sub(4);
    let top_border = format!("+{}+", "=".repeat(total_width.saturating_sub(2)));
    let separator = format!("+{}+", "-".repeat(total_width.saturating_sub(2)));
    let title_line = format!(
        "| {:^inner_width$} |",
        "Run Summary",
        inner_width = inner_width
    );
    println!();
    println!("{top_border}");
    if color_enabled {
        println!("{}", title_line.bold());
    } else {
        println!("{title_line}");
    }
    println!("{separator}");
    for (index, (label, value)) in items.iter().enumerate() {
        // Wrap each newline-separated paragraph of the value to the value column.
        let mut rows = Vec::new();
        for (idx, paragraph) in value.split('\n').enumerate() {
            let trimmed = paragraph.trim();
            if trimmed.is_empty() {
                // Preserve blank lines between paragraphs (but not a leading one).
                if idx > 0 {
                    rows.push(String::new());
                }
                continue;
            }
            let wrapped = wrap(trimmed, WrapOptions::new(value_width).break_words(false));
            if wrapped.is_empty() {
                rows.push(String::new());
            } else {
                rows.extend(wrapped.into_iter().map(|line| line.into_owned()));
            }
        }
        if rows.is_empty() {
            rows.push(String::new());
        }
        for (line_idx, line) in rows.iter().enumerate() {
            // Only the first wrapped line of a row carries the label text.
            let label_cell = if line_idx == 0 { label.as_str() } else { "" };
            let row_line = format!(
                "| {label_cell:<label_width$} | {line:<value_width$} |",
                label_cell = label_cell,
                line = line,
                label_width = label_width,
                value_width = value_width
            );
            if color_enabled {
                match label.as_str() {
                    "Deliverable" => println!("{}", row_line.green()),
                    "Summary" => println!("{}", row_line.bold()),
                    _ => println!("{row_line}"),
                }
            } else {
                println!("{row_line}");
            }
        }
        if index + 1 < items.len() {
            println!("{separator}");
        }
    }
    println!("{top_border}");
    println!();
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,967 @@
use std::any::type_name;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::CrossSessionSpawnParams;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::CrossSessionHub;
use codex_core::cross_session::PostUserTurnRequest;
use codex_core::cross_session::RoleOrId;
use codex_core::cross_session::SessionEventStream;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::ConversationId;
use serde::Deserialize;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde::de::Error as _;
use serde_json::Value;
use serde_json::json;
use tokio::signal;
use tokio_stream::StreamExt;
use tracing::warn;
use crate::progress::ProgressReporter;
use crate::prompts;
use crate::run_store::RoleMetadata;
use crate::run_store::RunStore;
use crate::signals::AggregatedVerifierVerdict;
use crate::signals::DirectiveResponse;
use crate::signals::VerifierDecision;
use crate::signals::VerifierReport;
use crate::signals::VerifierVerdict;
use crate::types::FINALIZATION_PROMPT;
use crate::types::ResumeParams;
use crate::types::RoleConfig;
use crate::types::RoleSession;
use crate::types::RunExecutionOptions;
use crate::types::RunOutcome;
use crate::types::RunParams;
use crate::types::RunSessions;
/// Control signals the solver emits as JSON in its assistant messages.
///
/// The `type` tag selects the variant; payload fields are optional at parse
/// time and validated later by the orchestrator's event loop.
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum SolverSignal {
    /// Solver asks the director for guidance.
    DirectionRequest {
        #[serde(default)]
        prompt: Option<String>,
    },
    /// Solver asks the verifiers to check a claim file.
    VerificationRequest {
        #[serde(default)]
        claim_path: Option<String>,
        #[serde(default)]
        notes: Option<String>,
    },
    /// Solver declares the run finished and points at the deliverable.
    FinalDelivery {
        #[serde(default)]
        deliverable_path: Option<String>,
        #[serde(default)]
        summary: Option<String>,
    },
}
/// JSON payload forwarded to the director when the solver requests direction.
#[derive(Serialize)]
struct DirectionRequestPayload<'a> {
    // Serialized as `"type"`; always set to "direction_request".
    #[serde(rename = "type")]
    kind: &'static str,
    prompt: &'a str,
}
/// JSON payload forwarded to each verifier for a verification request.
#[derive(Serialize)]
struct VerificationRequestPayload<'a> {
    // Serialized as `"type"`; always set to "verification_request".
    #[serde(rename = "type")]
    kind: &'static str,
    claim_path: &'a str,
    // Optional free-form notes; omitted from the JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    notes: Option<&'a str>,
}
/// Minimal handle retained so a spawned session can be shut down later.
struct SessionCleanup {
    conversation_id: ConversationId,
    conversation: Arc<CodexConversation>,
}
impl SessionCleanup {
    /// Capture the id and conversation handle needed to shut `session` down.
    fn new(session: &RoleSession) -> Self {
        Self {
            conversation_id: session.conversation_id,
            conversation: Arc::clone(&session.conversation),
        }
    }
}
/// Coordinates solver/director/verifier sessions for a Codex Infty run.
pub struct InftyOrchestrator {
    // Routes messages between sessions by run id + role.
    hub: Arc<CrossSessionHub>,
    // Spawns and resumes the underlying Codex conversations.
    conversation_manager: ConversationManager,
    // Directory under which run stores are created by default.
    runs_root: PathBuf,
    // Optional observer notified of run progress events.
    progress: Option<Arc<dyn ProgressReporter>>,
}
impl InftyOrchestrator {
    /// Create an orchestrator rooted at the default runs directory.
    pub fn new(auth: CodexAuth) -> Result<Self> {
        let runs_root = crate::default_runs_root()?;
        Ok(Self::with_runs_root(auth, runs_root))
    }

    /// Create an orchestrator that stores runs under `runs_root`.
    pub fn with_runs_root(auth: CodexAuth, runs_root: impl Into<PathBuf>) -> Self {
        Self {
            hub: Arc::new(CrossSessionHub::new()),
            conversation_manager: ConversationManager::with_auth(auth),
            runs_root: runs_root.into(),
            progress: None,
        }
    }

    /// Root directory under which run stores are created.
    pub fn runs_root(&self) -> &PathBuf {
        &self.runs_root
    }

    /// Shared cross-session hub used to route turns between roles.
    pub fn hub(&self) -> Arc<CrossSessionHub> {
        Arc::clone(&self.hub)
    }

    /// Attach a progress reporter (builder style).
    pub fn with_progress(mut self, reporter: Arc<dyn ProgressReporter>) -> Self {
        self.progress = Some(reporter);
        self
    }

    /// Spawn a fresh run from `params`, then drive it to completion.
    pub async fn execute_new_run(
        &self,
        params: RunParams,
        options: RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let sessions = self.spawn_run(params).await?;
        self.drive_run(sessions, options).await
    }

    /// Resume a stored run from `params`, then drive it to completion.
    pub async fn execute_existing_run(
        &self,
        params: ResumeParams,
        options: RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let sessions = self.resume_run(params).await?;
        self.drive_run(sessions, options).await
    }

    /// Initialize a new run store and spawn solver, director, and verifier
    /// sessions. On any spawn failure, already-spawned sessions are shut down
    /// and the run directory is removed.
    pub async fn spawn_run(&self, params: RunParams) -> Result<RunSessions> {
        let RunParams {
            run_id,
            run_root,
            solver,
            director,
            verifiers,
        } = params;
        // Explicit run_root wins; otherwise place the run under runs_root/run_id.
        let run_path = run_root.unwrap_or_else(|| self.runs_root.join(&run_id));
        let role_metadata = collect_role_metadata(&solver, &director, &verifiers);
        let mut store = RunStore::initialize(&run_path, &run_id, &role_metadata)?;
        let mut cleanup = Vec::new();
        let solver_session = match self
            .spawn_and_register_role(&run_id, &run_path, &solver, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_spawn(cleanup, &run_path).await;
                return Err(err);
            }
        };
        let director_session = match self
            .spawn_and_register_role(&run_id, &run_path, &director, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_spawn(cleanup, &run_path).await;
                return Err(err);
            }
        };
        let mut verifier_sessions = Vec::with_capacity(verifiers.len());
        for verifier in verifiers {
            let session = match self
                .spawn_and_register_role(&run_id, &run_path, &verifier, &mut store, &mut cleanup)
                .await
            {
                Ok(session) => session,
                Err(err) => {
                    self.cleanup_failed_spawn(cleanup, &run_path).await;
                    return Err(err);
                }
            };
            verifier_sessions.push(session);
        }
        Ok(RunSessions {
            run_id,
            solver: solver_session,
            director: director_session,
            verifiers: verifier_sessions,
            store,
        })
    }

    /// Reload a stored run and resume each role's session from its recorded
    /// rollout. On failure, already-resumed sessions are shut down (the run
    /// directory is kept).
    pub async fn resume_run(&self, params: ResumeParams) -> Result<RunSessions> {
        let ResumeParams {
            run_path,
            solver,
            director,
            verifiers,
        } = params;
        let mut store = RunStore::load(&run_path)?;
        let run_id = store.metadata().run_id.clone();
        let mut cleanup = Vec::new();
        let run_path = store.path().to_path_buf();
        let solver_session = match self
            .resume_and_register_role(&run_id, &run_path, &solver, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_resume(cleanup).await;
                return Err(err);
            }
        };
        let director_session = match self
            .resume_and_register_role(&run_id, &run_path, &director, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_resume(cleanup).await;
                return Err(err);
            }
        };
        let mut verifier_sessions = Vec::with_capacity(verifiers.len());
        for verifier in verifiers.iter() {
            let session = match self
                .resume_and_register_role(&run_id, &run_path, verifier, &mut store, &mut cleanup)
                .await
            {
                Ok(session) => session,
                Err(err) => {
                    self.cleanup_failed_resume(cleanup).await;
                    return Err(err);
                }
            };
            verifier_sessions.push(session);
        }
        store.touch()?;
        Ok(RunSessions {
            run_id,
            solver: solver_session,
            director: director_session,
            verifiers: verifier_sessions,
            store,
        })
    }

    /// Run the event loop to completion, then shut down every session
    /// regardless of whether the loop succeeded.
    async fn drive_run(
        &self,
        mut sessions: RunSessions,
        options: RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let result = self.inner_drive_run(&mut sessions, &options).await;
        let cleanup = collect_session_cleanup(&sessions);
        self.shutdown_sessions(cleanup).await;
        result
    }

    /// Core event loop: post the objective (if any) to the solver, then react
    /// to solver events until a `final_delivery` signal, stream end, or Ctrl+C.
    async fn inner_drive_run(
        &self,
        sessions: &mut RunSessions,
        options: &RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let mut solver_events = self.stream_events(sessions.solver.conversation_id)?;
        // True while we expect the solver's next completed turn to contain a signal.
        let mut waiting_for_signal = false;
        // True after we handled a signal and are waiting for its turn to complete.
        let mut pending_solver_turn_completion = false;
        if let Some(objective) = &options.objective {
            self.post_to_role(
                &sessions.run_id,
                &sessions.solver.role,
                objective.as_str(),
                Some(solver_signal_schema()),
            )
            .await?;
            sessions.store.touch()?;
            waiting_for_signal = true;
            if let Some(progress) = self.progress.as_ref() {
                progress.objective_posted(objective);
                progress.waiting_for_solver();
            }
        }
        let ctrl_c = signal::ctrl_c();
        tokio::pin!(ctrl_c);
        'event_loop: loop {
            tokio::select! {
                maybe_event = solver_events.next() => {
                    // A drained stream means the solver session ended without delivering.
                    let Some(event) = maybe_event else {
                        break 'event_loop;
                    };
                    if let Some(progress) = self.progress.as_ref() {
                        progress.solver_event(&event.event.msg);
                    }
                    match &event.event.msg {
                        EventMsg::AgentMessage(agent_msg) => {
                            if let Some(progress) = self.progress.as_ref() {
                                progress.solver_agent_message(agent_msg);
                            }
                            // Assistant messages may carry a JSON control signal.
                            if let Some(signal) = parse_solver_signal(&agent_msg.message) {
                                waiting_for_signal = false;
                                match signal {
                                    SolverSignal::DirectionRequest { prompt } => {
                                        // A blank or missing prompt is a protocol error.
                                        let prompt = prompt
                                            .and_then(|p| {
                                                let trimmed = p.trim();
                                                if trimmed.is_empty() {
                                                    None
                                                } else {
                                                    Some(trimmed.to_string())
                                                }
                                            })
                                            .ok_or_else(|| {
                                                anyhow!(
                                                    "solver direction_request missing prompt text"
                                                )
                                            })?;
                                        if let Some(progress) = self.progress.as_ref() {
                                            progress.direction_request(&prompt);
                                        }
                                        self.handle_direction_request(
                                            sessions,
                                            &prompt,
                                            options,
                                        )
                                        .await?;
                                        sessions.store.touch()?;
                                        pending_solver_turn_completion = true;
                                    }
                                    SolverSignal::VerificationRequest { claim_path, notes } => {
                                        let claim_path = claim_path
                                            .and_then(|p| {
                                                let trimmed = p.trim();
                                                if trimmed.is_empty() {
                                                    None
                                                } else {
                                                    Some(trimmed.to_string())
                                                }
                                            })
                                            .ok_or_else(|| {
                                                anyhow!(
                                                    "solver verification_request missing claim_path"
                                                )
                                            })?;
                                        if let Some(progress) = self.progress.as_ref() {
                                            progress.verification_request(
                                                &claim_path,
                                                notes.as_deref(),
                                            );
                                        }
                                        let verified = self
                                            .handle_verification_request(
                                                sessions,
                                                &claim_path,
                                                notes.as_deref(),
                                                options,
                                            )
                                            .await?;
                                        sessions.store.touch()?;
                                        if verified {
                                            pending_solver_turn_completion = true;
                                        }
                                    }
                                    SolverSignal::FinalDelivery {
                                        deliverable_path,
                                        summary,
                                    } => {
                                        let deliverable_path = deliverable_path
                                            .and_then(|p| {
                                                let trimmed = p.trim();
                                                if trimmed.is_empty() {
                                                    None
                                                } else {
                                                    Some(trimmed.to_string())
                                                }
                                            })
                                            .ok_or_else(|| {
                                                anyhow!(
                                                    "solver final_delivery missing deliverable_path"
                                                )
                                            })?;
                                        // Defensive: the filter above already rejects empty paths.
                                        if deliverable_path.is_empty() {
                                            bail!("solver final_delivery provided empty path");
                                        }
                                        // Canonicalize and confine the path to the run store.
                                        let resolved = resolve_deliverable_path(
                                            sessions.store.path(),
                                            &deliverable_path,
                                        )?;
                                        let outcome = RunOutcome {
                                            run_id: sessions.run_id.clone(),
                                            deliverable_path: resolved,
                                            summary: summary.filter(|s| !s.trim().is_empty()),
                                            raw_message: agent_msg.message.clone(),
                                        };
                                        if let Some(progress) = self.progress.as_ref() {
                                            progress.final_delivery(
                                                &outcome.deliverable_path,
                                                outcome.summary.as_deref(),
                                            );
                                        }
                                        return Ok(outcome);
                                    }
                                }
                            }
                        }
                        EventMsg::TaskComplete(..) => {
                            if waiting_for_signal {
                                // The solver completed its turn without issuing a signal; ask for one now.
                                self.request_solver_signal(&sessions.run_id, &sessions.solver.role)
                                    .await?;
                            } else if pending_solver_turn_completion {
                                // We handled a signal earlier in the loop; this completion corresponds to it.
                                pending_solver_turn_completion = false;
                            }
                        }
                        _ => {}
                    }
                }
                _ = &mut ctrl_c => {
                    if let Some(progress) = self.progress.as_ref() {
                        progress.run_interrupted();
                    }
                    // Shut sessions down eagerly; drive_run performs its own
                    // cleanup pass after this returns as well.
                    let cleanup = collect_session_cleanup(sessions);
                    self.shutdown_sessions(cleanup).await;
                    bail!("run interrupted by Ctrl+C");
                }
            }
        }
        Err(anyhow!(
            "run {} ended before emitting final_delivery message",
            sessions.run_id
        ))
    }

    /// Forward a direction request to the director, parse its directive, and
    /// relay the directive back to the solver.
    async fn handle_direction_request(
        &self,
        sessions: &RunSessions,
        prompt: &str,
        options: &RunExecutionOptions,
    ) -> Result<()> {
        let request = DirectionRequestPayload {
            kind: "direction_request",
            prompt,
        };
        let request_text = serde_json::to_string_pretty(&request)?;
        let handle = self
            .post_to_role(
                &sessions.run_id,
                &sessions.director.role,
                request_text,
                None,
            )
            .await?;
        let directive = self
            .await_first_assistant(&handle, options.director_timeout)
            .await?;
        let directive_payload: DirectiveResponse = parse_json_struct(&directive.message.message)
            .context("director response was not valid directive JSON")?;
        if let Some(progress) = self.progress.as_ref() {
            progress.director_response(&directive_payload);
        }
        let directive_text = serde_json::to_string_pretty(&directive_payload)?;
        let _ = self
            .post_to_role(
                &sessions.run_id,
                &sessions.solver.role,
                directive_text,
                Some(solver_signal_schema()),
            )
            .await?;
        Ok(())
    }

    /// Fan a verification request out to every verifier, aggregate their
    /// verdicts, send the summary back to the solver, and return whether the
    /// overall verdict is a pass. With no verifiers configured, the empty
    /// aggregate is sent and treated as a pass.
    async fn handle_verification_request(
        &self,
        sessions: &RunSessions,
        claim_path: &str,
        notes: Option<&str>,
        options: &RunExecutionOptions,
    ) -> Result<bool> {
        if sessions.verifiers.is_empty() {
            let summary = aggregate_verdicts(Vec::new());
            if let Some(progress) = self.progress.as_ref() {
                progress.verification_summary(&summary);
            }
            let summary_text = serde_json::to_string_pretty(&summary)?;
            let _ = self
                .post_to_role(
                    &sessions.run_id,
                    &sessions.solver.role,
                    summary_text,
                    Some(solver_signal_schema()),
                )
                .await?;
            return Ok(true);
        }
        let request = VerificationRequestPayload {
            kind: "verification_request",
            claim_path,
            notes,
        };
        let request_text = serde_json::to_string_pretty(&request)?;
        let mut collected = Vec::with_capacity(sessions.verifiers.len());
        // Verifiers are consulted sequentially, each with its own timeout.
        for verifier in &sessions.verifiers {
            let handle = self
                .post_to_role(
                    &sessions.run_id,
                    &verifier.role,
                    request_text.as_str(),
                    None,
                )
                .await?;
            let response = self
                .await_first_assistant(&handle, options.verifier_timeout)
                .await?;
            let verdict: VerifierVerdict = parse_json_struct(&response.message.message)
                .with_context(|| {
                    format!("verifier {} returned invalid verdict JSON", verifier.role)
                })?;
            if let Some(progress) = self.progress.as_ref() {
                progress.verifier_verdict(&verifier.role, &verdict);
            }
            collected.push((verifier.role.clone(), verdict));
        }
        let summary = aggregate_verdicts(collected);
        if let Some(progress) = self.progress.as_ref() {
            progress.verification_summary(&summary);
        }
        let summary_text = serde_json::to_string_pretty(&summary)?;
        let _ = self
            .post_to_role(
                &sessions.run_id,
                &sessions.solver.role,
                summary_text,
                Some(solver_signal_schema()),
            )
            .await?;
        Ok(summary.overall.is_pass())
    }

    /// Nudge the solver to emit a `final_delivery` signal after it completed
    /// a turn without one. Uses a short fixed 5s wait for the reply.
    async fn request_solver_signal(&self, run_id: &str, solver_role: &str) -> Result<()> {
        let handle = self
            .post_to_role(
                run_id,
                solver_role,
                FINALIZATION_PROMPT,
                Some(final_delivery_schema()),
            )
            .await?;
        let _ = self
            .await_first_assistant(&handle, Duration::from_secs(5))
            .await?;
        Ok(())
    }

    /// Shut down any sessions spawned so far and delete the partially
    /// initialized run directory.
    async fn cleanup_failed_spawn(&self, sessions: Vec<SessionCleanup>, run_path: &Path) {
        self.shutdown_sessions(sessions).await;
        if run_path.exists()
            && let Err(err) = fs::remove_dir_all(run_path)
        {
            warn!(
                path = %run_path.display(),
                ?err,
                "failed to remove run directory after spawn failure"
            );
        }
    }

    /// Shut down any sessions resumed so far; the run directory is preserved.
    async fn cleanup_failed_resume(&self, sessions: Vec<SessionCleanup>) {
        self.shutdown_sessions(sessions).await;
    }

    /// Best-effort shutdown: submit `Op::Shutdown` to each session (logging
    /// failures) and deregister it from the conversation manager.
    async fn shutdown_sessions(&self, sessions: Vec<SessionCleanup>) {
        for session in sessions {
            if let Err(err) = session.conversation.submit(Op::Shutdown).await {
                warn!(
                    %session.conversation_id,
                    ?err,
                    "failed to shutdown session during cleanup"
                );
            }
            let _ = self
                .conversation_manager
                .remove_conversation(&session.conversation_id)
                .await;
        }
    }

    /// Post a user turn to the session registered under (`run_id`, `role`),
    /// optionally constraining the reply with a JSON schema.
    pub async fn post_to_role(
        &self,
        run_id: &str,
        role: &str,
        text: impl Into<String>,
        final_output_json_schema: Option<Value>,
    ) -> Result<codex_core::cross_session::TurnHandle> {
        let handle = self
            .hub
            .post_user_turn(PostUserTurnRequest {
                target: RoleOrId::RunRole {
                    run_id: run_id.to_string(),
                    role: role.to_string(),
                },
                text: text.into(),
                final_output_json_schema,
            })
            .await?;
        Ok(handle)
    }

    /// Wait up to `timeout` for the first assistant message on `handle`.
    pub async fn await_first_assistant(
        &self,
        handle: &codex_core::cross_session::TurnHandle,
        timeout: Duration,
    ) -> Result<AssistantMessage> {
        let message = self.hub.await_first_assistant(handle, timeout).await?;
        Ok(message)
    }

    /// Convenience: post to a role and wait for its first assistant reply.
    pub async fn call_role(
        &self,
        run_id: &str,
        role: &str,
        text: impl Into<String>,
        timeout: Duration,
        final_output_json_schema: Option<Value>,
    ) -> Result<AssistantMessage> {
        let handle = self
            .post_to_role(run_id, role, text, final_output_json_schema)
            .await?;
        self.await_first_assistant(&handle, timeout).await
    }

    /// Forward an assistant message verbatim to another role and wait for
    /// that role's first reply.
    pub async fn relay_assistant_to_role(
        &self,
        run_id: &str,
        target_role: &str,
        assistant: &AssistantMessage,
        timeout: Duration,
        final_output_json_schema: Option<Value>,
    ) -> Result<AssistantMessage> {
        let handle = self
            .post_to_role(
                run_id,
                target_role,
                assistant.message.message.clone(),
                final_output_json_schema,
            )
            .await?;
        self.await_first_assistant(&handle, timeout).await
    }

    /// Subscribe to the event stream of a single conversation.
    pub fn stream_events(
        &self,
        conversation_id: ConversationId,
    ) -> Result<SessionEventStream, codex_core::cross_session::CrossSessionError> {
        self.hub.stream_events(conversation_id)
    }

    /// Spawn one role session, record it for cleanup, and persist its rollout
    /// path (and config path, when present) into the run store.
    async fn spawn_and_register_role(
        &self,
        run_id: &str,
        run_path: &Path,
        role_config: &RoleConfig,
        store: &mut RunStore,
        cleanup: &mut Vec<SessionCleanup>,
    ) -> Result<RoleSession> {
        let session = self
            .spawn_role_session(run_id, run_path, role_config.clone())
            .await?;
        cleanup.push(SessionCleanup::new(&session));
        store.update_rollout_path(&session.role, session.rollout_path.clone())?;
        if let Some(path) = role_config.config_path.clone() {
            store.set_role_config_path(&session.role, path)?;
        }
        Ok(session)
    }

    /// Resume one role session, record it for cleanup, and persist its
    /// rollout path (and config path, when present) into the run store.
    async fn resume_and_register_role(
        &self,
        run_id: &str,
        run_path: &Path,
        role_config: &RoleConfig,
        store: &mut RunStore,
        cleanup: &mut Vec<SessionCleanup>,
    ) -> Result<RoleSession> {
        let session = self
            .resume_role_session(run_id, run_path, role_config, store)
            .await?;
        cleanup.push(SessionCleanup::new(&session));
        store.update_rollout_path(&session.role, session.rollout_path.clone())?;
        if let Some(path) = role_config.config_path.clone() {
            store.set_role_config_path(&session.role, path)?;
        }
        Ok(session)
    }

    /// Start a fresh conversation for a role, rooted in the run directory and
    /// seeded with the role's instructions.
    async fn spawn_role_session(
        &self,
        run_id: &str,
        run_path: &Path,
        role_config: RoleConfig,
    ) -> Result<RoleSession> {
        let RoleConfig {
            role, mut config, ..
        } = role_config;
        config.cwd = run_path.to_path_buf();
        prompts::ensure_instructions(&role, &mut config);
        let session = self
            .conversation_manager
            .new_conversation_with_cross_session(
                config,
                CrossSessionSpawnParams {
                    hub: Arc::clone(&self.hub),
                    run_id: Some(run_id.to_string()),
                    role: Some(role.clone()),
                },
            )
            .await?;
        Ok(RoleSession::from_new(role, session))
    }

    /// Resume a role's conversation from the rollout path recorded in the run
    /// store; fails if the role or its rollout path is missing from metadata.
    async fn resume_role_session(
        &self,
        run_id: &str,
        run_path: &Path,
        role_config: &RoleConfig,
        store: &RunStore,
    ) -> Result<RoleSession> {
        let metadata = store
            .role_metadata(&role_config.role)
            .ok_or_else(|| anyhow!("role {} not found in run metadata", role_config.role))?;
        let rollout_path = metadata
            .rollout_path
            .as_ref()
            .ok_or_else(|| anyhow!("missing rollout path for role {}", role_config.role))?;
        let mut config = role_config.config.clone();
        config.cwd = run_path.to_path_buf();
        prompts::ensure_instructions(&role_config.role, &mut config);
        let session = self
            .conversation_manager
            .resume_conversation_with_cross_session(
                config,
                rollout_path.clone(),
                CrossSessionSpawnParams {
                    hub: Arc::clone(&self.hub),
                    run_id: Some(run_id.to_string()),
                    role: Some(role_config.role.clone()),
                },
            )
            .await?;
        Ok(RoleSession::from_new(role_config.role.clone(), session))
    }
}
/// Build the initial metadata rows (no rollout paths yet) for every role in
/// the run: solver, director, then each verifier in order.
fn collect_role_metadata(
    solver: &RoleConfig,
    director: &RoleConfig,
    verifiers: &[RoleConfig],
) -> Vec<RoleMetadata> {
    let mut metadata = solver_and_director_metadata(solver, director);
    for verifier in verifiers {
        metadata.push(RoleMetadata {
            role: verifier.role.clone(),
            rollout_path: None,
            config_path: verifier.config_path.clone(),
        });
    }
    metadata
}
/// Metadata rows for the two mandatory roles, solver first.
fn solver_and_director_metadata(solver: &RoleConfig, director: &RoleConfig) -> Vec<RoleMetadata> {
    [solver, director]
        .into_iter()
        .map(|config| RoleMetadata {
            role: config.role.clone(),
            rollout_path: None,
            config_path: config.config_path.clone(),
        })
        .collect()
}
/// Capture cleanup handles for every session in the run (solver, director,
/// then each verifier).
fn collect_session_cleanup(sessions: &RunSessions) -> Vec<SessionCleanup> {
    [&sessions.solver, &sessions.director]
        .into_iter()
        .chain(sessions.verifiers.iter())
        .map(SessionCleanup::new)
        .collect()
}
/// Try to parse a solver assistant message as a `SolverSignal`.
///
/// Accepts raw JSON or JSON wrapped in a Markdown code fence; returns `None`
/// when the message is empty or not a valid signal.
fn parse_solver_signal(message: &str) -> Option<SolverSignal> {
    let trimmed = message.trim();
    if trimmed.is_empty() {
        return None;
    }
    if let Ok(signal) = serde_json::from_str(trimmed) {
        return Some(signal);
    }
    let inner = strip_json_code_fence(trimmed)?;
    serde_json::from_str(inner.trim()).ok()
}
/// Strip a Markdown code fence (```json, ```JSON, or bare ```) from `text`,
/// returning the trimmed interior, or `None` when the text is not fenced or
/// the closing fence is missing.
fn strip_json_code_fence(text: &str) -> Option<&str> {
    let trimmed = text.trim();
    // Check the more specific prefixes first so "```json" is not consumed by
    // the bare "```" case (which would leave "json" in the payload).
    for prefix in ["```json", "```JSON", "```"] {
        if let Some(rest) = trimmed.strip_prefix(prefix) {
            return rest.strip_suffix("```").map(str::trim);
        }
    }
    None
}
/// Parse `message` as JSON into `T`, tolerating a Markdown code fence around
/// the payload. Errors mention the target type name for context.
fn parse_json_struct<T>(message: &str) -> Result<T>
where
    T: DeserializeOwned,
{
    let trimmed = message.trim();
    if trimmed.is_empty() {
        return Err(anyhow!("message was empty"));
    }
    let parsed = match serde_json::from_str(trimmed) {
        Ok(value) => Ok(value),
        // Retry on the fenced interior; otherwise keep the original error.
        Err(err) => match strip_json_code_fence(trimmed) {
            Some(inner) => serde_json::from_str(inner),
            None => Err(err),
        },
    };
    parsed
        .map_err(|err| anyhow!(err))
        .with_context(|| format!("failed to parse message as {}", type_name::<T>()))
}
/// Combine per-verifier verdicts into one `verification_feedback` summary;
/// the overall decision fails if any single verdict is not a pass.
fn aggregate_verdicts(items: Vec<(String, VerifierVerdict)>) -> AggregatedVerifierVerdict {
    let overall = if items.iter().any(|(_, v)| !v.verdict.is_pass()) {
        VerifierDecision::Fail
    } else {
        VerifierDecision::Pass
    };
    let verdicts = items
        .into_iter()
        .map(|(role, verdict)| VerifierReport {
            role,
            verdict: verdict.verdict,
            reasons: verdict.reasons,
            suggestions: verdict.suggestions,
        })
        .collect();
    AggregatedVerifierVerdict {
        kind: "verification_feedback",
        overall,
        verdicts,
    }
}
/// JSON schema constraining solver replies to the three signal shapes
/// (`direction_request`, `verification_request`, `final_delivery`).
/// Every payload field is required but nullable, mirroring the optional
/// fields on `SolverSignal`.
fn solver_signal_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "type": {
                "type": "string",
                "enum": ["direction_request", "verification_request", "final_delivery"]
            },
            "prompt": { "type": ["string", "null"] },
            "claim_path": { "type": ["string", "null"] },
            "notes": { "type": ["string", "null"] },
            "deliverable_path": { "type": ["string", "null"] },
            "summary": { "type": ["string", "null"] }
        },
        "required": [
            "type",
            "prompt",
            "claim_path",
            "notes",
            "deliverable_path",
            "summary"
        ],
        "additionalProperties": false
    })
}
/// JSON schema used when prompting the solver to finalize: only a
/// `final_delivery` object with a mandatory `deliverable_path` (and an
/// optional, nullable `summary`) is accepted.
fn final_delivery_schema() -> Value {
    json!({
        "type": "object",
        "required": ["type", "deliverable_path"],
        "properties": {
            "type": { "const": "final_delivery" },
            "deliverable_path": { "type": "string" },
            "summary": { "type": ["string", "null"] }
        },
        "additionalProperties": false
    })
}
/// Canonicalize `candidate` (relative paths are joined onto `base`) and
/// reject any result that escapes the canonicalized run store directory.
fn resolve_deliverable_path(base: &Path, candidate: &str) -> Result<PathBuf> {
    let base_abs = base
        .canonicalize()
        .with_context(|| format!("failed to canonicalize run store {}", base.display()))?;
    let candidate_path = Path::new(candidate);
    let joined = match candidate_path.is_absolute() {
        true => candidate_path.to_path_buf(),
        false => base_abs.join(candidate_path),
    };
    let resolved = joined.canonicalize().with_context(|| {
        format!(
            "failed to canonicalize deliverable path {}",
            joined.display()
        )
    })?;
    if resolved.starts_with(&base_abs) {
        Ok(resolved)
    } else {
        bail!(
            "deliverable path {} escapes run store {}",
            resolved.display(),
            base_abs.display()
        )
    }
}

View File

@@ -0,0 +1,22 @@
use std::path::Path;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;
use crate::signals::AggregatedVerifierVerdict;
use crate::signals::DirectiveResponse;
use crate::signals::VerifierVerdict;
/// Observer hooks for orchestrator run progress.
///
/// Every method has a no-op default body, so implementors only override the
/// events they care about. Implementations must be `Send + Sync` because the
/// orchestrator may report from multiple tasks.
pub trait ProgressReporter: Send + Sync {
    /// The run objective was posted to the solver.
    fn objective_posted(&self, _objective: &str) {}
    /// The orchestrator is blocked waiting on the solver's next message.
    fn waiting_for_solver(&self) {}
    /// A raw protocol event arrived from the solver session.
    fn solver_event(&self, _event: &EventMsg) {}
    /// The solver emitted an agent (assistant) message.
    fn solver_agent_message(&self, _message: &AgentMessageEvent) {}
    /// The solver asked the director for direction with the given prompt.
    fn direction_request(&self, _prompt: &str) {}
    /// The director answered a direction request.
    fn director_response(&self, _directive: &DirectiveResponse) {}
    /// The solver requested verification of the claim at `_claim_path`.
    fn verification_request(&self, _claim_path: &str, _notes: Option<&str>) {}
    /// A single verifier (identified by `_role`) returned its verdict.
    fn verifier_verdict(&self, _role: &str, _verdict: &VerifierVerdict) {}
    /// All verifier verdicts were aggregated into a summary.
    fn verification_summary(&self, _summary: &AggregatedVerifierVerdict) {}
    /// The solver produced the final deliverable.
    fn final_delivery(&self, _deliverable_path: &Path, _summary: Option<&str>) {}
    /// The run stopped before reaching a final delivery.
    fn run_interrupted(&self) {}
}

View File

@@ -1,147 +0,0 @@
use codex_core::config::Config;
/// System prompt for the Solver role: defines its responsibilities, the
/// JSON-only communication contract with Director/Verifier, and the
/// operating rhythm for long-running autonomous runs.
const SOLVER_PROMPT: &str = r#"# Codex Infty Solver
You are the **Solver** role in a Codex Infty run. Drive the engagement end to end without waiting for humans. Maintain momentum for multi-hour or multi-day efforts.
Responsibilities:
- Understand the objective and break it into a living execution plan. Refine plans with `update_plan` and keep the run store up to date.
- Produce artifacts under `artifacts/`, durable notes under `memory/`, and supporting indexes under `index/`. Prefer `apply_patch` for text edits and use `shell` for other filesystem work.
- When you exit a task or take a dependency on external evidence, write JSON notes in `memory/claims/` that link to the supporting artifacts.
- Run verification steps (tests, linters, proofs) under the sandbox before claiming completion.
Available Codex tools mirror standard Codex sessions (e.g. `shell`, `apply_patch`). Assume all filesystem paths are relative to the current run store directory unless stated otherwise.
## Communication contract
The orchestrator routes your structured messages to the Director or Verifier roles. Respond with **JSON only**—no leading prose or trailing commentary. Wrap JSON in a fenced block only if the agent policy forces it.
- Every reply must populate the full schema, even when a field does not apply. Set unused string fields to `null`.
- Direction request (send to Director):
```json
{"type":"direction_request","prompt":"<concise question or decision>","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}
```
- Verification request (send to Verifier):
```json
{"type":"verification_request","prompt":null,"claim_path":"memory/claims/<file>.json","notes":null,"deliverable_path":null,"summary":null}
```
- Final delivery (after receiving the finalization instruction):
```json
{"type":"final_delivery","prompt":null,"claim_path":null,"notes":null,"deliverable_path":"deliverable/summary.txt","summary":"<answer plus supporting context>"}
```
## Operating rhythm
- Never ask humans for approval to continue; the orchestrator supplies direction via the Director role.
- Create `deliverable/summary.txt` before every final delivery. Capture the final answer, how you reached it, and any follow-up instructions.
- Keep the run resilient to restarts: document intent, intermediate results, and follow-up tasks in `memory/`.
- Prefer concrete evidence (tests, diffs, logs). Link every claim to artifacts or durable notes so the Verifier can reproduce your reasoning.
- On failure feedback from a Verifier, update artifacts/notes/tests, then issue a new verification request referencing the superseding claim.
- When the orchestrator instructs you to finalize, build the `deliverable/` directory exactly as requested, summarise the outcome, and respond with the `final_delivery` JSON."#;
/// System prompt for the Director role: answers solver direction requests
/// with a strict `{"directive", "rationale"}` JSON reply.
const DIRECTOR_PROMPT: &str = r#"# Codex Infty Director
You are the **Director** role. The Solver routes direction questions to you. Provide crisp guidance that keeps the run aligned with the objective, risks, and verification needs.
Guidelines:
- Read Solver context from the question, referenced notes, and run store artifacts.
- Fill gaps in requirements, adjust strategy, or re-prioritize tasks when the plan drifts.
- Highlight mandatory verification or documentation steps the Solver must complete.
Respond **only** with JSON in this exact shape:
```json
{"directive":"<go/no-go decision or next step>","rationale":"<why this is the right move>"}
```
Keep `directive` actionable and concise. Use `rationale` for supporting detail. Leave `rationale` empty if it adds no value."#;
/// System prompt for Verifier roles: assess solver claims and reply with a
/// strict `{"verdict", "reasons", "suggestions"}` JSON payload.
const VERIFIER_PROMPT: &str = r#"# Codex Infty Verifier
You are a **Verifier**. Assess Solver completion claims objectively.
Process:
1. Inspect the referenced claim JSON and any linked artifacts, tests, or logs inside the run store.
2. Reproduce evidence when feasible (e.g. run tests via `shell`). Exit early if sandbox restrictions apply and explain the limitation.
3. Evaluate correctness, completeness, and policy alignment. Look for missing tests, undocumented gaps, regressions, or unverifiable assertions.
Respond **only** with JSON in this form:
```json
{"verdict":"pass","reasons":[],"suggestions":[]}
```
Use `"fail"` when the claim is not ready. Populate `reasons` with concrete blocking issues. Provide actionable `suggestions` for remediation. Omit entries when not needed.
Do not include extra commentary outside the JSON payload."#;
/// Inject the role's default system prompt when the config has none.
///
/// Caller-supplied `base_instructions` are always left untouched, and
/// unrecognized roles are a no-op.
pub fn ensure_instructions(role: &str, config: &mut Config) {
    if config.base_instructions.is_some() {
        return;
    }
    if let Some(text) = default_instructions_for_role(role) {
        config.base_instructions = Some(text.to_string());
    }
}
/// Map a role name (case-insensitively) to its built-in prompt, if any.
///
/// Any role whose lowercased name starts with `verifier` shares the verifier
/// prompt; unknown roles yield `None`.
fn default_instructions_for_role(role: &str) -> Option<&'static str> {
    let normalized = role.to_ascii_lowercase();
    match normalized.as_str() {
        "solver" => Some(SOLVER_PROMPT),
        "director" => Some(DIRECTOR_PROMPT),
        name if name.starts_with("verifier") => Some(VERIFIER_PROMPT),
        _ => None,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use core_test_support::load_default_config_for_test;
    use tempfile::TempDir;

    /// Run `ensure_instructions` against a pristine config (no base
    /// instructions) and assert the injected prompt contains `marker`.
    fn assert_role_prompt_contains(role: &str, marker: &str) {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = None;
        ensure_instructions(role, &mut config);
        assert!(
            config
                .base_instructions
                .as_ref()
                .unwrap()
                .contains(marker)
        );
    }

    #[test]
    fn provides_prompts_for_known_roles() {
        assert_role_prompt_contains("solver", "Codex Infty Solver");
        assert_role_prompt_contains("director", "Codex Infty Director");
        assert_role_prompt_contains("verifier-alpha", "Codex Infty Verifier");
    }

    #[test]
    fn does_not_override_existing_instructions() {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = Some("custom".to_string());
        ensure_instructions("solver", &mut config);
        assert_eq!(config.base_instructions.as_deref(), Some("custom"));
    }
}

View File

@@ -0,0 +1,15 @@
# Codex Infty Director
You are the **Director** role. The Solver routes direction questions to you. Provide crisp guidance that keeps the run aligned with the objective, risks, and verification needs.
Guidelines:
- Read Solver context from the question, referenced notes, and run store artifacts.
- Fill gaps in requirements, adjust strategy, or re-prioritize tasks when the plan drifts.
- Highlight mandatory verification or documentation steps the Solver must complete.
Respond **only** with JSON in this exact shape:
```json
{"directive":"<go/no-go decision or next step>","rationale":"<why this is the right move>"}
```
Keep `directive` actionable and concise. Use `rationale` for supporting detail. Leave `rationale` empty if it adds no value.

View File

@@ -0,0 +1 @@
/// Director role system prompt, embedded from `director.md` at compile time.
pub(crate) const DIRECTOR_PROMPT: &str = include_str!("director.md");

View File

@@ -0,0 +1,85 @@
use codex_core::config::Config;
mod director;
mod solver;
mod verifier;
pub(crate) use director::DIRECTOR_PROMPT;
pub(crate) use solver::SOLVER_PROMPT;
pub(crate) use verifier::VERIFIER_PROMPT;
/// Inject the role's default system prompt when the config has none.
///
/// Caller-supplied `base_instructions` are always left untouched, and
/// unrecognized roles are a no-op.
pub fn ensure_instructions(role: &str, config: &mut Config) {
    if config.base_instructions.is_some() {
        return;
    }
    if let Some(text) = default_instructions_for_role(role) {
        config.base_instructions = Some(text.to_string());
    }
}
/// Map a role name (case-insensitively) to its built-in prompt, if any.
///
/// Any role whose lowercased name starts with `verifier` shares the verifier
/// prompt; unknown roles yield `None`.
fn default_instructions_for_role(role: &str) -> Option<&'static str> {
    let normalized = role.to_ascii_lowercase();
    match normalized.as_str() {
        "solver" => Some(SOLVER_PROMPT),
        "director" => Some(DIRECTOR_PROMPT),
        name if name.starts_with("verifier") => Some(VERIFIER_PROMPT),
        _ => None,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use core_test_support::load_default_config_for_test;
    use tempfile::TempDir;

    /// Run `ensure_instructions` against a pristine config (no base
    /// instructions) and assert the injected prompt contains `marker`.
    fn assert_role_prompt_contains(role: &str, marker: &str) {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = None;
        ensure_instructions(role, &mut config);
        assert!(
            config
                .base_instructions
                .as_ref()
                .unwrap()
                .contains(marker)
        );
    }

    #[test]
    fn provides_prompts_for_known_roles() {
        assert_role_prompt_contains("solver", "Codex Infty Solver");
        assert_role_prompt_contains("director", "Codex Infty Director");
        assert_role_prompt_contains("verifier-alpha", "Codex Infty Verifier");
    }

    #[test]
    fn does_not_override_existing_instructions() {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = Some("custom".to_string());
        ensure_instructions("solver", &mut config);
        assert_eq!(config.base_instructions.as_deref(), Some("custom"));
    }
}

View File

@@ -0,0 +1,36 @@
# Codex Infty Solver
You are the **Solver** role in a Codex Infty run. Drive the engagement end to end without waiting for humans. Maintain momentum for multi-hour or multi-day efforts.
Responsibilities:
- Understand the objective and break it into a living execution plan. Refine plans with `update_plan` and keep the run store up to date.
- Produce artifacts under `artifacts/`, durable notes under `memory/`, and supporting indexes under `index/`. Prefer `apply_patch` for text edits and use `shell` for other filesystem work.
- When you exit a task or take a dependency on external evidence, write JSON notes in `memory/claims/` that link to the supporting artifacts.
- Run verification steps (tests, linters, proofs) under the sandbox before claiming completion.
Available Codex tools mirror standard Codex sessions (e.g. `shell`, `apply_patch`). Assume all filesystem paths are relative to the current run store directory unless stated otherwise.
## Communication contract
The orchestrator routes your structured messages to the Director or Verifier roles. Respond with **JSON only**—no leading prose or trailing commentary. Wrap JSON in a fenced block only if the agent policy forces it.
- Every reply must populate the full schema, even when a field does not apply. Set unused string fields to `null`.
- Direction request (send to Director):
```json
{"type":"direction_request","prompt":"<concise question or decision>","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}
```
- Verification request (send to Verifier):
```json
{"type":"verification_request","prompt":null,"claim_path":"memory/claims/<file>.json","notes":null,"deliverable_path":null,"summary":null}
```
- Final delivery (after receiving the finalization instruction):
```json
{"type":"final_delivery","prompt":null,"claim_path":null,"notes":null,"deliverable_path":"deliverable/summary.txt","summary":"<answer plus supporting context>"}
```
## Operating rhythm
- Never ask humans for approval to continue; the orchestrator supplies direction via the Director role.
- Create `deliverable/summary.txt` before every final delivery. Capture the final answer, how you reached it, and any follow-up instructions.
- Keep the run resilient to restarts: document intent, intermediate results, and follow-up tasks in `memory/`.
- Prefer concrete evidence (tests, diffs, logs). Link every claim to artifacts or durable notes so the Verifier can reproduce your reasoning.
- On failure feedback from a Verifier, update artifacts/notes/tests, then issue a new verification request referencing the superseding claim.
- When the orchestrator instructs you to finalize, build the `deliverable/` directory exactly as requested, summarise the outcome, and respond with the `final_delivery` JSON.

View File

@@ -0,0 +1 @@
/// Solver role system prompt, embedded from `solver.md` at compile time.
pub(crate) const SOLVER_PROMPT: &str = include_str!("solver.md");

View File

@@ -0,0 +1,16 @@
# Codex Infty Verifier
You are a **Verifier**. Assess Solver completion claims objectively.
Process:
1. Inspect the referenced claim JSON and any linked artifacts, tests, or logs inside the run store.
2. Reproduce evidence when feasible (e.g. run tests via `shell`). Exit early if sandbox restrictions apply and explain the limitation.
3. Evaluate correctness, completeness, and policy alignment. Look for missing tests, undocumented gaps, regressions, or unverifiable assertions.
Respond **only** with JSON in this form:
```json
{"verdict":"pass","reasons":[],"suggestions":[]}
```
Use `"fail"` when the claim is not ready. Populate `reasons` with concrete blocking issues. Provide actionable `suggestions` for remediation. Omit entries when not needed.
Do not include extra commentary outside the JSON payload.

View File

@@ -0,0 +1 @@
/// Verifier role system prompt, embedded from `verifier.md` at compile time.
pub(crate) const VERIFIER_PROMPT: &str = include_str!("verifier.md");

View File

@@ -0,0 +1,49 @@
use serde::Deserialize;
use serde::Serialize;
/// Director's JSON reply to a solver direction request.
#[derive(Debug, Deserialize, Serialize)]
pub struct DirectiveResponse {
    // Actionable next step or go/no-go decision.
    pub directive: String,
    // Optional supporting detail; an absent field deserializes to `None`.
    #[serde(default)]
    pub rationale: Option<String>,
}
/// Binary verifier outcome, serialized as `"pass"` / `"fail"`.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum VerifierDecision {
    Pass,
    Fail,
}
impl VerifierDecision {
    /// Returns `true` for the `Pass` variant.
    pub fn is_pass(self) -> bool {
        self == VerifierDecision::Pass
    }
}
/// A verifier's JSON verdict on a solver claim.
#[derive(Debug, Deserialize, Serialize)]
pub struct VerifierVerdict {
    pub verdict: VerifierDecision,
    // Concrete blocking issues; defaults to empty when omitted.
    #[serde(default)]
    pub reasons: Vec<String>,
    // Actionable remediation hints; defaults to empty when omitted.
    #[serde(default)]
    pub suggestions: Vec<String>,
}
/// A single verifier's verdict tagged with the verifier role name, as
/// embedded in the aggregated feedback payload sent back to the solver.
///
/// Note: this struct is serialize-only; the `#[serde(default)]` attributes
/// previously on `reasons`/`suggestions` only affect `Deserialize` and were
/// dead configuration, so they have been removed.
#[derive(Debug, Serialize)]
pub struct VerifierReport {
    /// Verifier role that produced this verdict.
    pub role: String,
    pub verdict: VerifierDecision,
    /// Concrete blocking issues; empty on pass.
    pub reasons: Vec<String>,
    /// Actionable remediation hints; may be empty.
    pub suggestions: Vec<String>,
}
/// Aggregate of all verifier verdicts for one verification round,
/// serialized with a `"type": "verification_feedback"` tag.
#[derive(Debug, Serialize)]
pub struct AggregatedVerifierVerdict {
    // Message discriminator, rendered as the JSON key `type`.
    #[serde(rename = "type")]
    pub kind: &'static str,
    // `Fail` if any single verdict failed, otherwise `Pass`.
    pub overall: VerifierDecision,
    pub verdicts: Vec<VerifierReport>,
}

View File

@@ -0,0 +1,108 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use codex_core::CodexConversation;
use codex_core::NewConversation;
use codex_core::config::Config;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::ConversationId;
/// Default wait for a director reply to a direction request.
pub(crate) const DEFAULT_DIRECTOR_TIMEOUT: Duration = Duration::from_secs(120);
/// Default wait for each verifier's verdict.
pub(crate) const DEFAULT_VERIFIER_TIMEOUT: Duration = Duration::from_secs(180);
/// Instruction sent to the solver to assemble `deliverable/` and respond
/// with the `final_delivery` JSON envelope.
pub(crate) const FINALIZATION_PROMPT: &str = "Create deliverable/: include compiled artifacts or scripts, usage docs, and tests. Write deliverable/summary.txt capturing the final answer, evidence, and follow-up steps. Also provide deliverable/README.md with overview, manifest (paths and sizes), verification steps, and limitations. Remove scratch files. Reply with JSON: {\"type\":\"final_delivery\",\"deliverable_path\":\"deliverable/summary.txt\",\"summary\":\"<answer plus supporting context>\"}.";
/// Per-role Codex session configuration (solver, director, or verifier).
#[derive(Clone)]
pub struct RoleConfig {
    // Role name, e.g. "solver", "director", "verifier-alpha".
    pub role: String,
    pub config: Config,
    // Config file this was loaded from, when constructed via `with_path`.
    pub config_path: Option<PathBuf>,
}
impl RoleConfig {
    /// Build a role config, forcing a full-access sandbox and never asking
    /// for approval so the run can proceed unattended.
    pub fn new(role: impl Into<String>, mut config: Config) -> Self {
        config.sandbox_policy = SandboxPolicy::DangerFullAccess;
        config.approval_policy = AskForApproval::Never;
        Self {
            role: role.into(),
            config,
            config_path: None,
        }
    }
    /// Build a role config tied to an on-disk config file.
    ///
    /// NOTE(review): unlike `new`, this does NOT override `sandbox_policy`
    /// or `approval_policy` — confirm whether that asymmetry is intended.
    pub fn with_path(role: impl Into<String>, config: Config, config_path: PathBuf) -> Self {
        Self {
            role: role.into(),
            config,
            config_path: Some(config_path),
        }
    }
}
/// Parameters for creating a brand-new run with fresh sessions.
pub struct RunParams {
    pub run_id: String,
    // Root directory override for the run store; `None` uses the default.
    pub run_root: Option<PathBuf>,
    pub solver: RoleConfig,
    pub director: RoleConfig,
    pub verifiers: Vec<RoleConfig>,
}
/// Parameters for resuming an existing run from its store on disk.
pub struct ResumeParams {
    // Path to the existing run store directory.
    pub run_path: PathBuf,
    pub solver: RoleConfig,
    pub director: RoleConfig,
    pub verifiers: Vec<RoleConfig>,
}
/// Tunables applied while a run executes; see `Default` for baseline values.
#[derive(Clone)]
pub struct RunExecutionOptions {
    // Objective text to post to the solver; `None` when resuming mid-run.
    pub objective: Option<String>,
    pub director_timeout: Duration,
    pub verifier_timeout: Duration,
}
impl Default for RunExecutionOptions {
    /// No objective plus the built-in director/verifier timeouts.
    fn default() -> Self {
        Self {
            objective: None,
            director_timeout: DEFAULT_DIRECTOR_TIMEOUT,
            verifier_timeout: DEFAULT_VERIFIER_TIMEOUT,
        }
    }
}
/// Final result of a completed run.
pub struct RunOutcome {
    pub run_id: String,
    // Canonical path to the solver's deliverable.
    pub deliverable_path: PathBuf,
    // Solver-provided summary, when present in the final_delivery message.
    pub summary: Option<String>,
    // The raw final_delivery message text as received from the solver.
    pub raw_message: String,
}
/// A live Codex conversation bound to one run role.
pub struct RoleSession {
    pub role: String,
    pub conversation_id: ConversationId,
    pub conversation: Arc<CodexConversation>,
    // SessionConfigured event captured at session start.
    pub session_configured: codex_core::protocol::SessionConfiguredEvent,
    // Copy of `session_configured.rollout_path` for convenient access.
    pub rollout_path: PathBuf,
}
impl RoleSession {
    /// Wrap a freshly spawned conversation as a role session.
    ///
    /// Moves `session_configured` out of `session` instead of cloning the
    /// whole event and then cloning `rollout_path` from the original again —
    /// only the rollout path itself needs a clone.
    pub(crate) fn from_new(role: String, session: NewConversation) -> Self {
        let session_configured = session.session_configured;
        let rollout_path = session_configured.rollout_path.clone();
        Self {
            role,
            conversation_id: session.conversation_id,
            conversation: session.conversation,
            session_configured,
            rollout_path,
        }
    }
}
/// All sessions spawned for one run, plus its backing store.
pub struct RunSessions {
    pub run_id: String,
    pub solver: RoleSession,
    pub director: RoleSession,
    pub verifiers: Vec<RoleSession>,
    pub store: crate::RunStore,
}