[codex-analytics] add queued submission metadata

rhan-oai
2026-04-06 12:26:33 -07:00
parent 472c15a74b
commit 59cfda78e2
58 changed files with 586 additions and 7 deletions


@@ -155,6 +155,41 @@ pub(crate) async fn wait_for_analytics_event(
    .await?
}

pub(crate) async fn wait_for_analytics_turn_event(
    server: &MockServer,
    read_timeout: Duration,
    turn_id: &str,
) -> Result<Value> {
    timeout(read_timeout, async {
        loop {
            let Some(requests) = server.received_requests().await else {
                tokio::time::sleep(Duration::from_millis(25)).await;
                continue;
            };
            for request in &requests {
                if request.method != "POST"
                    || request.url.path() != "/codex/analytics-events/events"
                {
                    continue;
                }
                let payload: Value = serde_json::from_slice(&request.body)
                    .map_err(|err| anyhow::anyhow!("invalid analytics payload: {err}"))?;
                let Some(events) = payload["events"].as_array() else {
                    continue;
                };
                if let Some(event) = events.iter().find(|event| {
                    event["event_type"] == "codex_turn_event"
                        && event["event_params"]["turn_id"] == turn_id
                }) {
                    return Ok::<Value, anyhow::Error>(event.clone());
                }
            }
            tokio::time::sleep(Duration::from_millis(25)).await;
        }
    })
    .await?
}
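The helper above polls the mock server's recorded requests until a codex_turn_event whose event_params.turn_id matches shows up, re-checking every 25 ms until the caller's timeout fires. A minimal usage sketch, assuming a wiremock MockServer and a turn id from an earlier turn/start response (both part of test setup not shown here):

// Sketch only: `server` and `turn` come from assumed test setup.
let event =
    wait_for_analytics_turn_event(&server, Duration::from_secs(10), &turn.id).await?;
assert_eq!(event["event_type"], "codex_turn_event");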
pub(crate) fn thread_initialized_event(payload: &Value) -> Result<&Value> {
    let events = payload["events"]
        .as_array()


@@ -55,6 +55,7 @@ use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::SubmissionType;
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
@@ -68,6 +69,7 @@ use tokio::time::timeout;
use super::analytics::enable_analytics_capture;
use super::analytics::wait_for_analytics_event;
use super::analytics::wait_for_analytics_turn_event;
#[cfg(windows)]
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25);
@@ -304,7 +306,9 @@ async fn turn_start_tracks_turn_event_analytics() -> Result<()> {
    assert_eq!(event["event_params"]["model"], "mock-model");
    assert_eq!(event["event_params"]["model_provider"], "mock_provider");
    assert_eq!(event["event_params"]["sandbox_policy"], "read_only");
    assert_eq!(event["event_params"]["submission_type"], "default");
    assert_eq!(event["event_params"]["num_input_images"], 1);
    assert_eq!(event["event_params"]["is_first_turn"], true);
    assert_eq!(event["event_params"]["status"], "completed");
    assert!(event["event_params"]["started_at"].as_u64().is_some());
    assert!(event["event_params"]["completed_at"].as_u64().is_some());
@@ -318,6 +322,106 @@ async fn turn_start_tracks_turn_event_analytics() -> Result<()> {
    Ok(())
}
#[tokio::test]
async fn turn_start_tracks_second_turn_as_not_first_and_queued_submission_analytics() -> Result<()>
{
    let responses = vec![
        create_final_assistant_message_sse_response("Done 1")?,
        create_final_assistant_message_sse_response("Done 2")?,
    ];
    let server = create_mock_responses_server_sequence_unchecked(responses).await;
    let codex_home = TempDir::new()?;
    write_mock_responses_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &server.uri(),
        &server.uri(),
    )?;
    enable_analytics_capture(&server, codex_home.path()).await?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

    let first_turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "first turn".to_string(),
                text_elements: Vec::new(),
            }],
            ..Default::default()
        })
        .await?;
    let first_turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn_req)),
    )
    .await??;
    let TurnStartResponse { turn: first_turn } = to_response::<TurnStartResponse>(first_turn_resp)?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let first_turn_event =
        wait_for_analytics_turn_event(&server, DEFAULT_READ_TIMEOUT, &first_turn.id).await?;
    assert_eq!(
        first_turn_event["event_params"]["submission_type"],
        "default"
    );
    assert_eq!(first_turn_event["event_params"]["is_first_turn"], true);

    let second_turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id,
            input: vec![V2UserInput::Text {
                text: "second turn".to_string(),
                text_elements: Vec::new(),
            }],
            submission_type: Some(SubmissionType::PromptQueued),
            ..Default::default()
        })
        .await?;
    let second_turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn_req)),
    )
    .await??;
    let TurnStartResponse { turn: second_turn } =
        to_response::<TurnStartResponse>(second_turn_resp)?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let second_turn_event =
        wait_for_analytics_turn_event(&server, DEFAULT_READ_TIMEOUT, &second_turn.id).await?;
    assert_eq!(
        second_turn_event["event_params"]["submission_type"],
        "queued"
    );
    assert_eq!(second_turn_event["event_params"]["is_first_turn"], false);
    Ok(())
}
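Taken together, the assertions in these tests imply an analytics request body roughly shaped like the sketch below. This is reconstructed purely from the asserted fields and from the helper's parsing (events batched under an "events" array); any field or value not asserted above is a guess:

// Illustrative payload shape; unasserted fields and values are assumptions.
let expected = serde_json::json!({
    "events": [{
        "event_type": "codex_turn_event",
        "event_params": {
            "turn_id": "<turn id>",
            "submission_type": "queued",  // "default" when not supplied
            "is_first_turn": false,
            "status": "completed",
            "started_at": 0_u64,          // numeric, per the as_u64() checks
            "completed_at": 0_u64
        }
    }]
});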
#[tokio::test]
async fn turn_start_tracks_failed_turn_event_analytics() -> Result<()> {
    let server = create_mock_responses_server_sequence(vec![String::new()]).await;
@@ -1539,6 +1643,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
            summary: Some(ReasoningSummary::Auto),
            service_tier: None,
            personality: None,
            submission_type: None,
            output_schema: None,
            collaboration_mode: None,
        })
@@ -1572,6 +1677,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
            summary: Some(ReasoningSummary::Auto),
            service_tier: None,
            personality: None,
            submission_type: None,
            output_schema: None,
            collaboration_mode: None,
        })
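The only variant this diff names is SubmissionType::PromptQueued, which the analytics layer reports as "queued"; a turn started without the field reports "default". A plausible sketch of the protocol enum under those observations (the Default variant name is an assumption; only PromptQueued is confirmed here):

// Hypothetical reconstruction of codex_protocol::protocol::SubmissionType;
// only the PromptQueued variant appears in this diff.
pub enum SubmissionType {
    Default,      // reported to analytics as "default"
    PromptQueued, // reported to analytics as "queued"
}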