Compare commits

...

9 Commits

Author SHA1 Message Date
Ahmed Ibrahim
3aaaed3c48 codex: address PR review feedback (#17178)
Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:46:37 -07:00
Ahmed Ibrahim
73da395dd1 codex: stabilize realtime call-create assertions on latest main (#17178)
Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:25:11 -07:00
Ahmed Ibrahim
015c686e95 codex: fix CI failure on PR #17178
Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:23:30 -07:00
Ahmed Ibrahim
8a1f01313f codex: address PR review feedback (#17178)
Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:23:29 -07:00
Ahmed Ibrahim
4b1cc3ba5b codex: address PR review feedback (#17178)
- honor realtime model pins from base URL query params too
- add regression coverage for base-url model selection

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:23:03 -07:00
Ahmed Ibrahim
3185ee47fb codex: address PR review feedback (#17178)
- preserve provider-pinned realtime models on v2 starts
- add regression coverage for provider model query params

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:22:30 -07:00
Ahmed Ibrahim
40c63351ae codex: address PR review feedback (#17178)
- reuse the realtime session serializer in the v1 regression test

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:22:30 -07:00
Ahmed Ibrahim
251d0d8ef7 codex: address PR review feedback (#17178)
- keep the gpt-realtime-1.5 fallback scoped to realtime v2
- add a v1 regression test to preserve provider-selected models

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:22:30 -07:00
Ahmed Ibrahim
212a798f98 Default realtime start to v2 and gpt-realtime-1.5
- switch realtime start defaults to v2 and gpt-realtime-1.5
- add core integration coverage for websocket and webrtc start paths

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 20:22:30 -07:00
4 changed files with 490 additions and 33 deletions

View File

@@ -5,6 +5,10 @@ use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
use codex_api::RealtimeEventParser;
use codex_api::RealtimeSessionConfig;
use codex_api::RealtimeSessionMode;
use codex_api::session_update_session_json;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
@@ -962,35 +966,20 @@ async fn realtime_webrtc_start_emits_sdp_notification() -> Result<()> {
"unexpected close reason: {closed_notification:?}"
);
let request = call_capture.single_request();
assert_eq!(request.url.path(), "/v1/realtime/calls");
assert_eq!(request.url.query(), None);
assert_eq!(
request
.headers
.get("content-type")
.and_then(|value| value.to_str().ok()),
Some("multipart/form-data; boundary=codex-realtime-call-boundary")
);
let body = String::from_utf8(request.body).context("multipart body should be utf-8")?;
let session = r#"{"tool_choice":"auto","type":"realtime","instructions":"backend prompt\n\nstartup context","output_modalities":["audio"],"audio":{"input":{"format":{"type":"audio/pcm","rate":24000},"noise_reduction":{"type":"near_field"},"turn_detection":{"type":"server_vad","interrupt_response":true,"create_response":true}},"output":{"format":{"type":"audio/pcm","rate":24000},"voice":"marin"}},"tools":[{"type":"function","name":"codex","description":"Delegate a request to Codex and return the final result to the user. Use this as the default action. If the user asks to do something next, later, after this, or once current work finishes, call this tool so the work is actually queued instead of merely promising to do it later.","parameters":{"type":"object","properties":{"prompt":{"type":"string","description":"The user request to delegate to Codex."}},"required":["prompt"],"additionalProperties":false}}]}"#;
assert_eq!(
body,
format!(
"--codex-realtime-call-boundary\r\n\
Content-Disposition: form-data; name=\"sdp\"\r\n\
Content-Type: application/sdp\r\n\
\r\n\
v=offer\r\n\
\r\n\
--codex-realtime-call-boundary\r\n\
Content-Disposition: form-data; name=\"session\"\r\n\
Content-Type: application/json\r\n\
\r\n\
{session}\r\n\
--codex-realtime-call-boundary--\r\n"
)
);
let mut session = session_update_session_json(RealtimeSessionConfig {
instructions: "backend prompt".to_string(),
model: Some("gpt-realtime-1.5".to_string()),
session_id: Some("ignored".to_string()),
event_parser: RealtimeEventParser::RealtimeV2,
session_mode: RealtimeSessionMode::Conversational,
voice: RealtimeVoice::Marin,
})?;
session
.as_object_mut()
.expect("session should be an object")
.remove("id");
let session = serde_json::to_string(&session).expect("session should serialize");
assert_call_create_multipart(call_capture.single_request(), "v=offer\r\n", &session)?;
realtime_server.shutdown().await;
Ok(())

View File

@@ -55,12 +55,14 @@ use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
use url::Url;
const AUDIO_IN_QUEUE_CAPACITY: usize = 256;
const USER_TEXT_IN_QUEUE_CAPACITY: usize = 64;
const HANDOFF_OUT_QUEUE_CAPACITY: usize = 64;
const OUTPUT_EVENTS_QUEUE_CAPACITY: usize = 256;
const REALTIME_STARTUP_CONTEXT_TOKEN_BUDGET: usize = 5_000;
const DEFAULT_REALTIME_MODEL: &str = "gpt-realtime-1.5";
const ACTIVE_RESPONSE_CONFLICT_ERROR_PREFIX: &str =
"Conversation already has an active response in progress:";
@@ -554,6 +556,7 @@ pub(crate) async fn build_realtime_session_config(
voice: Option<RealtimeVoice>,
) -> CodexResult<RealtimeSessionConfig> {
let config = sess.get_config().await;
let provider = sess.provider().await;
let prompt = prepare_realtime_backend_prompt(
prompt,
config.experimental_realtime_ws_backend_prompt.clone(),
@@ -572,7 +575,20 @@ pub(crate) async fn build_realtime_session_config(
(false, true) => prompt,
(false, false) => format!("{prompt}\n\n{startup_context}"),
};
let model = config.experimental_realtime_ws_model.clone();
let base_url_pins_model = config
.experimental_realtime_ws_base_url
.as_deref()
.or(provider.base_url.as_deref())
.is_some_and(realtime_base_url_pins_model);
let provider_pins_model = provider
.query_params
.as_ref()
.is_some_and(|query_params| query_params.contains_key("model"))
|| base_url_pins_model;
let model = config.experimental_realtime_ws_model.clone().or_else(|| {
(config.realtime.version == RealtimeWsVersion::V2 && !provider_pins_model)
.then(|| DEFAULT_REALTIME_MODEL.to_string())
});
let event_parser = match config.realtime.version {
RealtimeWsVersion::V1 => RealtimeEventParser::V1,
RealtimeWsVersion::V2 => RealtimeEventParser::RealtimeV2,
@@ -603,6 +619,10 @@ fn default_realtime_voice(version: RealtimeWsVersion) -> RealtimeVoice {
}
}
/// Returns true when the given realtime base URL already pins a model via a
/// `model` query parameter. Unparseable URLs pin nothing and yield `false`.
fn realtime_base_url_pins_model(base_url: &str) -> bool {
    let Ok(url) = Url::parse(base_url) else {
        return false;
    };
    url.query_pairs().any(|(key, _)| key == "model")
}
fn validate_realtime_voice(version: RealtimeWsVersion, voice: RealtimeVoice) -> CodexResult<()> {
let voices = RealtimeVoicesList::builtin();
let allowed = match version {

View File

@@ -1,6 +1,10 @@
use anyhow::Context;
use anyhow::Result;
use chrono::Utc;
use codex_api::RealtimeEventParser;
use codex_api::RealtimeSessionConfig;
use codex_api::RealtimeSessionMode;
use codex_api::session_update_session_json;
use codex_config::config_toml::RealtimeWsVersion;
use codex_login::CodexAuth;
use codex_login::OPENAI_API_KEY_ENV_VAR;
@@ -35,6 +39,7 @@ use core_test_support::wait_for_event_match;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::collections::HashMap;
use std::fs;
use std::process::Command;
use std::sync::Arc;
@@ -257,7 +262,7 @@ async fn conversation_start_audio_text_close_round_trip() -> Result<()> {
.await
.unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
assert!(started.session_id.is_some());
assert_eq!(started.version, RealtimeConversationVersion::V1);
assert_eq!(started.version, RealtimeConversationVersion::V2);
let session_updated = wait_for_event_match(&test.codex, |msg| match msg {
EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
@@ -324,7 +329,7 @@ async fn conversation_start_audio_text_close_round_trip() -> Result<()> {
);
assert_eq!(
server.handshakes()[1].uri(),
"/v1/realtime?intent=quicksilver&model=realtime-test-model"
"/v1/realtime?model=realtime-test-model"
);
let mut request_types = [
connection[1].body_json()["type"]
@@ -360,6 +365,344 @@ async fn conversation_start_audio_text_close_round_trip() -> Result<()> {
Ok(())
}
// With no explicit model override and no provider-pinned model, a realtime
// conversation start must default to protocol V2 and the "gpt-realtime-1.5"
// model — both in the websocket handshake query string and in the initial
// `session.update` payload sent to the server.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_start_defaults_to_v2_and_gpt_realtime_1_5() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let api_server = start_mock_server().await;
    // Connection 0 exchanges no messages (opened while building the fixture);
    // connection 1 replies with `session.updated` for the actual start.
    let realtime_server = start_websocket_server(vec![
        vec![],
        vec![vec![json!({
            "type": "session.updated",
            "session": { "id": "sess_default", "instructions": "backend prompt" }
        })]],
    ])
    .await;
    let realtime_ws_base_url = realtime_server.uri().to_string();
    // Note: no `experimental_realtime_ws_model` and no provider model query
    // param — this is the "nothing pins a model" configuration under test.
    let mut builder = test_codex().with_config(move |config| {
        config.experimental_realtime_ws_backend_prompt = Some("backend prompt".to_string());
        config.experimental_realtime_ws_base_url = Some(realtime_ws_base_url);
        config.model_provider.supports_websockets = true;
    });
    let test = builder.build(&api_server).await?;
    // Wait for the build-time handshake so the connection indices below are
    // stable before the start request opens connection 1.
    assert!(
        realtime_server
            .wait_for_handshakes(/*expected*/ 1, Duration::from_secs(2))
            .await
    );
    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: Some(Some("backend prompt".to_string())),
            session_id: None,
            transport: None,
        }))
        .await?;
    let started = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationStarted(started) => Some(Ok(started.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
    // No version was configured, so the start must report the V2 default.
    assert_eq!(started.version, RealtimeConversationVersion::V2);
    let session_updated = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
            payload: RealtimeEvent::SessionUpdated { session_id, .. },
        }) => Some(session_id.clone()),
        _ => None,
    })
    .await;
    assert_eq!(session_updated, "sess_default");
    let connections = realtime_server.connections();
    assert_eq!(connections.len(), 2);
    // The default model must be pinned in the second handshake's query string.
    assert_eq!(
        realtime_server.handshakes()[1].uri(),
        "/v1/realtime?model=gpt-realtime-1.5"
    );
    // The first client frame on connection 1 must be a `session.update`
    // carrying the default model plus the V2 parser and default Marin voice.
    let expected_session_update = json!({
        "type": "session.update",
        "session": session_update_session_json(RealtimeSessionConfig {
            instructions: "backend prompt".to_string(),
            model: Some("gpt-realtime-1.5".to_string()),
            session_id: started.session_id.clone(),
            event_parser: RealtimeEventParser::RealtimeV2,
            session_mode: RealtimeSessionMode::Conversational,
            voice: RealtimeVoice::Marin,
        })?
    });
    assert_eq!(connections[1][0].body_json(), expected_session_update);
    // Close the conversation and wait for the closed event before shutting
    // the mock server down, so the teardown is orderly.
    test.codex.submit(Op::RealtimeConversationClose).await?;
    let _closed = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationClosed(closed) => Some(closed.clone()),
        _ => None,
    })
    .await;
    realtime_server.shutdown().await;
    Ok(())
}
// Regression test: on realtime V1, the absence of a model override must NOT
// fall back to the V2 default model — the handshake keeps the provider's
// default query (`intent=quicksilver`) and the `session.update` carries no
// model at all.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_start_v1_without_model_override_preserves_provider_default() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let api_server = start_mock_server().await;
    // Connection 0 is the build-time handshake; connection 1 serves the
    // `session.updated` reply for the start under test.
    let realtime_server = start_websocket_server(vec![
        vec![],
        vec![vec![json!({
            "type": "session.updated",
            "session": { "id": "sess_v1_default", "instructions": "backend prompt" }
        })]],
    ])
    .await;
    let realtime_ws_base_url = realtime_server.uri().to_string();
    let mut builder = test_codex().with_config(move |config| {
        config.experimental_realtime_ws_backend_prompt = Some("backend prompt".to_string());
        config.experimental_realtime_ws_base_url = Some(realtime_ws_base_url);
        config.model_provider.supports_websockets = true;
        // Force the legacy V1 realtime protocol; everything else is default.
        config.realtime.version = RealtimeWsVersion::V1;
    });
    let test = builder.build(&api_server).await?;
    // Stabilize connection indices: the fixture build opens connection 0.
    assert!(
        realtime_server
            .wait_for_handshakes(/*expected*/ 1, Duration::from_secs(2))
            .await
    );
    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: Some(Some("backend prompt".to_string())),
            session_id: None,
            transport: None,
        }))
        .await?;
    let started = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationStarted(started) => Some(Ok(started.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
    assert_eq!(started.version, RealtimeConversationVersion::V1);
    let session_updated = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
            payload: RealtimeEvent::SessionUpdated { session_id, .. },
        }) => Some(session_id.clone()),
        _ => None,
    })
    .await;
    assert_eq!(session_updated, "sess_v1_default");
    let connections = realtime_server.connections();
    assert_eq!(connections.len(), 2);
    // V1 handshake: no `model` query param is injected.
    assert_eq!(
        realtime_server.handshakes()[1].uri(),
        "/v1/realtime?intent=quicksilver"
    );
    // V1 session update: model stays None, V1 event parser, and the V1
    // default voice (Cove, not Marin).
    let expected_session_update = json!({
        "type": "session.update",
        "session": session_update_session_json(RealtimeSessionConfig {
            instructions: "backend prompt".to_string(),
            model: None,
            session_id: started.session_id.clone(),
            event_parser: RealtimeEventParser::V1,
            session_mode: RealtimeSessionMode::Conversational,
            voice: RealtimeVoice::Cove,
        })?
    });
    assert_eq!(connections[1][0].body_json(), expected_session_update);
    test.codex.submit(Op::RealtimeConversationClose).await?;
    let _closed = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationClosed(closed) => Some(closed.clone()),
        _ => None,
    })
    .await;
    realtime_server.shutdown().await;
    Ok(())
}
// Regression test: when the provider's query params already pin a `model`,
// the V2 default ("gpt-realtime-1.5") must NOT override it — the handshake
// carries the provider's model and the `session.update` sends no model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_start_v2_with_provider_model_preserves_provider_default() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let api_server = start_mock_server().await;
    // Connection 0 is the build-time handshake; connection 1 replies with
    // `session.updated` for the start under test.
    let realtime_server = start_websocket_server(vec![
        vec![],
        vec![vec![json!({
            "type": "session.updated",
            "session": { "id": "sess_v2_provider", "instructions": "backend prompt" }
        })]],
    ])
    .await;
    let realtime_ws_base_url = realtime_server.uri().to_string();
    let mut builder = test_codex().with_config(move |config| {
        config.experimental_realtime_ws_backend_prompt = Some("backend prompt".to_string());
        config.experimental_realtime_ws_base_url = Some(realtime_ws_base_url);
        config.model_provider.supports_websockets = true;
        // The provider pins its own realtime model via query params — this is
        // the condition that must suppress the default model fallback.
        config.model_provider.query_params = Some(HashMap::from([(
            "model".to_string(),
            "provider-realtime-model".to_string(),
        )]));
    });
    let test = builder.build(&api_server).await?;
    // Stabilize connection indices before submitting the start.
    assert!(
        realtime_server
            .wait_for_handshakes(/*expected*/ 1, Duration::from_secs(2))
            .await
    );
    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: Some(Some("backend prompt".to_string())),
            session_id: None,
            transport: None,
        }))
        .await?;
    let started = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationStarted(started) => Some(Ok(started.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
    assert_eq!(started.version, RealtimeConversationVersion::V2);
    let session_updated = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
            payload: RealtimeEvent::SessionUpdated { session_id, .. },
        }) => Some(session_id.clone()),
        _ => None,
    })
    .await;
    assert_eq!(session_updated, "sess_v2_provider");
    let connections = realtime_server.connections();
    assert_eq!(connections.len(), 2);
    // The handshake must carry the provider's model, not the default.
    assert_eq!(
        realtime_server.handshakes()[1].uri(),
        "/v1/realtime?model=provider-realtime-model"
    );
    // The session update must leave the model unset so the provider's choice
    // (already in the URL) is preserved server-side.
    let expected_session_update = json!({
        "type": "session.update",
        "session": session_update_session_json(RealtimeSessionConfig {
            instructions: "backend prompt".to_string(),
            model: None,
            session_id: started.session_id.clone(),
            event_parser: RealtimeEventParser::RealtimeV2,
            session_mode: RealtimeSessionMode::Conversational,
            voice: RealtimeVoice::Marin,
        })?
    });
    assert_eq!(connections[1][0].body_json(), expected_session_update);
    test.codex.submit(Op::RealtimeConversationClose).await?;
    let _closed = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationClosed(closed) => Some(closed.clone()),
        _ => None,
    })
    .await;
    realtime_server.shutdown().await;
    Ok(())
}
// Regression test: a `model` query param embedded directly in the realtime
// websocket base URL must also suppress the V2 default-model fallback, just
// like a provider-level query param does.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_start_v2_with_base_url_model_preserves_provider_default() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let api_server = start_mock_server().await;
    // Connection 0 is the build-time handshake; connection 1 replies with
    // `session.updated` for the start under test.
    let realtime_server = start_websocket_server(vec![
        vec![],
        vec![vec![json!({
            "type": "session.updated",
            "session": { "id": "sess_v2_base_url", "instructions": "backend prompt" }
        })]],
    ])
    .await;
    // Pin the model in the base URL itself — the condition under test.
    let realtime_ws_base_url =
        format!("{}/v1?model=base-url-realtime-model", realtime_server.uri());
    let mut builder = test_codex().with_config(move |config| {
        config.experimental_realtime_ws_backend_prompt = Some("backend prompt".to_string());
        config.experimental_realtime_ws_base_url = Some(realtime_ws_base_url);
        config.model_provider.supports_websockets = true;
    });
    let test = builder.build(&api_server).await?;
    // Stabilize connection indices before submitting the start.
    assert!(
        realtime_server
            .wait_for_handshakes(/*expected*/ 1, Duration::from_secs(2))
            .await
    );
    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: Some(Some("backend prompt".to_string())),
            session_id: None,
            transport: None,
        }))
        .await?;
    let started = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationStarted(started) => Some(Ok(started.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
    assert_eq!(started.version, RealtimeConversationVersion::V2);
    let session_updated = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationRealtime(RealtimeConversationRealtimeEvent {
            payload: RealtimeEvent::SessionUpdated { session_id, .. },
        }) => Some(session_id.clone()),
        _ => None,
    })
    .await;
    assert_eq!(session_updated, "sess_v2_base_url");
    let connections = realtime_server.connections();
    assert_eq!(connections.len(), 2);
    // The base-URL-pinned model must survive into the handshake untouched.
    assert_eq!(
        realtime_server.handshakes()[1].uri(),
        "/v1/realtime?model=base-url-realtime-model"
    );
    // And the session update must leave the model unset rather than sending
    // the gpt-realtime-1.5 default over the top of it.
    let expected_session_update = json!({
        "type": "session.update",
        "session": session_update_session_json(RealtimeSessionConfig {
            instructions: "backend prompt".to_string(),
            model: None,
            session_id: started.session_id.clone(),
            event_parser: RealtimeEventParser::RealtimeV2,
            session_mode: RealtimeSessionMode::Conversational,
            voice: RealtimeVoice::Marin,
        })?
    });
    assert_eq!(connections[1][0].body_json(), expected_session_update);
    test.codex.submit(Op::RealtimeConversationClose).await?;
    let _closed = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationClosed(closed) => Some(closed.clone()),
        _ => None,
    })
    .await;
    realtime_server.shutdown().await;
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_webrtc_start_posts_generated_session() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -505,6 +848,111 @@ async fn conversation_webrtc_start_posts_generated_session() -> Result<()> {
Ok(())
}
// WebRTC variant of the default-model test: a webrtc conversation start with
// no model override must POST a multipart call-create request whose session
// JSON carries the gpt-realtime-1.5 default, and report version V2.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_webrtc_start_defaults_to_gpt_realtime_1_5() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Capture the call-create POST so its multipart body can be asserted on.
    let capture = RealtimeCallRequestCapture::new();
    Mock::given(method("POST"))
        .and(path_regex(".*/realtime/calls$"))
        .and(capture.clone())
        .respond_with(
            ResponseTemplate::new(200)
                .insert_header("Location", "/v1/realtime/calls/calls/rtc_core_default")
                .set_body_string("v=answer\r\n"),
        )
        .mount(&server)
        .await;
    // Single websocket connection that serves the `session.updated` reply.
    let realtime_server = start_websocket_server_with_headers(vec![WebSocketConnectionConfig {
        requests: vec![vec![json!({
            "type": "session.updated",
            "session": { "id": "sess_default_webrtc", "instructions": "backend prompt" }
        })]],
        response_headers: Vec::new(),
        accept_delay: None,
        close_after_requests: false,
    }])
    .await;
    let realtime_ws_base_url = realtime_server.uri().to_string();
    // No model override configured — the default must kick in.
    let mut builder = test_codex().with_config(move |config| {
        config.experimental_realtime_ws_backend_prompt = Some("backend prompt".to_string());
        config.experimental_realtime_ws_base_url = Some(realtime_ws_base_url);
    });
    let test = builder.build(&server).await?;
    test.codex
        .submit(Op::RealtimeConversationStart(ConversationStartParams {
            prompt: Some(Some("backend prompt".to_string())),
            session_id: None,
            transport: Some(ConversationStartTransport::Webrtc {
                sdp: "v=offer\r\n".to_string(),
            }),
        }))
        .await?;
    let started = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationStarted(started) => Some(Ok(started.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation start failed: {err:?}"));
    assert_eq!(started.version, RealtimeConversationVersion::V2);
    // Wait for the SDP answer event so the call-create POST has been made
    // before inspecting the capture.
    let _created = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationSdp(created) => Some(Ok(created.clone())),
        EventMsg::Error(err) => Some(Err(err.clone())),
        _ => None,
    })
    .await
    .unwrap_or_else(|err: ErrorEvent| panic!("conversation call create failed: {err:?}"));
    let request = capture.single_request();
    let body = String::from_utf8(request.body).context("multipart body should be utf-8")?;
    // Build the expected session JSON via the shared serializer. The
    // serializer wants a session id, but the call-create payload omits it, so
    // a placeholder is passed in and the "id" field is stripped afterwards.
    let mut session = session_update_session_json(RealtimeSessionConfig {
        instructions: "backend prompt".to_string(),
        model: Some("gpt-realtime-1.5".to_string()),
        session_id: Some("ignored".to_string()),
        event_parser: RealtimeEventParser::RealtimeV2,
        session_mode: RealtimeSessionMode::Conversational,
        voice: RealtimeVoice::Marin,
    })?;
    session
        .as_object_mut()
        .expect("session should be an object")
        .remove("id");
    let session = serde_json::to_string(&session).expect("session should serialize");
    // The multipart body must contain exactly two parts: the SDP offer and
    // the session JSON, with a fixed boundary.
    assert_eq!(
        body,
        format!(
            "--codex-realtime-call-boundary\r\n\
             Content-Disposition: form-data; name=\"sdp\"\r\n\
             Content-Type: application/sdp\r\n\
             \r\n\
             v=offer\r\n\
             \r\n\
             --codex-realtime-call-boundary\r\n\
             Content-Disposition: form-data; name=\"session\"\r\n\
             Content-Type: application/json\r\n\
             \r\n\
             {session}\r\n\
             --codex-realtime-call-boundary--\r\n"
        )
    );
    test.codex.submit(Op::RealtimeConversationClose).await?;
    let _closed = wait_for_event_match(&test.codex, |msg| match msg {
        EventMsg::RealtimeConversationClosed(closed) => Some(closed.clone()),
        _ => None,
    })
    .await;
    realtime_server.shutdown().await;
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn conversation_start_uses_openai_env_key_fallback_with_chatgpt_auth() -> Result<()> {
if std::env::var_os(REALTIME_CONVERSATION_TEST_SUBPROCESS_ENV_VAR).is_none() {

View File

@@ -1649,8 +1649,8 @@ pub struct HookCompletedEvent {
#[derive(Debug, Clone, Copy, Default, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum RealtimeConversationVersion {
#[default]
V1,
#[default]
V2,
}