chore: nuke chat/completions API (#10157)

This commit is contained in:
jif-oai
2026-02-03 11:31:57 +00:00
committed by GitHub
parent 9257d8451c
commit d2394a2494
49 changed files with 268 additions and 2931 deletions

View File

@@ -4,15 +4,12 @@ use codex_core::auth::CODEX_API_KEY_ENV_VAR;
use codex_core::protocol::GitInfo;
use codex_utils_cargo_bin::find_resource;
use core_test_support::fs_wait;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use std::time::Duration;
use tempfile::TempDir;
use uuid::Uuid;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
fn repo_root() -> std::path::PathBuf {
#[expect(clippy::expect_used)]
@@ -24,41 +21,28 @@ fn cli_responses_fixture() -> std::path::PathBuf {
find_resource!("tests/cli_responses_fixture.sse").expect("failed to resolve fixture path")
}
/// Tests streaming chat completions through the CLI using a mock server.
/// This test:
/// 1. Sets up a mock server that simulates OpenAI's chat completions API
/// 2. Configures codex to use this mock server via a custom provider
/// 3. Sends a simple "hello?" prompt and verifies the streamed response
/// 4. Ensures the response is received exactly once and contains "hi"
/// Tests streaming the Responses API through the CLI using a mock server.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn chat_mode_stream_cli() {
async fn responses_mode_stream_cli() {
skip_if_no_network!();
let server = MockServer::start().await;
let repo_root = repo_root();
let sse = concat!(
"data: {\"choices\":[{\"delta\":{\"content\":\"hi\"}}]}\n\n",
"data: {\"choices\":[{\"delta\":{}}]}\n\n",
"data: [DONE]\n\n"
);
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream"),
)
.expect(1)
.mount(&server)
.await;
let sse = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "hi"),
responses::ev_completed("resp-1"),
]);
let resp_mock = responses::mount_sse_once(&server, sse).await;
let home = TempDir::new().unwrap();
let provider_override = format!(
"model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"chat\" }}",
"model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"responses\" }}",
server.uri()
);
let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap();
let mut cmd = AssertCommand::new(bin);
cmd.timeout(Duration::from_secs(30));
cmd.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
@@ -81,7 +65,8 @@ async fn chat_mode_stream_cli() {
let hi_lines = stdout.lines().filter(|line| line.trim() == "hi").count();
assert_eq!(hi_lines, 1, "Expected exactly one line with 'hi'");
server.verify().await;
let request = resp_mock.single_request();
assert_eq!(request.path(), "/v1/responses");
// Verify a new session rollout was created and is discoverable via list_conversations
let provider_filter = vec!["mock".to_string()];

View File

@@ -329,200 +329,6 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
Ok(())
}
/// End-to-end test (removed by this commit) of an MCP stdio image tool call
/// driven over the Chat Completions wire API:
/// 1. mocks two sequential SSE responses on `/v1/chat/completions` — first a
///    `tool_calls` delta invoking `mcp__rmcp__image`, then a final assistant
///    message;
/// 2. runs codex with `wire_api = Chat` and an `rmcp` stdio MCP server whose
///    image payload comes from the `MCP_TEST_IMAGE_DATA_URL` env var;
/// 3. asserts the tool-call begin/end events fire and that the second POST
///    carries the tool result as an `image_url` content item.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[serial(mcp_test_value)]
async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
    // Early-return Ok(()) (not a failure) when the sandbox has no network.
    skip_if_no_network!(Ok(()));
    let server = responses::start_mock_server().await;

    let call_id = "img-cc-1";
    let server_name = "rmcp";
    // Tool names are namespaced as `mcp__<server>__<tool>` by convention —
    // presumably matching the dispatcher in core; confirm against the router.
    let tool_name = format!("mcp__{server_name}__image");

    // First streamed chunk: the model "decides" to call the image tool.
    let tool_call = json!({
        "choices": [
            {
                "delta": {
                    "tool_calls": [
                        {
                            "id": call_id,
                            "type": "function",
                            "function": {"name": tool_name, "arguments": "{}"}
                        }
                    ]
                },
                "finish_reason": "tool_calls"
            }
        ]
    });
    let sse_tool_call = format!(
        "data: {}\n\ndata: [DONE]\n\n",
        serde_json::to_string(&tool_call)?
    );

    // Second streamed chunk: the final assistant turn after the tool result.
    let final_assistant = json!({
        "choices": [
            {
                "delta": {"content": "rmcp image tool completed successfully."},
                "finish_reason": "stop"
            }
        ]
    });
    let sse_final = format!(
        "data: {}\n\ndata: [DONE]\n\n",
        serde_json::to_string(&final_assistant)?
    );

    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;

    // Stateful responder that serves `bodies[0]`, `bodies[1]`, … on successive
    // POSTs; panics if the test makes more requests than bodies were prepared.
    struct ChatSeqResponder {
        // Index of the next body to serve; incremented atomically per request.
        num_calls: AtomicUsize,
        // Pre-rendered SSE payloads, one per expected request.
        bodies: Vec<String>,
    }

    impl wiremock::Respond for ChatSeqResponder {
        fn respond(&self, _: &wiremock::Request) -> wiremock::ResponseTemplate {
            // fetch_add returns the pre-increment value, i.e. this call's index.
            let idx = self.num_calls.fetch_add(1, Ordering::SeqCst);
            match self.bodies.get(idx) {
                Some(body) => wiremock::ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_string(body.clone()),
                None => panic!("no chat completion response for index {idx}"),
            }
        }
    }

    let chat_seq = ChatSeqResponder {
        num_calls: AtomicUsize::new(0),
        bodies: vec![sse_tool_call, sse_final],
    };

    // Exactly two round trips expected: tool-call request, then follow-up with
    // the tool result; `server.verify()` (implicit on drop) enforces expect(2).
    wiremock::Mock::given(wiremock::matchers::method("POST"))
        .and(wiremock::matchers::path("/v1/chat/completions"))
        .respond_with(chat_seq)
        .expect(2)
        .mount(&server)
        .await;

    let rmcp_test_server_bin = stdio_server_bin()?;
    let fixture = test_codex()
        .with_config(move |config| {
            // Force the legacy Chat Completions wire protocol for this session.
            config.model_provider.wire_api = codex_core::WireApi::Chat;
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::Stdio {
                        command: rmcp_test_server_bin,
                        args: Vec::new(),
                        // The stdio test server reads its image payload from
                        // this env var — TODO confirm against the rmcp test bin.
                        env: Some(HashMap::from([(
                            "MCP_TEST_IMAGE_DATA_URL".to_string(),
                            OPENAI_PNG.to_string(),
                        )])),
                        env_vars: Vec::new(),
                        cwd: None,
                    },
                    enabled: true,
                    disabled_reason: None,
                    startup_timeout_sec: Some(Duration::from_secs(10)),
                    tool_timeout_sec: None,
                    enabled_tools: None,
                    disabled_tools: None,
                    scopes: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;

    let session_model = fixture.session_configured.model.clone();
    fixture
        .codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "call the rmcp image tool".into(),
                text_elements: Vec::new(),
            }],
            final_output_json_schema: None,
            cwd: fixture.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
            collaboration_mode: None,
            personality: None,
        })
        .await?;

    // The mocked tool_calls delta should surface as an McpToolCallBegin event
    // carrying the exact call id and parsed (empty) arguments.
    let begin_event = wait_for_event(&fixture.codex, |ev| {
        matches!(ev, EventMsg::McpToolCallBegin(_))
    })
    .await;
    let EventMsg::McpToolCallBegin(begin) = begin_event else {
        unreachable!("begin");
    };
    assert_eq!(
        begin,
        McpToolCallBeginEvent {
            call_id: call_id.to_string(),
            invocation: McpInvocation {
                server: server_name.to_string(),
                tool: "image".to_string(),
                arguments: Some(json!({})),
            },
        },
    );

    let end_event = wait_for_event(&fixture.codex, |ev| {
        matches!(ev, EventMsg::McpToolCallEnd(_))
    })
    .await;
    let EventMsg::McpToolCallEnd(end) = end_event else {
        unreachable!("end");
    };
    assert!(end.result.as_ref().is_ok(), "tool call should succeed");

    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    // Chat Completions assertion: the second POST should include a tool role message
    // with an array `content` containing an item with the expected data URL.
    let all_requests = server.received_requests().await.expect("requests captured");
    let requests: Vec<_> = all_requests
        .iter()
        .filter(|req| req.method == "POST" && req.url.path().ends_with("/chat/completions"))
        .collect();
    assert!(requests.len() >= 2, "expected two chat completion calls");
    let second = requests[1];
    let body: Value = serde_json::from_slice(&second.body)?;
    let messages = body
        .get("messages")
        .and_then(Value::as_array)
        .cloned()
        .expect("messages array");
    // Locate the tool-result message by role + tool_call_id rather than by
    // position, so unrelated system/user messages don't break the assertion.
    let tool_msg = messages
        .iter()
        .find(|m| {
            m.get("role") == Some(&json!("tool")) && m.get("tool_call_id") == Some(&json!(call_id))
        })
        .cloned()
        .expect("tool message present");
    assert_eq!(
        tool_msg,
        json!({
            "role": "tool",
            "tool_call_id": call_id,
            "content": [{"type": "image_url", "image_url": {"url": OPENAI_PNG}}]
        })
    );
    Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[serial(mcp_test_value)]
async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {