diff --git a/codex-rs/codex-api/src/endpoint/models.rs b/codex-rs/codex-api/src/endpoint/models.rs
index b15f07fca2..7f21c77635 100644
--- a/codex-rs/codex-api/src/endpoint/models.rs
+++ b/codex-rs/codex-api/src/endpoint/models.rs
@@ -5,6 +5,7 @@ use crate::provider::Provider;
 use crate::telemetry::run_with_request_telemetry;
 use codex_client::HttpTransport;
 use codex_client::RequestTelemetry;
+use codex_protocol::openai_models::ModelInfo;
 use codex_protocol::openai_models::ModelsResponse;
 use http::HeaderMap;
 use http::Method;
@@ -41,7 +42,7 @@ impl ModelsClient {
         &self,
         client_version: &str,
         extra_headers: HeaderMap,
-    ) -> Result<ModelsResponse, ApiError> {
+    ) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
         let builder = || {
             let mut req = self.provider.build_request(Method::GET, self.path());
             req.headers.extend(extra_headers.clone());
@@ -66,7 +67,7 @@ impl ModelsClient {
             .and_then(|value| value.to_str().ok())
             .map(ToString::to_string);
 
-        let ModelsResponse { models, etag } = serde_json::from_slice::<ModelsResponse>(&resp.body)
+        let ModelsResponse { models } = serde_json::from_slice::<ModelsResponse>(&resp.body)
             .map_err(|e| {
                 ApiError::Stream(format!(
                     "failed to decode models response: {e}; body: {}",
@@ -74,9 +75,7 @@ impl ModelsClient {
                 ))
             })?;
 
-        let etag = header_etag.unwrap_or(etag);
-
-        Ok(ModelsResponse { models, etag })
+        Ok((models, header_etag))
     }
 }
 
@@ -102,16 +101,15 @@ mod tests {
     struct CapturingTransport {
         last_request: Arc<Mutex<Option<Request>>>,
         body: Arc<ModelsResponse>,
+        response_etag: Arc<Option<String>>,
     }
 
     impl Default for CapturingTransport {
         fn default() -> Self {
             Self {
                 last_request: Arc::new(Mutex::new(None)),
-                body: Arc::new(ModelsResponse {
-                    models: Vec::new(),
-                    etag: String::new(),
-                }),
+                body: Arc::new(ModelsResponse { models: Vec::new() }),
+                response_etag: Arc::new(None),
             }
         }
     }
@@ -122,8 +120,8 @@ mod tests {
             *self.last_request.lock().unwrap() = Some(req);
             let body = serde_json::to_vec(&*self.body).unwrap();
             let mut headers = HeaderMap::new();
-            if !self.body.etag.is_empty() {
-                headers.insert(ETAG, self.body.etag.parse().unwrap());
+            if let Some(etag) = self.response_etag.as_ref().as_deref() {
+                headers.insert(ETAG, etag.parse().unwrap());
             }
             Ok(Response {
                 status: StatusCode::OK,
@@ -166,14 +164,12 @@ mod tests {
 
     #[tokio::test]
     async fn appends_client_version_query() {
-        let response = ModelsResponse {
-            models: Vec::new(),
-            etag: String::new(),
-        };
+        let response = ModelsResponse { models: Vec::new() };
 
         let transport = CapturingTransport {
             last_request: Arc::new(Mutex::new(None)),
             body: Arc::new(response),
+            response_etag: Arc::new(None),
         };
 
         let client = ModelsClient::new(
@@ -182,12 +178,12 @@ mod tests {
             DummyAuth,
         );
 
-        let result = client
+        let (models, _etag) = client
             .list_models("0.99.0", HeaderMap::new())
             .await
             .expect("request should succeed");
 
-        assert_eq!(result.models.len(), 0);
+        assert_eq!(models.len(), 0);
 
         let url = transport
             .last_request
@@ -232,12 +228,12 @@ mod tests {
             }))
             .unwrap(),
             ],
-            etag: String::new(),
         };
 
         let transport = CapturingTransport {
             last_request: Arc::new(Mutex::new(None)),
             body: Arc::new(response),
+            response_etag: Arc::new(None),
         };
 
         let client = ModelsClient::new(
@@ -246,27 +242,25 @@ mod tests {
            DummyAuth,
        );
 
-        let result = client
+        let (models, _etag) = client
             .list_models("0.99.0", HeaderMap::new())
             .await
             .expect("request should succeed");
 
-        assert_eq!(result.models.len(), 1);
-        assert_eq!(result.models[0].slug, "gpt-test");
-        assert_eq!(result.models[0].supported_in_api, true);
-        assert_eq!(result.models[0].priority, 1);
+        assert_eq!(models.len(), 1);
+        assert_eq!(models[0].slug, "gpt-test");
+        assert_eq!(models[0].supported_in_api, true);
+        assert_eq!(models[0].priority, 1);
     }
 
     #[tokio::test]
     async fn list_models_includes_etag() {
-        let response = ModelsResponse {
-            models: Vec::new(),
-            etag: "\"abc\"".to_string(),
-        };
+        let response = ModelsResponse { models: Vec::new() };
 
         let transport = CapturingTransport {
             last_request: Arc::new(Mutex::new(None)),
             body: Arc::new(response),
+            response_etag: Arc::new(Some("\"abc\"".to_string())),
         };
 
         let client = ModelsClient::new(
@@ -275,12 +269,12 @@ mod tests {
             DummyAuth,
         );
 
-        let result = client
+        let (models, etag) = client
             .list_models("0.1.0", HeaderMap::new())
             .await
             .expect("request should succeed");
 
-        assert_eq!(result.models.len(), 0);
-        assert_eq!(result.etag, "\"abc\"");
+        assert_eq!(models.len(), 0);
+        assert_eq!(etag.as_deref(), Some("\"abc\""));
     }
 }
diff --git a/codex-rs/codex-api/tests/models_integration.rs b/codex-rs/codex-api/tests/models_integration.rs
index 93baffd356..0b3e95ee30 100644
--- a/codex-rs/codex-api/tests/models_integration.rs
+++ b/codex-rs/codex-api/tests/models_integration.rs
@@ -90,7 +90,6 @@ async fn models_client_hits_models_endpoint() {
             reasoning_summary_format: ReasoningSummaryFormat::None,
             experimental_supported_tools: Vec::new(),
         }],
-        etag: String::new(),
     };
 
     Mock::given(method("GET"))
@@ -106,13 +105,13 @@ async fn models_client_hits_models_endpoint() {
     let transport = ReqwestTransport::new(reqwest::Client::new());
     let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);
 
-    let result = client
+    let (models, _etag) = client
         .list_models("0.1.0", HeaderMap::new())
         .await
         .expect("models request should succeed");
 
-    assert_eq!(result.models.len(), 1);
-    assert_eq!(result.models[0].slug, "gpt-test");
+    assert_eq!(models.len(), 1);
+    assert_eq!(models[0].slug, "gpt-test");
 
     let received = server
         .received_requests()
diff --git a/codex-rs/core/src/openai_models/models_manager.rs b/codex-rs/core/src/openai_models/models_manager.rs
index 7f54c4f852..d6ca13e76b 100644
--- a/codex-rs/core/src/openai_models/models_manager.rs
+++ b/codex-rs/core/src/openai_models/models_manager.rs
@@ -94,12 +94,12 @@ impl ModelsManager {
         let client = ModelsClient::new(transport, api_provider, api_auth);
         let client_version = format_client_version_to_whole();
 
-        let ModelsResponse { models, etag } = client
+        let (models, etag) = client
             .list_models(&client_version, HeaderMap::new())
             .await
             .map_err(map_api_error)?;
 
-        let etag = (!etag.is_empty()).then_some(etag);
+        let etag = etag.filter(|value| !value.is_empty());
         self.apply_remote_models(models.clone()).await;
         *self.etag.write().await = etag.clone();
 
@@ -389,7 +389,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: remote_models.clone(),
-                etag: String::new(),
             },
         )
         .await;
@@ -446,7 +445,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: remote_models.clone(),
-                etag: String::new(),
             },
         )
        .await;
@@ -501,7 +499,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: initial_models.clone(),
-                etag: String::new(),
             },
         )
         .await;
@@ -542,7 +539,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: updated_models.clone(),
-                etag: String::new(),
             },
         )
         .await;
@@ -576,7 +572,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: initial_models,
-                etag: String::new(),
             },
         )
         .await;
@@ -605,7 +600,6 @@ mod tests {
             &server,
             ModelsResponse {
                 models: refreshed_models,
-                etag: String::new(),
             },
         )
         .await;
diff --git a/codex-rs/core/tests/common/responses.rs b/codex-rs/core/tests/common/responses.rs
index b98b29625e..8fcabdda54 100644
--- a/codex-rs/core/tests/common/responses.rs
+++ b/codex-rs/core/tests/common/responses.rs
@@ -677,14 +677,7 @@ pub async fn start_mock_server() -> MockServer {
         .await;
 
     // Provide a default `/models` response so tests remain hermetic when the client queries it.
-    let _ = mount_models_once(
-        &server,
-        ModelsResponse {
-            models: Vec::new(),
-            etag: String::new(),
-        },
-    )
-    .await;
+    let _ = mount_models_once(&server, ModelsResponse { models: Vec::new() }).await;
 
     server
 }
diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs
index 3c4d389ec0..4086732c2b 100644
--- a/codex-rs/core/tests/suite/remote_models.rs
+++ b/codex-rs/core/tests/suite/remote_models.rs
@@ -93,7 +93,6 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
         &server,
         ModelsResponse {
             models: vec![remote_model],
-            etag: String::new(),
         },
     )
     .await;
@@ -232,7 +231,6 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
         &server,
         ModelsResponse {
             models: vec![remote_model],
-            etag: String::new(),
         },
     )
     .await;
@@ -310,7 +308,6 @@ async fn remote_models_preserve_builtin_presets() -> Result<()> {
         &server,
         ModelsResponse {
             models: vec![remote_model.clone()],
-            etag: String::new(),
         },
     )
     .await;
@@ -368,7 +365,6 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
         &server,
         ModelsResponse {
             models: vec![remote_model],
-            etag: String::new(),
         },
     )
     .await;
diff --git a/codex-rs/protocol/src/openai_models.rs b/codex-rs/protocol/src/openai_models.rs
index 28b25bb604..4cdc0e4ba6 100644
--- a/codex-rs/protocol/src/openai_models.rs
+++ b/codex-rs/protocol/src/openai_models.rs
@@ -197,8 +197,6 @@ pub struct ModelInfo {
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema, Default)]
 pub struct ModelsResponse {
     pub models: Vec<ModelInfo>,
-    #[serde(default)]
-    pub etag: String,
 }
 
 // convert ModelInfo to ModelPreset