Compare commits

...

2 Commits

Author        SHA1        Message                                                   Date
Rajeev Nayak  333464c678  add app-server model list chatgpt auth integration test  2026-05-13 21:49:35 -04:00
Rajeev Nayak  b834738666  update codex commit message formatting                    2026-05-13 15:53:04 -04:00
3 changed files with 276 additions and 0 deletions

View File

@@ -1,8 +1,10 @@
use std::time::Duration;

use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_models_cache;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
@@ -13,10 +15,16 @@ use codex_app_server_protocol::ModelServiceTier;
use codex_app_server_protocol::ModelUpgradeInfo;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_config::types::AuthCredentialsStoreMode;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelsResponse;
use core_test_support::responses::mount_models_once;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;

const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
@@ -148,6 +156,101 @@ async fn list_models_includes_hidden_models() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn list_models_uses_chatgpt_remote_catalog_as_source_of_truth() -> Result<()> {
    let server = MockServer::start().await;

    let remote_model: ModelInfo = serde_json::from_value(json!({
        "slug": "chatgpt-remote-only",
        "display_name": "ChatGPT Remote Only",
        "description": "Remote-only model for app-server model/list coverage",
        "default_reasoning_level": "medium",
        "supported_reasoning_levels": [
            {"effort": "low", "description": "low"},
            {"effort": "medium", "description": "medium"}
        ],
        "shell_type": "shell_command",
        "visibility": "list",
        "minimal_client_version": [0, 1, 0],
        "supported_in_api": true,
        "priority": 0,
        "upgrade": null,
        "base_instructions": "base instructions",
        "supports_reasoning_summaries": false,
        "support_verbosity": false,
        "default_verbosity": null,
        "apply_patch_tool_type": null,
        "truncation_policy": {"mode": "bytes", "limit": 10_000},
        "supports_parallel_tool_calls": false,
        "supports_image_detail_original": false,
        "context_window": 272_000,
        "max_context_window": 272_000,
        "experimental_supported_tools": [],
    }))?;

    let models_mock = mount_models_once(
        &server,
        ModelsResponse {
            models: vec![remote_model.clone()],
        },
    )
    .await;

    let codex_home = TempDir::new()?;
    let server_uri = server.uri();
    std::fs::write(
        codex_home.path().join("config.toml"),
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
openai_base_url = "{server_uri}/v1"
"#
        ),
    )?;
    write_chatgpt_auth(
        codex_home.path(),
        ChatGptAuthFixture::new("chatgpt-access-token").plan_type("pro"),
        AuthCredentialsStoreMode::File,
    )?;

    let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_list_models_request(ModelListParams {
            limit: Some(100),
            cursor: None,
            include_hidden: None,
        })
        .await?;
    let response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;

    let ModelListResponse {
        data: items,
        next_cursor,
    } = to_response::<ModelListResponse>(response)?;

    let mut expected_presets: Vec<ModelPreset> = vec![remote_model.into()];
    ModelPreset::mark_default_by_picker_visibility(&mut expected_presets);
    let expected_items = expected_presets
        .iter()
        .map(model_from_preset)
        .collect::<Vec<_>>();
    assert_eq!(items, expected_items);
    assert!(next_cursor.is_none());
    assert_eq!(
        models_mock.requests().len(),
        1,
        "expected a single /models request"
    );

    Ok(())
}

#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
    let codex_home = TempDir::new()?;
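
For readers tracing the protocol, the typed send_list_models_request call above presumably corresponds to a JSON-RPC request along the lines of the sketch below. This is an assumption, not part of the diff: the model/list method name is inferred from the fixture's description string, and the exact field casing and null handling are guesses.

    use serde_json::json;

    fn main() {
        // Hypothetical wire shape of the request built by send_list_models_request;
        // method name, field casing, and null handling are assumptions.
        let request = json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "model/list",
            "params": {
                "limit": 100,
                "cursor": null,
                "include_hidden": null
            }
        });
        println!("{request}");
    }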

View File

@@ -3,11 +3,13 @@ use crate::collaboration_mode_presets::builtin_collaboration_mode_presets;
use crate::config::ModelsManagerConfig;
use crate::model_info;
use async_trait::async_trait;
use codex_app_server_protocol::AuthMode;
use codex_login::AuthManager;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::error::Result as CoreResult;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use std::fmt;
use std::path::PathBuf;
@@ -319,6 +321,23 @@ impl OpenAiModelsManager {
    /// Replace the cached remote models and rebuild the derived presets list.
    async fn apply_remote_models(&self, models: Vec<ModelInfo>) {
        // Use the remote models list as the source of truth if it contains at least one
        // non-hidden model and the user is using ChatGPT auth.
        let should_use_remote_models_only = !models.is_empty()
            && models
                .iter()
                .any(|model| model.visibility == ModelVisibility::List)
            && self.auth_manager.as_ref().is_some_and(|auth_manager| {
                matches!(
                    auth_manager.auth_mode(),
                    Some(AuthMode::Chatgpt | AuthMode::ChatgptAuthTokens)
                )
            });
        if should_use_remote_models_only {
            *self.remote_models.write().await = models;
            return;
        }

        let mut existing_models = load_remote_models_from_file().unwrap_or_default();
        for model in models {
            if let Some(existing_index) = existing_models
View File

@@ -366,6 +366,160 @@ async fn refresh_available_models_sorts_by_priority() {
    assert_eq!(endpoint.fetch_count(), 1, "expected a single model fetch");
}

#[tokio::test]
async fn refresh_available_models_uses_remote_only_catalog_for_chatgpt_auth() {
    let remote_models = vec![remote_model(
        "chatgpt-visible-source-of-truth",
        "ChatGPT Visible",
        /*priority*/ 0,
    )];
    let codex_home = tempdir().expect("temp dir");
    let endpoint = TestModelsEndpoint::new(vec![remote_models.clone()]);
    let manager = openai_manager_for_tests(codex_home.path().to_path_buf(), endpoint.clone());

    manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("refresh succeeds");

    assert_eq!(manager.get_remote_models().await, remote_models);
    assert_eq!(endpoint.fetch_count(), 1, "expected a single model fetch");
}

#[tokio::test]
async fn refresh_available_models_uses_cached_remote_only_catalog_for_chatgpt_auth() {
    let remote_models = vec![remote_model(
        "chatgpt-cached-source-of-truth",
        "ChatGPT Cached",
        /*priority*/ 0,
    )];
    let codex_home = tempdir().expect("temp dir");
    let fetch_endpoint = TestModelsEndpoint::new(vec![remote_models.clone()]);
    let fetch_manager =
        openai_manager_for_tests(codex_home.path().to_path_buf(), fetch_endpoint.clone());
    fetch_manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("initial refresh succeeds");

    let cache_endpoint = TestModelsEndpoint::new(Vec::new());
    let cache_manager =
        openai_manager_for_tests(codex_home.path().to_path_buf(), cache_endpoint.clone());
    cache_manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("cached refresh succeeds");

    assert_eq!(cache_manager.get_remote_models().await, remote_models);
    assert_eq!(
        cache_endpoint.fetch_count(),
        0,
        "fresh cache should avoid a model fetch"
    );
}

#[tokio::test]
async fn get_model_info_uses_fallback_for_bundled_models_when_chatgpt_remote_is_authoritative() {
    let remote_models = vec![remote_model(
        "chatgpt-authoritative-model-info",
        "ChatGPT Model Info",
        /*priority*/ 0,
    )];
    let codex_home = tempdir().expect("temp dir");
    let endpoint = TestModelsEndpoint::new(vec![remote_models]);
    let manager = openai_manager_for_tests(codex_home.path().to_path_buf(), endpoint);
    let bundled_slug = load_remote_models_from_file()
        .expect("bundled models should parse")
        .first()
        .expect("bundled models should contain at least one model")
        .slug
        .clone();

    manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("refresh succeeds");

    let model_info = manager
        .get_model_info(&bundled_slug, &ModelsManagerConfig::default())
        .await;
    assert_eq!(model_info.slug, bundled_slug);
    assert!(model_info.used_fallback_model_metadata);
}

#[tokio::test]
async fn refresh_available_models_preserves_bundled_catalog_for_empty_chatgpt_remote() {
    let codex_home = tempdir().expect("temp dir");
    let endpoint = TestModelsEndpoint::new(vec![Vec::new()]);
    let manager = openai_manager_for_tests(codex_home.path().to_path_buf(), endpoint);
    let expected = load_remote_models_from_file().expect("bundled models should parse");

    manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("refresh succeeds");

    assert_eq!(manager.get_remote_models().await, expected);
}

#[tokio::test]
async fn refresh_available_models_merges_hidden_only_chatgpt_remote_with_bundled_catalog() {
    let hidden_remote = remote_model_with_visibility(
        "chatgpt-hidden-only",
        "ChatGPT Hidden",
        /*priority*/ 0,
        "hide",
    );
    let codex_home = tempdir().expect("temp dir");
    let endpoint = TestModelsEndpoint::new(vec![vec![hidden_remote.clone()]]);
    let manager = openai_manager_for_tests(codex_home.path().to_path_buf(), endpoint);
    let mut expected = load_remote_models_from_file().expect("bundled models should parse");
    expected.push(hidden_remote);

    manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("refresh succeeds");

    assert_eq!(manager.get_remote_models().await, expected);
}

#[tokio::test]
async fn refresh_available_models_keeps_merging_for_api_auth() {
    let remote_models = vec![remote_model(
        "api-auth-visible-remote",
        "API Auth Visible",
        /*priority*/ 0,
    )];
    let codex_home = tempdir().expect("temp dir");
    let endpoint = Arc::new(TestModelsEndpoint {
        has_command_auth: true,
        uses_codex_backend: false,
        responses: Mutex::new(vec![remote_models.clone()].into()),
        fetch_count: AtomicUsize::new(0),
    });
    let manager = openai_manager_for_tests_with_auth(
        codex_home.path().to_path_buf(),
        endpoint.clone(),
        Some(AuthManager::from_auth_for_testing(CodexAuth::from_api_key(
            "test-api-key",
        ))),
    );
    let mut expected = load_remote_models_from_file().expect("bundled models should parse");
    expected.extend(remote_models);

    manager
        .refresh_available_models(RefreshStrategy::OnlineIfUncached)
        .await
        .expect("refresh succeeds");

    assert_eq!(manager.get_remote_models().await, expected);
    assert_eq!(endpoint.fetch_count(), 1, "expected a single model fetch");
}

#[tokio::test]
async fn refresh_available_models_uses_cache_when_fresh() {
    let remote_models = vec![remote_model("cached", "Cached", /*priority*/ 5)];
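
The tests above lean on a remote_model(slug, display_name, priority) helper whose body sits outside this diff. A plausible reconstruction, assuming it builds a listed ModelInfo through serde with the same field set as the JSON fixture in the first file (the description string is generic here; everything else besides the three parameters is copied from that fixture):

    use codex_protocol::openai_models::ModelInfo;
    use serde_json::json;

    // Hypothetical reconstruction of the remote_model test helper; the field set
    // mirrors the integration-test fixture above, with "visibility": "list".
    fn remote_model(slug: &str, display_name: &str, priority: i64) -> ModelInfo {
        serde_json::from_value(json!({
            "slug": slug,
            "display_name": display_name,
            "description": "test model",
            "default_reasoning_level": "medium",
            "supported_reasoning_levels": [
                {"effort": "low", "description": "low"},
                {"effort": "medium", "description": "medium"}
            ],
            "shell_type": "shell_command",
            "visibility": "list",
            "minimal_client_version": [0, 1, 0],
            "supported_in_api": true,
            "priority": priority,
            "upgrade": null,
            "base_instructions": "base instructions",
            "supports_reasoning_summaries": false,
            "support_verbosity": false,
            "default_verbosity": null,
            "apply_patch_tool_type": null,
            "truncation_policy": {"mode": "bytes", "limit": 10_000},
            "supports_parallel_tool_calls": false,
            "supports_image_detail_original": false,
            "context_window": 272_000,
            "max_context_window": 272_000,
            "experimental_supported_tools": [],
        }))
        .expect("fixture json should deserialize into ModelInfo")
    }

The companion remote_model_with_visibility used in the hidden-only test presumably takes the visibility string as a fourth parameter instead of hardcoding "list".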