client-side `ModelInfo` overrides (#12101)

### TL;DR
Add top-level `model_catalog_json` config support so users can supply a
local model catalog override from a JSON file path (including adding new
models) without backend changes.

### Problem
Codex previously had no clean client-side way to replace or overlay model
catalog data when testing model metadata or new model entries locally.

### Fix
- Add a top-level `model_catalog_json` config field (a JSON file path).
- Apply catalog entries when resolving `ModelInfo`, layered in this order
  (see the sketch after this list):
  1. Base resolved model metadata (remote/fallback)
  2. Catalog overlay from `model_catalog_json`
  3. Existing global top-level overrides (`model_context_window`,
     `model_supports_reasoning_summaries`, etc.)
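
A minimal, self-contained sketch of that layering (the types and the helper here are illustrative stand-ins, not the real Codex implementation; only the config field names come from this change):

```rust
// Sketch of the resolution order above. `ModelInfo`, the catalog shape, and
// `resolve_model_info` are simplified stand-ins; only the override names
// (`model_catalog_json`, `model_context_window`,
// `model_supports_reasoning_summaries`) come from this change.
#[derive(Clone, Debug)]
struct ModelInfo {
    context_window: Option<u64>,
    supports_reasoning_summaries: bool,
}

struct Config {
    // Stand-in for the parsed `ModelsResponse` loaded from `model_catalog_json`.
    model_catalog: Option<Vec<(String, ModelInfo)>>,
    model_context_window: Option<u64>,
    model_supports_reasoning_summaries: Option<bool>,
}

fn resolve_model_info(config: &Config, model: &str, base: ModelInfo) -> ModelInfo {
    // 1. Start from the base resolved metadata (remote/fallback).
    let mut info = base;

    // 2. Overlay a matching catalog entry, if a local catalog was loaded.
    if let Some(catalog) = &config.model_catalog {
        if let Some((_, entry)) = catalog.iter().find(|(slug, _)| slug == model) {
            info = entry.clone();
        }
    }

    // 3. Existing global top-level overrides still win last.
    if let Some(window) = config.model_context_window {
        info.context_window = Some(window);
    }
    if let Some(flag) = config.model_supports_reasoning_summaries {
        info.supports_reasoning_summaries = flag;
    }
    info
}
```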

### Note
Will revisit per-field overrides in a follow-up.

### Tests
Added a test (`model_catalog_json_loads_from_path`, below) covering catalog loading from a JSON path.
Authored by sayan-oai on 2026-02-19 10:38:57 -08:00; committed by GitHub.
parent 3a951f8096 · commit d54999d006
13 changed files with 178 additions and 30 deletions

@@ -66,6 +66,7 @@ use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
@@ -359,6 +360,10 @@ pub struct Config {
    /// Optional override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Optional full model catalog loaded from `model_catalog_json`.
    /// When set, this replaces the bundled catalog for the current process.
    pub model_catalog: Option<ModelsResponse>,
    /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
    pub model_verbosity: Option<Verbosity>,
@@ -618,6 +623,27 @@ pub(crate) fn deserialize_config_toml_with_base(
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

fn load_catalog_json(path: &AbsolutePathBuf) -> std::io::Result<ModelsResponse> {
    let file_contents = std::fs::read_to_string(path)?;
    serde_json::from_str::<ModelsResponse>(&file_contents).map_err(|err| {
        std::io::Error::new(
            ErrorKind::InvalidData,
            format!(
                "failed to parse model_catalog_json path `{}` as JSON: {err}",
                path.display()
            ),
        )
    })
}

fn load_model_catalog(
    model_catalog_json: Option<AbsolutePathBuf>,
) -> std::io::Result<Option<ModelsResponse>> {
    model_catalog_json
        .map(|path| load_catalog_json(&path))
        .transpose()
}
fn filter_mcp_servers_by_requirements(
    mcp_servers: &mut HashMap<String, McpServerConfig>,
    mcp_requirements: Option<&Sourced<BTreeMap<String, McpServerRequirement>>>,
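
As a usage sketch for the loader above: the file that `model_catalog_json` points at is just a serialized `ModelsResponse`, so one way to produce a local catalog (mirroring the new test further down) is to start from the bundled `models.json` and trim it. The struct below is a stand-in that only assumes a top-level `models` array; the paths are illustrative:

```rust
use serde::{Deserialize, Serialize};

// Stand-in for `ModelsResponse`: only the top-level `models` array is assumed.
#[derive(Serialize, Deserialize)]
struct CatalogSketch {
    models: Vec<serde_json::Value>,
}

fn main() -> std::io::Result<()> {
    // Paths are illustrative; point `model_catalog_json` at the output file.
    let bundled = std::fs::read_to_string("models.json")?;
    let mut catalog: CatalogSketch =
        serde_json::from_str(&bundled).expect("valid models.json");
    catalog.models.truncate(1); // keep a single entry, as the new test does
    std::fs::write(
        "catalog.json",
        serde_json::to_string_pretty(&catalog).expect("serialize catalog"),
    )
}
```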
@@ -1039,6 +1065,10 @@ pub struct ConfigToml {
    /// Override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Optional path to a JSON file containing a complete model catalog.
    /// When set, this replaces the bundled catalog for this process.
    pub model_catalog_json: Option<AbsolutePathBuf>,
    /// Optionally specify a personality for the model
    pub personality: Option<Personality>,
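
For reference, wiring this up from a user's `config.toml` is a single key. A hedged sketch of how the new field deserializes follows; the struct is a stand-in for `ConfigToml`, and the real field type is `AbsolutePathBuf` rather than `String`:

```rust
use serde::Deserialize;

// Stand-in for `ConfigToml`; the real field is an `AbsolutePathBuf`.
#[derive(Deserialize)]
struct ConfigTomlSketch {
    model_catalog_json: Option<String>,
}

fn main() {
    // Path is illustrative; it must be absolute in the real config.
    let toml_src = r#"
        model_catalog_json = "/home/me/.codex/catalog.json"
    "#;
    let cfg: ConfigTomlSketch = toml::from_str(toml_src).expect("valid TOML");
    assert_eq!(
        cfg.model_catalog_json.as_deref(),
        Some("/home/me/.codex/catalog.json")
    );
}
```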
@@ -1793,6 +1823,7 @@ impl Config {
        let review_model = override_review_model.or(cfg.review_model);
        let check_for_update_on_startup = cfg.check_for_update_on_startup.unwrap_or(true);
        let model_catalog = load_model_catalog(cfg.model_catalog_json.clone())?;

        let log_dir = cfg
            .log_dir
@@ -1929,6 +1960,7 @@ impl Config {
                .or(cfg.model_reasoning_summary)
                .unwrap_or_default(),
            model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
            model_catalog,
            model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
            chatgpt_base_url: config_profile
                .chatgpt_base_url
@@ -4177,6 +4209,33 @@ config_file = "./agents/researcher.toml"
        Ok(())
    }

    #[test]
    fn model_catalog_json_loads_from_path() -> std::io::Result<()> {
        let codex_home = TempDir::new()?;
        let catalog_path = codex_home.path().join("catalog.json");

        let mut catalog: ModelsResponse =
            serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
        catalog.models = catalog.models.into_iter().take(1).collect();
        std::fs::write(
            &catalog_path,
            serde_json::to_string(&catalog).expect("serialize catalog"),
        )?;

        let cfg = ConfigToml {
            model_catalog_json: Some(AbsolutePathBuf::from_absolute_path(catalog_path)?),
            ..Default::default()
        };
        let config = Config::load_from_base_config_with_overrides(
            cfg,
            ConfigOverrides::default(),
            codex_home.path().to_path_buf(),
        )?;

        assert_eq!(config.model_catalog, Some(catalog));
        Ok(())
    }
    fn create_test_fixture() -> std::io::Result<PrecedenceTestFixture> {
        let toml = r#"
model = "o3"
@@ -4349,6 +4408,7 @@ model_verbosity = "high"
            model_reasoning_effort: Some(ReasoningEffort::High),
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_supports_reasoning_summaries: None,
            model_catalog: None,
            model_verbosity: None,
            personality: Some(Personality::Pragmatic),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4464,6 +4524,7 @@ model_verbosity = "high"
            model_reasoning_effort: None,
            model_reasoning_summary: ReasoningSummary::default(),
            model_supports_reasoning_summaries: None,
            model_catalog: None,
            model_verbosity: None,
            personality: Some(Personality::Pragmatic),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4577,6 +4638,7 @@ model_verbosity = "high"
            model_reasoning_effort: None,
            model_reasoning_summary: ReasoningSummary::default(),
            model_supports_reasoning_summaries: None,
            model_catalog: None,
            model_verbosity: None,
            personality: Some(Personality::Pragmatic),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4676,6 +4738,7 @@ model_verbosity = "high"
            model_reasoning_effort: Some(ReasoningEffort::High),
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_supports_reasoning_summaries: None,
            model_catalog: None,
            model_verbosity: Some(Verbosity::High),
            personality: Some(Personality::Pragmatic),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),