Compare commits

...

7 Commits

Author SHA1 Message Date
Sayan Sisodiya
3e8dba4ac9 test fixes after rebase 2026-02-10 23:05:22 -08:00
Sayan Sisodiya
809248bdd8 reject unknown fields 2026-02-10 22:56:40 -08:00
Sayan Sisodiya
32c7f6806a clear model_messages on base_instructions 2026-02-10 22:56:40 -08:00
Sayan Sisodiya
b963d42262 reorder ops 2026-02-10 22:56:40 -08:00
Sayan Sisodiya
2d3cb345e0 clean up tests 2026-02-10 22:56:40 -08:00
Sayan Sisodiya
664eed0809 apply overrides to picker, add test for ModelInfo/ModelInfoPatch drift 2026-02-10 22:56:39 -08:00
Sayan Sisodiya
a5d2b31065 add client-side ModelInfo overrides 2026-02-10 22:56:39 -08:00
8 changed files with 773 additions and 13 deletions

View File

@@ -82,6 +82,13 @@
],
"type": "string"
},
"ApplyPatchToolType": {
"enum": [
"freeform",
"function"
],
"type": "string"
},
"AppsConfigToml": {
"additionalProperties": {
"$ref": "#/definitions/AppConfig"
@@ -335,6 +342,17 @@
},
"type": "object"
},
"ConfigShellToolType": {
"description": "Shell execution capability for a model.",
"enum": [
"default",
"local",
"unified_exec",
"disabled",
"shell_command"
],
"type": "string"
},
"FeedbackConfigToml": {
"additionalProperties": false,
"properties": {
@@ -414,6 +432,25 @@
}
]
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"ModeKind": {
"description": "Initial collaboration mode to use when the TUI starts.",
"enum": [
@@ -422,6 +459,191 @@
],
"type": "string"
},
"ModelInfoPatch": {
"additionalProperties": false,
"description": "User-provided patch for overriding model metadata in local config.\n\nEvery field is optional so users can override only the parts of [`ModelInfo`] they need. The target model slug is provided by the surrounding map key.",
"properties": {
"apply_patch_tool_type": {
"allOf": [
{
"$ref": "#/definitions/ApplyPatchToolType"
}
],
"default": null
},
"auto_compact_token_limit": {
"default": null,
"format": "int64",
"type": "integer"
},
"base_instructions": {
"default": null,
"type": "string"
},
"context_window": {
"default": null,
"format": "int64",
"type": "integer"
},
"default_reasoning_level": {
"allOf": [
{
"$ref": "#/definitions/ReasoningEffort"
}
],
"default": null
},
"default_verbosity": {
"allOf": [
{
"$ref": "#/definitions/Verbosity"
}
],
"default": null
},
"description": {
"default": null,
"type": "string"
},
"display_name": {
"default": null,
"type": "string"
},
"effective_context_window_percent": {
"default": null,
"format": "int64",
"type": "integer"
},
"experimental_supported_tools": {
"default": null,
"items": {
"type": "string"
},
"type": "array"
},
"input_modalities": {
"default": null,
"items": {
"$ref": "#/definitions/InputModality"
},
"type": "array"
},
"model_messages": {
"allOf": [
{
"$ref": "#/definitions/ModelMessages"
}
],
"default": null
},
"prefer_websockets": {
"default": null,
"type": "boolean"
},
"priority": {
"default": null,
"format": "int32",
"type": "integer"
},
"shell_type": {
"allOf": [
{
"$ref": "#/definitions/ConfigShellToolType"
}
],
"default": null
},
"support_verbosity": {
"default": null,
"type": "boolean"
},
"supported_in_api": {
"default": null,
"type": "boolean"
},
"supported_reasoning_levels": {
"default": null,
"items": {
"$ref": "#/definitions/ReasoningEffortPreset"
},
"type": "array"
},
"supports_parallel_tool_calls": {
"default": null,
"type": "boolean"
},
"supports_reasoning_summaries": {
"default": null,
"type": "boolean"
},
"truncation_policy": {
"allOf": [
{
"$ref": "#/definitions/TruncationPolicyConfig"
}
],
"default": null
},
"upgrade": {
"allOf": [
{
"$ref": "#/definitions/ModelInfoUpgrade"
}
],
"default": null
},
"visibility": {
"allOf": [
{
"$ref": "#/definitions/ModelVisibility"
}
],
"default": null
}
},
"type": "object"
},
"ModelInfoUpgrade": {
"properties": {
"migration_markdown": {
"type": "string"
},
"model": {
"type": "string"
}
},
"required": [
"migration_markdown",
"model"
],
"type": "object"
},
"ModelInstructionsVariables": {
"properties": {
"personality_default": {
"type": "string"
},
"personality_friendly": {
"type": "string"
},
"personality_pragmatic": {
"type": "string"
}
},
"type": "object"
},
"ModelMessages": {
"description": "A strongly-typed template for assembling model instructions and developer messages. If instructions_* is populated and valid, it will override base_instructions.",
"properties": {
"instructions_template": {
"type": "string"
},
"instructions_variables": {
"$ref": "#/definitions/ModelInstructionsVariables"
}
},
"type": "object"
},
"ModelProviderInfo": {
"additionalProperties": false,
"description": "Serializable representation of a provider definition.",
@@ -510,6 +732,15 @@
],
"type": "object"
},
"ModelVisibility": {
"description": "Visibility of a model in the picker or APIs.",
"enum": [
"list",
"hide",
"none"
],
"type": "string"
},
"Notice": {
"description": "Settings for notices we display to users via the tui and app-server clients (primarily the Codex IDE extension). NOTE: these are different from notifications - notices are warnings, NUX screens, acknowledgements, etc.",
"properties": {
@@ -881,6 +1112,28 @@
],
"type": "string"
},
"ReasoningEffortPreset": {
"description": "A reasoning effort option that can be surfaced for a model.",
"properties": {
"description": {
"description": "Short human description shown next to the effort in UIs.",
"type": "string"
},
"effort": {
"allOf": [
{
"$ref": "#/definitions/ReasoningEffort"
}
],
"description": "Effort level that the model supports."
}
},
"required": [
"description",
"effort"
],
"type": "object"
},
"ReasoningSummary": {
"description": "A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries",
"oneOf": [
@@ -1038,6 +1291,30 @@
},
"type": "object"
},
"TruncationMode": {
"description": "Server-provided truncation policy metadata for a model.",
"enum": [
"bytes",
"tokens"
],
"type": "string"
},
"TruncationPolicyConfig": {
"properties": {
"limit": {
"format": "int64",
"type": "integer"
},
"mode": {
"$ref": "#/definitions/TruncationMode"
}
},
"required": [
"limit",
"mode"
],
"type": "object"
},
"TrustLevel": {
"description": "Represents the trust level for a project directory. This determines the approval policy and sandbox mode applied.",
"enum": [
@@ -1442,6 +1719,14 @@
"format": "int64",
"type": "integer"
},
"model_info_overrides": {
"additionalProperties": {
"$ref": "#/definitions/ModelInfoPatch"
},
"default": {},
"description": "Per-model metadata overrides keyed by model slug.",
"type": "object"
},
"model_instructions_file": {
"allOf": [
{

View File

@@ -57,6 +57,7 @@ use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::openai_models::ModelInfoPatch;
use codex_protocol::openai_models::ReasoningEffort;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
@@ -317,6 +318,9 @@ pub struct Config {
/// Optional override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,
/// Per-model metadata overrides keyed by model slug.
pub model_info_overrides: HashMap<String, ModelInfoPatch>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,
@@ -975,6 +979,10 @@ pub struct ConfigToml {
/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,
/// Per-model metadata overrides keyed by model slug.
#[serde(default)]
pub model_info_overrides: HashMap<String, ModelInfoPatch>,
/// Optionally specify a personality for the model
pub personality: Option<Personality>,
@@ -1766,6 +1774,7 @@ impl Config {
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
model_info_overrides: cfg.model_info_overrides,
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
chatgpt_base_url: config_profile
.chatgpt_base_url
@@ -4054,6 +4063,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_info_overrides: HashMap::new(),
model_verbosity: None,
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4160,6 +4170,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_info_overrides: HashMap::new(),
model_verbosity: None,
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4264,6 +4275,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_info_overrides: HashMap::new(),
model_verbosity: None,
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -4354,6 +4366,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_info_overrides: HashMap::new(),
model_verbosity: Some(Verbosity::High),
personality: Some(Personality::Pragmatic),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),

View File

@@ -85,7 +85,7 @@ impl ModelsManager {
error!("failed to refresh available models: {err}");
}
let remote_models = self.get_remote_models(config).await;
self.build_available_models(remote_models)
self.build_available_models(remote_models, config)
}
/// List collaboration mode presets.
@@ -100,7 +100,7 @@ impl ModelsManager {
/// Returns an error if the internal lock cannot be acquired.
pub fn try_list_models(&self, config: &Config) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.try_get_remote_models(config)?;
Ok(self.build_available_models(remote_models))
Ok(self.build_available_models(remote_models, config))
}
// todo(aibrahim): should be visible to core only and sent on session_configured event
@@ -124,7 +124,7 @@ impl ModelsManager {
error!("failed to refresh available models: {err}");
}
let remote_models = self.get_remote_models(config).await;
let available = self.build_available_models(remote_models);
let available = self.build_available_models(remote_models, config);
available
.iter()
.find(|model| model.is_default)
@@ -134,17 +134,19 @@ impl ModelsManager {
}
// todo(aibrahim): look if we can tighten it to pub(crate)
/// Look up model metadata, applying remote overrides and config adjustments.
/// Look up model metadata, applying remote overrides and config adjustments, as well as client overrides.
pub async fn get_model_info(&self, model: &str, config: &Config) -> ModelInfo {
let remote = self
.find_remote_model_by_longest_prefix(model, config)
.await;
let model = if let Some(remote) = remote {
let model_info = if let Some(remote) = remote {
remote
} else {
model_info::model_info_from_slug(model)
};
model_info::with_config_overrides(model, config)
let model_info = model_info::with_model_info_patches(model_info, model, config);
model_info::with_config_overrides(model_info, config)
}
async fn find_remote_model_by_longest_prefix(
@@ -296,7 +298,18 @@ impl ModelsManager {
}
/// Merge remote model metadata into picker-ready presets, preserving existing entries.
fn build_available_models(&self, mut remote_models: Vec<ModelInfo>) -> Vec<ModelPreset> {
fn build_available_models(
&self,
mut remote_models: Vec<ModelInfo>,
config: &Config,
) -> Vec<ModelPreset> {
remote_models = remote_models
.into_iter()
.map(|remote_model| {
let remote_slug = remote_model.slug.clone();
model_info::with_model_info_patch(remote_model, &remote_slug, config)
})
.collect();
remote_models.sort_by(|a, b| a.priority.cmp(&b.priority));
let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect();
@@ -369,11 +382,16 @@ impl ModelsManager {
}
/// Build `ModelInfo` without consulting remote state or cache.
///
/// This follows the same override precedence as `get_model_info`:
/// per-model `model_info_overrides` are applied first, then top-level config overrides.
pub(crate) fn construct_model_info_offline_for_tests(
model: &str,
config: &Config,
) -> ModelInfo {
model_info::with_config_overrides(model_info::model_info_from_slug(model), config)
let model_info = model_info::model_info_from_slug(model);
let model_info = model_info::with_model_info_patches(model_info, model, config);
model_info::with_config_overrides(model_info, config)
}
}
@@ -386,6 +404,8 @@ mod tests {
use crate::features::Feature;
use crate::model_provider_info::WireApi;
use chrono::Utc;
use codex_protocol::openai_models::ModelInfoPatch;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use core_test_support::responses::mount_models_once;
use pretty_assertions::assert_eq;
@@ -823,8 +843,8 @@ mod tests {
);
}
#[test]
fn build_available_models_picks_default_after_hiding_hidden_models() {
#[tokio::test]
async fn build_available_models_respects_visibility_defaults_and_overrides() {
let codex_home = tempdir().expect("temp dir");
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
@@ -843,9 +863,42 @@ mod tests {
let mut expected_visible = ModelPreset::from(visible_model.clone());
expected_visible.is_default = true;
let available = manager.build_available_models(vec![hidden_model, visible_model]);
let config = ConfigBuilder::default()
.codex_home(codex_home.path().to_path_buf())
.build()
.await
.expect("load default test config");
let available_default = manager
.build_available_models(vec![hidden_model.clone(), visible_model.clone()], &config);
assert_eq!(
available_default,
vec![expected_hidden.clone(), expected_visible]
);
assert_eq!(available, vec![expected_hidden, expected_visible]);
let mut override_config = config;
override_config.model_info_overrides.insert(
"visible".to_string(),
ModelInfoPatch {
display_name: Some("Visible Local Override".to_string()),
visibility: Some(ModelVisibility::Hide),
..Default::default()
},
);
let available_overridden =
manager.build_available_models(vec![hidden_model, visible_model], &override_config);
let visible = available_overridden
.iter()
.find(|preset| preset.model == "visible")
.expect("visible model should exist");
assert_eq!(visible.display_name, "Visible Local Override");
assert!(!visible.show_in_picker);
assert!(
available_overridden
.iter()
.find(|preset| preset.model == "hidden")
.is_some_and(|preset| preset.is_default),
"when no model is shown in picker, first model should be marked default"
);
}
#[test]

View File

@@ -19,6 +19,108 @@ const LOCAL_FRIENDLY_TEMPLATE: &str =
const LOCAL_PRAGMATIC_TEMPLATE: &str = "You are a deeply pragmatic, effective software engineer.";
const PERSONALITY_PLACEHOLDER: &str = "{{ personality }}";
/// Apply the user-configured [`ModelInfoPatch`] stored under `model_key`
/// (if any) on top of `model`.
///
/// The merge is field-by-field: only fields the patch sets (`Some`) replace
/// the resolved value; omitted fields keep whatever was already resolved.
/// When a patch is found, `model.slug` is rewritten to `model_key` so the
/// result is reported under the config entry's slug.
pub(crate) fn with_model_info_patch(
    mut model: ModelInfo,
    model_key: &str,
    config: &Config,
) -> ModelInfo {
    // No override registered for this slug: return the metadata unchanged.
    let Some(model_info_patch) = config.model_info_overrides.get(model_key) else {
        return model;
    };
    model.slug = model_key.to_string();
    if let Some(display_name) = &model_info_patch.display_name {
        model.display_name = display_name.clone();
    }
    if let Some(description) = &model_info_patch.description {
        model.description = Some(description.clone());
    }
    if let Some(default_reasoning_level) = model_info_patch.default_reasoning_level {
        model.default_reasoning_level = Some(default_reasoning_level);
    }
    if let Some(supported_reasoning_levels) = &model_info_patch.supported_reasoning_levels {
        model.supported_reasoning_levels = supported_reasoning_levels.clone();
    }
    if let Some(shell_type) = model_info_patch.shell_type {
        model.shell_type = shell_type;
    }
    if let Some(visibility) = model_info_patch.visibility {
        model.visibility = visibility;
    }
    if let Some(supported_in_api) = model_info_patch.supported_in_api {
        model.supported_in_api = supported_in_api;
    }
    if let Some(priority) = model_info_patch.priority {
        model.priority = priority;
    }
    if let Some(upgrade) = &model_info_patch.upgrade {
        model.upgrade = Some(upgrade.clone());
    }
    // NOTE: ordering of the next two blocks is deliberate. Setting
    // `base_instructions` clears `model_messages`; a patch that also sets
    // `model_messages` re-applies it immediately afterwards.
    if let Some(base_instructions) = &model_info_patch.base_instructions {
        model.base_instructions = base_instructions.clone();
        // Keep parity with top-level config behavior: explicit base instructions
        // should disable template-driven model messages unless the patch sets them again.
        model.model_messages = None;
    }
    if let Some(model_messages) = &model_info_patch.model_messages {
        model.model_messages = Some(model_messages.clone());
    }
    if let Some(supports_reasoning_summaries) = model_info_patch.supports_reasoning_summaries {
        model.supports_reasoning_summaries = supports_reasoning_summaries;
    }
    if let Some(support_verbosity) = model_info_patch.support_verbosity {
        model.support_verbosity = support_verbosity;
    }
    if let Some(default_verbosity) = model_info_patch.default_verbosity {
        model.default_verbosity = Some(default_verbosity);
    }
    if let Some(apply_patch_tool_type) = &model_info_patch.apply_patch_tool_type {
        model.apply_patch_tool_type = Some(apply_patch_tool_type.clone());
    }
    if let Some(truncation_policy) = model_info_patch.truncation_policy {
        model.truncation_policy = truncation_policy;
    }
    if let Some(supports_parallel_tool_calls) = model_info_patch.supports_parallel_tool_calls {
        model.supports_parallel_tool_calls = supports_parallel_tool_calls;
    }
    if let Some(context_window) = model_info_patch.context_window {
        model.context_window = Some(context_window);
    }
    if let Some(auto_compact_token_limit) = model_info_patch.auto_compact_token_limit {
        model.auto_compact_token_limit = Some(auto_compact_token_limit);
    }
    if let Some(effective_context_window_percent) =
        model_info_patch.effective_context_window_percent
    {
        model.effective_context_window_percent = effective_context_window_percent;
    }
    if let Some(experimental_supported_tools) = &model_info_patch.experimental_supported_tools {
        model.experimental_supported_tools = experimental_supported_tools.clone();
    }
    if let Some(input_modalities) = &model_info_patch.input_modalities {
        model.input_modalities = input_modalities.clone();
    }
    if let Some(prefer_websockets) = model_info_patch.prefer_websockets {
        model.prefer_websockets = prefer_websockets;
    }
    model
}
/// Apply per-model config patches for both the resolved slug and — when it
/// differs — the slug the caller originally requested.
///
/// The resolved-slug patch is applied first, so a patch keyed by the exact
/// requested slug takes precedence over one keyed by the slug the model
/// resolved to.
pub(crate) fn with_model_info_patches(
    model_info: ModelInfo,
    requested_model: &str,
    config: &Config,
) -> ModelInfo {
    let resolved_slug = model_info.slug.clone();
    let patched = with_model_info_patch(model_info, &resolved_slug, config);
    if requested_model == resolved_slug {
        patched
    } else {
        with_model_info_patch(patched, requested_model, config)
    }
}
pub(crate) fn with_config_overrides(mut model: ModelInfo, config: &Config) -> ModelInfo {
if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
model.supports_reasoning_summaries = supports_reasoning_summaries;

View File

@@ -9,10 +9,12 @@ use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::models_manager::manager::ModelsManager;
use codex_otel::OtelManager;
use codex_otel::TelemetryAuthMode;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ModelInfoPatch;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use core_test_support::load_default_config_for_test;
@@ -233,8 +235,10 @@ async fn responses_stream_includes_subagent_header_on_other() {
);
}
// Request-construction integration test:
// enabling summaries via config should result in reasoning.summary being sent in /responses.
#[tokio::test]
async fn responses_respects_model_info_overrides_from_config() {
async fn responses_includes_reasoning_summary_when_enabled_in_config() {
core_test_support::skip_if_no_network!();
let server = responses::start_mock_server().await;
@@ -352,6 +356,128 @@ async fn responses_respects_model_info_overrides_from_config() {
);
}
// Request-construction integration test for model_info_overrides:
// patching supports_reasoning_summaries on the selected model should enable reasoning.summary
// in the actual outbound /responses body.
#[tokio::test]
async fn responses_respects_model_info_overrides_from_config() {
    core_test_support::skip_if_no_network!();
    let server = responses::start_mock_server().await;
    // Single SSE exchange; the recorded request body is what we assert on.
    let response_body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_completed("resp-1"),
    ]);
    let request_recorder = responses::mount_sse_once(&server, response_body).await;
    // Minimal Responses-API provider pointed at the mock server; retries are
    // disabled so any failure surfaces immediately.
    let provider = ModelProviderInfo {
        name: "mock".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        env_key: None,
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api: WireApi::Responses,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: Some(0),
        stream_max_retries: Some(0),
        stream_idle_timeout_ms: Some(5_000),
        requires_openai_auth: false,
        supports_websockets: false,
    };
    let codex_home = TempDir::new().expect("failed to create TempDir");
    let mut config = load_default_config_for_test(&codex_home).await;
    config.model = Some("gpt-3.5-turbo".to_string());
    config.model_provider_id = provider.name.clone();
    config.model_provider = provider.clone();
    config.model_reasoning_summary = ReasoningSummary::Detailed;
    // Patch the selected model so it reports reasoning-summary support; the
    // outbound request body should then carry `reasoning.summary`.
    config.model_info_overrides.insert(
        "gpt-3.5-turbo".to_string(),
        ModelInfoPatch {
            supports_reasoning_summaries: Some(true),
            ..Default::default()
        },
    );
    let effort = config.model_reasoning_effort;
    let summary = config.model_reasoning_summary;
    let model = config.model.clone().expect("model configured");
    let auth_manager =
        codex_core::test_support::auth_manager_from_auth(CodexAuth::from_api_key("Test API Key"));
    let auth_mode = auth_manager.auth_mode().map(TelemetryAuthMode::from);
    let models_manager = ModelsManager::new(config.codex_home.clone(), auth_manager);
    let config = Arc::new(config);
    // Resolve model metadata with the override applied.
    let model_info = models_manager.get_model_info(model.as_str(), &config).await;
    let conversation_id = ThreadId::new();
    let session_source =
        SessionSource::SubAgent(SubAgentSource::Other("override-check".to_string()));
    let otel_manager = OtelManager::new(
        conversation_id,
        model.as_str(),
        model_info.slug.as_str(),
        None,
        Some("test@test.com".to_string()),
        auth_mode,
        "test_originator".to_string(),
        false,
        "test".to_string(),
        session_source.clone(),
    );
    let client = ModelClient::new(
        None,
        conversation_id,
        provider,
        session_source,
        config.model_verbosity,
        false,
        false,
        false,
        false,
        None,
    );
    let mut client_session = client.new_session();
    let mut prompt = Prompt::default();
    prompt.input = vec![ResponseItem::Message {
        id: None,
        role: "user".into(),
        content: vec![ContentItem::InputText {
            text: "hello".into(),
        }],
        end_turn: None,
        phase: None,
    }];
    let mut stream = client_session
        .stream(&prompt, &model_info, &otel_manager, effort, summary, None)
        .await
        .expect("stream failed");
    // Drain the stream until the turn completes so the request is fully sent.
    while let Some(event) = stream.next().await {
        if matches!(event, Ok(ResponseEvent::Completed { .. })) {
            break;
        }
    }
    let request = request_recorder.single_request();
    let body = request.body_json();
    let reasoning = body
        .get("reasoning")
        .and_then(|value| value.as_object())
        .cloned();
    // With the patch in effect, the configured Detailed summary level must be
    // serialized into the outbound /responses body.
    assert_eq!(
        reasoning
            .as_ref()
            .and_then(|value| value.get("summary"))
            .and_then(|value| value.as_str()),
        Some("detailed")
    );
}
#[tokio::test]
async fn responses_stream_includes_turn_metadata_header_for_git_workspace_e2e() {
core_test_support::skip_if_no_network!();

View File

@@ -1,6 +1,7 @@
use codex_core::CodexAuth;
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_protocol::openai_models::ModelInfoPatch;
use codex_protocol::openai_models::TruncationPolicyConfig;
use core_test_support::load_default_config_for_test;
use pretty_assertions::assert_eq;
@@ -42,3 +43,92 @@ async fn offline_model_info_with_tool_output_override() {
TruncationPolicyConfig::tokens(123)
);
}
// Unknown model path + prefix-resolution path:
// verify model_info_overrides apply both when the slug falls back to synthetic metadata and when
// the requested slug differs from the resolved remote slug (longest-prefix match path).
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_info_patch_applies_for_fallback_and_prefix_resolution_paths() {
    let codex_home = TempDir::new().expect("create temp dir");
    let mut config = load_default_config_for_test(&codex_home).await;
    config.features.enable(Feature::RemoteModels);
    let auth_manager = codex_core::test_support::auth_manager_from_auth(
        CodexAuth::create_dummy_chatgpt_auth_for_testing(),
    );
    let manager = ModelsManager::new(config.codex_home.clone(), auth_manager);
    // Case 1: unknown slug ("gpt-fake") takes the synthetic-fallback path.
    // Capture the unpatched baseline first so expectations can be derived from
    // it rather than hard-coding every ModelInfo field.
    let mut baseline_config = config.clone();
    baseline_config.model_info_overrides.clear();
    let baseline = manager.get_model_info("gpt-fake", &baseline_config).await;
    config.model_info_overrides.insert(
        "gpt-fake".to_string(),
        ModelInfoPatch {
            display_name: Some("gpt-fake-dev".to_string()),
            context_window: Some(400_000),
            supports_parallel_tool_calls: Some(true),
            base_instructions: Some("Custom model instructions".to_string()),
            ..Default::default()
        },
    );
    let model_info = manager.get_model_info("gpt-fake", &config).await;
    // Only the patched fields should differ from the baseline.
    let mut expected = baseline;
    expected.slug = "gpt-fake".to_string();
    expected.display_name = "gpt-fake-dev".to_string();
    expected.context_window = Some(400_000);
    expected.supports_parallel_tool_calls = true;
    expected.base_instructions = "Custom model instructions".to_string();
    assert_eq!(model_info, expected);
    // Case 2: requested slug differs from the resolved slug
    // (longest-prefix resolution); the patch is keyed by the requested slug.
    let requested_slug = "gpt-5.1-eval";
    let mut baseline_config = config;
    baseline_config.model_info_overrides.clear();
    let baseline = manager
        .get_model_info(requested_slug, &baseline_config)
        .await;
    baseline_config.model_info_overrides.insert(
        requested_slug.to_string(),
        ModelInfoPatch {
            display_name: Some("gpt-5.1-eval-dev".to_string()),
            context_window: Some(456_789),
            ..Default::default()
        },
    );
    let model_info = manager
        .get_model_info(requested_slug, &baseline_config)
        .await;
    let mut expected = baseline;
    expected.slug = requested_slug.to_string();
    expected.display_name = "gpt-5.1-eval-dev".to_string();
    expected.context_window = Some(456_789);
    assert_eq!(model_info, expected);
}
// Offline helper parity path:
// construct_model_info_offline should apply model_info_overrides before global
// top-level overrides, matching get_model_info precedence.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn offline_helper_applies_model_info_patch() {
    let codex_home = TempDir::new().expect("create temp dir");
    let mut config = load_default_config_for_test(&codex_home).await;
    config.features.enable(Feature::RemoteModels);
    // Top-level override, applied after the per-model patch.
    config.model_context_window = Some(111_111);
    config.model_info_overrides.insert(
        "gpt-fake-offline".to_string(),
        ModelInfoPatch {
            display_name: Some("gpt-fake-offline-dev".to_string()),
            context_window: Some(222_222),
            ..Default::default()
        },
    );
    let model_info =
        codex_core::test_support::construct_model_info_offline("gpt-fake-offline", &config);
    // Field the top-level config does not touch: the patch value sticks.
    assert_eq!(model_info.display_name, "gpt-fake-offline-dev".to_string());
    // The patch sets context_window to 222_222, but the top-level
    // `model_context_window` override (111_111) is applied afterwards and wins.
    assert_eq!(model_info.context_window, Some(111_111));
}

View File

@@ -254,6 +254,38 @@ pub struct ModelInfo {
pub prefer_websockets: bool,
}
/// User-provided patch for overriding model metadata in local config.
///
/// Every field is optional so users can override only the parts of [`ModelInfo`] they need.
/// The target model slug is provided by the surrounding map key.
///
/// `deny_unknown_fields` makes typos in `config.toml` surface as deserialization
/// errors instead of being silently ignored.
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq, TS, JsonSchema)]
#[serde(default, deny_unknown_fields)]
pub struct ModelInfoPatch {
    // Presentation / picker metadata.
    pub display_name: Option<String>,
    pub description: Option<String>,
    // Reasoning configuration.
    pub default_reasoning_level: Option<ReasoningEffort>,
    pub supported_reasoning_levels: Option<Vec<ReasoningEffortPreset>>,
    // Capability and availability toggles.
    pub shell_type: Option<ConfigShellToolType>,
    pub visibility: Option<ModelVisibility>,
    pub supported_in_api: Option<bool>,
    pub priority: Option<i32>,
    pub upgrade: Option<ModelInfoUpgrade>,
    // Instruction sources. Note: applying `base_instructions` clears
    // `model_messages` unless the patch sets `model_messages` as well.
    pub base_instructions: Option<String>,
    pub model_messages: Option<ModelMessages>,
    pub supports_reasoning_summaries: Option<bool>,
    pub support_verbosity: Option<bool>,
    pub default_verbosity: Option<Verbosity>,
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    // Context/truncation limits.
    pub truncation_policy: Option<TruncationPolicyConfig>,
    pub supports_parallel_tool_calls: Option<bool>,
    pub context_window: Option<i64>,
    pub auto_compact_token_limit: Option<i64>,
    pub effective_context_window_percent: Option<i64>,
    // Tooling and transport.
    pub experimental_supported_tools: Option<Vec<String>>,
    pub input_modalities: Option<Vec<InputModality>>,
    pub prefer_websockets: Option<bool>,
}
impl ModelInfo {
pub fn auto_compact_token_limit(&self) -> Option<i64> {
self.auto_compact_token_limit.or_else(|| {
@@ -483,6 +515,7 @@ fn nearest_effort(target: ReasoningEffort, supported: &[ReasoningEffort]) -> Rea
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeSet;
fn test_model(spec: Option<ModelMessages>) -> ModelInfo {
ModelInfo {
@@ -666,4 +699,34 @@ mod tests {
);
assert_eq!(personality_variables.get_personality_message(None), None);
}
// Drift guard: every ModelInfo field must either be patchable via
// ModelInfoPatch or explicitly listed as intentionally non-patchable.
#[test]
fn model_info_patch_field_coverage_is_explicit() {
    // Collect the property names a type exposes in its generated JSON schema.
    fn schema_fields<T: JsonSchema>() -> BTreeSet<String> {
        let schema = schemars::schema_for!(T);
        let object = schema
            .schema
            .object
            .as_ref()
            .expect("expected object schema");
        object.properties.keys().cloned().collect()
    }
    let model_info_fields = schema_fields::<ModelInfo>();
    let patch_fields = schema_fields::<ModelInfoPatch>();
    // `slug` is supplied by the surrounding map key, so it is deliberately
    // absent from the patch type.
    let intentionally_non_patchable: BTreeSet<String> =
        ["slug".to_string()].into_iter().collect();
    let expected_patch_fields: BTreeSet<String> = model_info_fields
        .difference(&intentionally_non_patchable)
        .cloned()
        .collect();
    assert_eq!(patch_fields, expected_patch_fields);
    // Guard against typos in the allow-list itself: every entry must actually
    // exist on ModelInfo.
    for field in &intentionally_non_patchable {
        assert!(
            model_info_fields.contains(field),
            "intentionally_non_patchable contains unknown field: {field}"
        );
    }
}
}

View File

@@ -28,6 +28,34 @@ Codex can run a notification hook when the agent finishes a turn. See the config
The generated JSON Schema for `config.toml` lives at `codex-rs/core/config.schema.json`.
## `model_info_overrides` precedence
`model_info_overrides` lets you patch metadata for specific model slugs in `config.toml`.
The patch is merge-only: only fields you set are overridden, and omitted fields keep their
resolved value.
Precedence for model metadata is:
1. Resolved model metadata (remote `/models` when available, otherwise built-in fallback)
2. `model_info_overrides[<slug>]`
3. Existing top-level config overrides (for example `model_context_window`,
`model_auto_compact_token_limit`, `model_supports_reasoning_summaries`, and
`tool_output_token_limit`)
This means top-level overrides still win for their specific fields after per-model patching.
If a model slug contains dots, quote it.
Example:
```toml
[model_info_overrides."gpt-5.1"]
display_name = "gpt-5.1-local"
context_window = 400000
supports_parallel_tool_calls = true
base_instructions = "Custom model instructions"
```
## Notices
Codex stores "do not show again" flags for some UI prompts under the `[notice]` table.