[Codex][CLI] Gate image inputs by model modalities (#10271)

###### Summary

- Add input_modalities to model metadata so clients can determine
supported input types.
- Gate image paste/attach in TUI when the selected model does not
support images.
- Block submissions that include images for unsupported models and show a
clear warning.
- Propagate modality metadata through app-server protocol/model-list
responses.
  - Update related tests/fixtures.

  ###### Rationale

  - Models support different input modalities.
- Clients need an explicit capability signal so they can avoid sending
unsupported requests.
- Backward-compatible defaults preserve existing behavior when modality
metadata is absent.

  ###### Scope

  - codex-rs/protocol, codex-rs/core, codex-rs/tui
  - codex-rs/app-server-protocol, codex-rs/app-server
  - Generated app-server types / schema fixtures

  ###### Trade-offs

- For backward compatibility, the default behavior assumes text + image
input when the field is absent.
  - Server-side validation remains the source of truth.

  ###### Follow-up

- Non-TUI clients should consume input_modalities to disable unsupported
attachments.
- Model catalogs should explicitly set input_modalities for text-only
models.

  ###### Testing

  - cargo fmt --all
  - cargo test -p codex-tui
  - env -u GITHUB_APP_KEY cargo test -p codex-core --lib
  - just write-app-server-schema
- cargo run -p codex-cli --bin codex -- app-server generate-ts --out
app-server-types
  - test against local backend
  
<img width="695" height="199" alt="image"
src="https://github.com/user-attachments/assets/d22dd04f-5eba-4db9-a7c5-a2506f60ec44"
/>

---------

Co-authored-by: Josh McKinney <joshka@openai.com>
This commit is contained in:
Colin Young
2026-02-02 18:56:39 -08:00
committed by GitHub
parent b8addcddb9
commit 7e07ec8f73
23 changed files with 373 additions and 3 deletions

View File

@@ -7,6 +7,7 @@ use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::default_input_modalities;
use core_test_support::load_default_config_for_test;
use indoc::indoc;
use pretty_assertions::assert_eq;
@@ -99,6 +100,7 @@ fn gpt_52_codex() -> ModelPreset {
upgrade: None,
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -142,6 +144,7 @@ fn gpt_5_1_codex_max() -> ModelPreset {
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -177,6 +180,7 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -222,6 +226,7 @@ fn gpt_5_2() -> ModelPreset {
)),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -255,6 +260,7 @@ fn bengalfox() -> ModelPreset {
upgrade: None,
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -288,6 +294,7 @@ fn boomslang() -> ModelPreset {
upgrade: None,
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -327,6 +334,7 @@ fn gpt_5_codex() -> ModelPreset {
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -362,6 +370,7 @@ fn gpt_5_codex_mini() -> ModelPreset {
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -401,6 +410,7 @@ fn gpt_5_1_codex() -> ModelPreset {
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -444,6 +454,7 @@ fn gpt_5() -> ModelPreset {
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}
@@ -483,6 +494,7 @@ fn gpt_5_1() -> ModelPreset {
)),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
}
}

View File

@@ -19,6 +19,7 @@ use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::user_input::UserInput;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
@@ -349,5 +350,6 @@ fn test_remote_model(slug: &str, priority: i32) -> ModelInfo {
auto_compact_token_limit: None,
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
}
}

View File

@@ -16,6 +16,7 @@ use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_completed;
@@ -512,6 +513,7 @@ async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Resul
auto_compact_token_limit: None,
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
};
let _models_mock = mount_models_once(
@@ -627,6 +629,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
auto_compact_token_limit: None,
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
};
let _models_mock = mount_models_once(
@@ -737,6 +740,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
auto_compact_token_limit: None,
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
};
let _models_mock = mount_models_once(

View File

@@ -25,6 +25,7 @@ use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_assistant_message;
@@ -76,6 +77,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
shell_type: ConfigShellToolType::UnifiedExec,
visibility: ModelVisibility::List,
supported_in_api: true,
input_modalities: default_input_modalities(),
priority: 1,
upgrade: None,
base_instructions: "base instructions".to_string(),
@@ -313,6 +315,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
shell_type: ConfigShellToolType::ShellCommand,
visibility: ModelVisibility::List,
supported_in_api: true,
input_modalities: default_input_modalities(),
priority: 1,
upgrade: None,
base_instructions: remote_base.to_string(),
@@ -787,6 +790,7 @@ fn test_remote_model_with_policy(
shell_type: ConfigShellToolType::ShellCommand,
visibility,
supported_in_api: true,
input_modalities: default_input_modalities(),
priority,
upgrade: None,
base_instructions: "base instructions".to_string(),