mirror of
https://github.com/openai/codex.git
synced 2026-04-30 03:12:20 +03:00
Migrate model preset (#7542)
- Introduce `openai_models` in `/core` - Move `PRESETS` under it - Move `ModelPreset`, `ModelUpgrade`, `ReasoningEffort`, and `ReasoningEffortPreset` to `protocol` - Introduce `Op::ListModels` and `EventMsg::AvailableModels` Next steps: - migrate `app-server` and `tui` to use the introduced Operation
This commit is contained in:
@@ -22,11 +22,11 @@ use codex_core::protocol::Op;
|
||||
use codex_core::protocol::SessionSource;
|
||||
use codex_otel::otel_event_manager::OtelEventManager;
|
||||
use codex_protocol::ConversationId;
|
||||
use codex_protocol::config_types::ReasoningEffort;
|
||||
use codex_protocol::config_types::Verbosity;
|
||||
use codex_protocol::models::ReasoningItemContent;
|
||||
use codex_protocol::models::ReasoningItemReasoningSummary;
|
||||
use codex_protocol::models::WebSearchAction;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
|
||||
187
codex-rs/core/tests/suite/list_models.rs
Normal file
187
codex-rs/core/tests/suite/list_models.rs
Normal file
@@ -0,0 +1,187 @@
|
||||
use anyhow::Result;
|
||||
use codex_core::CodexAuth;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_protocol::openai_models::ModelPreset;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::openai_models::ReasoningEffortPreset;
|
||||
use core_test_support::responses::start_mock_server;
|
||||
use core_test_support::test_codex::test_codex;
|
||||
use core_test_support::wait_for_event_match;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
/// Verifies that `Op::ListModels` under API-key auth yields the API-key
/// preset list (no ChatGPT-only `gpt-5.1-codex-max` entry).
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_returns_api_key_models() -> Result<()> {
    // Local mock backend — no real network traffic is involved.
    let server = start_mock_server().await;
    // Plain API-key auth selects the API-key model catalogue.
    let mut builder = test_codex().with_auth(CodexAuth::from_api_key("sk-test"));
    let test = builder.build(&server).await?;

    test.codex.submit(Op::ListModels).await?;

    // Block until the response event for the submitted op arrives;
    // all other events are ignored by the matcher.
    let event = wait_for_event_match(&test.codex, |event| match event {
        EventMsg::ListModelsResponse(models) => Some(models.clone()),
        _ => None,
    })
    .await;

    // Order matters: the catalogue is compared as an ordered Vec.
    let expected_models = expected_models_for_api_key();
    assert_eq!(expected_models, event.models);

    Ok(())
}
|
||||
|
||||
/// Verifies that `Op::ListModels` under ChatGPT auth yields the full preset
/// list, including the ChatGPT-only `gpt-5.1-codex-max` entry first.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_returns_chatgpt_models() -> Result<()> {
    // Local mock backend — no real network traffic is involved.
    let server = start_mock_server().await;
    // Dummy ChatGPT credentials select the ChatGPT model catalogue.
    let mut builder = test_codex().with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing());
    let test = builder.build(&server).await?;

    test.codex.submit(Op::ListModels).await?;

    // Block until the response event for the submitted op arrives;
    // all other events are ignored by the matcher.
    let event = wait_for_event_match(&test.codex, |event| match event {
        EventMsg::ListModelsResponse(models) => Some(models.clone()),
        _ => None,
    })
    .await;

    // Order matters: the catalogue is compared as an ordered Vec.
    let expected_models = expected_models_for_chatgpt();
    assert_eq!(expected_models, event.models);

    Ok(())
}
|
||||
|
||||
fn expected_models_for_api_key() -> Vec<ModelPreset> {
|
||||
vec![gpt_5_1_codex(), gpt_5_1_codex_mini(), gpt_5_1()]
|
||||
}
|
||||
|
||||
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
|
||||
vec![
|
||||
gpt_5_1_codex_max(),
|
||||
gpt_5_1_codex(),
|
||||
gpt_5_1_codex_mini(),
|
||||
gpt_5_1(),
|
||||
]
|
||||
}
|
||||
|
||||
fn gpt_5_1_codex_max() -> ModelPreset {
|
||||
ModelPreset {
|
||||
id: "gpt-5.1-codex-max".to_string(),
|
||||
model: "gpt-5.1-codex-max".to_string(),
|
||||
display_name: "gpt-5.1-codex-max".to_string(),
|
||||
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
supported_reasoning_efforts: vec![
|
||||
effort(
|
||||
ReasoningEffort::Low,
|
||||
"Fast responses with lighter reasoning",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::Medium,
|
||||
"Balances speed and reasoning depth for everyday tasks",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::High,
|
||||
"Maximizes reasoning depth for complex problems",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::XHigh,
|
||||
"Extra high reasoning depth for complex problems",
|
||||
),
|
||||
],
|
||||
is_default: true,
|
||||
upgrade: None,
|
||||
show_in_picker: true,
|
||||
}
|
||||
}
|
||||
|
||||
fn gpt_5_1_codex() -> ModelPreset {
|
||||
ModelPreset {
|
||||
id: "gpt-5.1-codex".to_string(),
|
||||
model: "gpt-5.1-codex".to_string(),
|
||||
display_name: "gpt-5.1-codex".to_string(),
|
||||
description: "Optimized for codex.".to_string(),
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
supported_reasoning_efforts: vec![
|
||||
effort(
|
||||
ReasoningEffort::Low,
|
||||
"Fastest responses with limited reasoning",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::Medium,
|
||||
"Dynamically adjusts reasoning based on the task",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::High,
|
||||
"Maximizes reasoning depth for complex or ambiguous problems",
|
||||
),
|
||||
],
|
||||
is_default: false,
|
||||
upgrade: Some(gpt_5_1_codex_max_upgrade()),
|
||||
show_in_picker: true,
|
||||
}
|
||||
}
|
||||
|
||||
fn gpt_5_1_codex_mini() -> ModelPreset {
|
||||
ModelPreset {
|
||||
id: "gpt-5.1-codex-mini".to_string(),
|
||||
model: "gpt-5.1-codex-mini".to_string(),
|
||||
display_name: "gpt-5.1-codex-mini".to_string(),
|
||||
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
supported_reasoning_efforts: vec![
|
||||
effort(
|
||||
ReasoningEffort::Medium,
|
||||
"Dynamically adjusts reasoning based on the task",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::High,
|
||||
"Maximizes reasoning depth for complex or ambiguous problems",
|
||||
),
|
||||
],
|
||||
is_default: false,
|
||||
upgrade: Some(gpt_5_1_codex_max_upgrade()),
|
||||
show_in_picker: true,
|
||||
}
|
||||
}
|
||||
|
||||
fn gpt_5_1() -> ModelPreset {
|
||||
ModelPreset {
|
||||
id: "gpt-5.1".to_string(),
|
||||
model: "gpt-5.1".to_string(),
|
||||
display_name: "gpt-5.1".to_string(),
|
||||
description: "Broad world knowledge with strong general reasoning.".to_string(),
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
supported_reasoning_efforts: vec![
|
||||
effort(
|
||||
ReasoningEffort::Low,
|
||||
"Balances speed with some reasoning; useful for straightforward queries and short explanations",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::Medium,
|
||||
"Provides a solid balance of reasoning depth and latency for general-purpose tasks",
|
||||
),
|
||||
effort(
|
||||
ReasoningEffort::High,
|
||||
"Maximizes reasoning depth for complex or ambiguous problems",
|
||||
),
|
||||
],
|
||||
is_default: false,
|
||||
upgrade: Some(gpt_5_1_codex_max_upgrade()),
|
||||
show_in_picker: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Shared upgrade descriptor attached to the non-flagship presets,
/// steering users toward `gpt-5.1-codex-max`.
fn gpt_5_1_codex_max_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
    // Function-scoped `use` keeps the construction site readable without
    // touching the file's top-level imports.
    use codex_protocol::openai_models::ModelUpgrade;
    ModelUpgrade {
        id: "gpt-5.1-codex-max".to_string(),
        // No per-effort remapping: efforts carry over unchanged on upgrade.
        reasoning_effort_mapping: None,
        migration_config_key: "hide_gpt-5.1-codex-max_migration_prompt".to_string(),
    }
}
|
||||
|
||||
fn effort(reasoning_effort: ReasoningEffort, description: &str) -> ReasoningEffortPreset {
|
||||
ReasoningEffortPreset {
|
||||
effort: reasoning_effort,
|
||||
description: description.to_string(),
|
||||
}
|
||||
}
|
||||
@@ -34,6 +34,7 @@ mod grep_files;
|
||||
mod items;
|
||||
mod json_result;
|
||||
mod list_dir;
|
||||
mod list_models;
|
||||
mod live_cli;
|
||||
mod model_overrides;
|
||||
mod model_tools;
|
||||
|
||||
@@ -2,7 +2,7 @@ use codex_core::CodexAuth;
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol_config_types::ReasoningEffort;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::wait_for_event;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
@@ -7,10 +7,10 @@ use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::SandboxPolicy;
|
||||
use codex_core::protocol_config_types::ReasoningEffort;
|
||||
use codex_core::protocol_config_types::ReasoningSummary;
|
||||
use codex_core::shell::Shell;
|
||||
use codex_core::shell::default_user_shell;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
use core_test_support::responses::mount_sse_once;
|
||||
|
||||
Reference in New Issue
Block a user