Mirror of https://github.com/openai/codex.git, synced 2026-04-27 18:01:04 +03:00
fix: send unfiltered models over model/list (#11793)
### What

To unblock filtering models in VSCE, change the `model/list` app-server endpoint to send all models plus a visibility field (`hidden`, derived from the preset's `showInPicker`) so filtering can be done in VSCE if desired.

### Tests

Updated tests.
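With hiding now a client-side concern, the extension can filter locally. A minimal sketch of picker filtering under these assumptions: `fetchModels` stands in for the extension's app-server transport, and the import path for the generated `Model` binding is illustrative.

```ts
// Sketch only: fetchModels is a hypothetical transport helper; the binding
// import path is illustrative. Model matches the generated type in this diff.
import type { Model } from "./bindings/Model";

declare function fetchModels(params: {
  limit?: number | null;
  cursor?: string | null;
  includeHidden?: boolean | null;
}): Promise<{ data: Model[]; nextCursor: string | null }>;

// Fetch everything once, then decide locally which models the picker shows.
async function pickerModels(showHidden: boolean): Promise<Model[]> {
  const { data } = await fetchModels({ includeHidden: true });
  return showHidden ? data : data.filter((m) => !m.hidden);
}
```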
@@ -1128,6 +1128,13 @@
         "null"
       ]
     },
+    "includeHidden": {
+      "description": "When true, include models that are hidden from the default picker list.",
+      "type": [
+        "boolean",
+        "null"
+      ]
+    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",
@@ -12611,6 +12611,9 @@
     "displayName": {
       "type": "string"
     },
+    "hidden": {
+      "type": "boolean"
+    },
     "id": {
       "type": "string"
     },
@@ -12651,6 +12654,7 @@
     "defaultReasoningEffort",
     "description",
     "displayName",
+    "hidden",
     "id",
     "isDefault",
     "model",
@@ -12668,6 +12672,13 @@
         "null"
       ]
     },
+    "includeHidden": {
+      "description": "When true, include models that are hidden from the default picker list.",
+      "type": [
+        "boolean",
+        "null"
+      ]
+    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",
@@ -8,6 +8,13 @@
         "null"
       ]
     },
+    "includeHidden": {
+      "description": "When true, include models that are hidden from the default picker list.",
+      "type": [
+        "boolean",
+        "null"
+      ]
+    },
     "limit": {
       "description": "Optional page size; defaults to a reasonable server-side value.",
       "format": "uint32",
@@ -31,6 +31,9 @@
     "displayName": {
       "type": "string"
     },
+    "hidden": {
+      "type": "boolean"
+    },
     "id": {
       "type": "string"
     },
@@ -71,6 +74,7 @@
     "defaultReasoningEffort",
     "description",
     "displayName",
+    "hidden",
     "id",
     "isDefault",
     "model",
@@ -5,4 +5,4 @@ import type { InputModality } from "../InputModality";
 import type { ReasoningEffort } from "../ReasoningEffort";
 import type { ReasoningEffortOption } from "./ReasoningEffortOption";

-export type Model = { id: string, model: string, upgrade: string | null, displayName: string, description: string, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };
+export type Model = { id: string, model: string, upgrade: string | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };
@@ -10,4 +10,8 @@ cursor?: string | null,
 /**
  * Optional page size; defaults to a reasonable server-side value.
  */
-limit?: number | null, };
+limit?: number | null,
+/**
+ * When true, include models that are hidden from the default picker list.
+ */
+includeHidden?: boolean | null, };
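Built against this params shape, a `model/list` request could look like the sketch below; the JSON-RPC framing is an assumption (the serialization test that follows pins only `id` and `params`).

```ts
// Hypothetical request payload: omitting includeHidden (or sending null)
// preserves the old picker-only listing.
const listAllModelsRequest = {
  jsonrpc: "2.0", // framing assumed
  id: 6,
  method: "model/list",
  params: {
    limit: null,
    cursor: null,
    includeHidden: true,
  },
};
```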
@@ -1235,7 +1235,8 @@ mod tests {
                 "id": 6,
                 "params": {
                     "limit": null,
-                    "cursor": null
+                    "cursor": null,
+                    "includeHidden": null
                 }
             }),
             serde_json::to_value(&request)?,
@@ -1110,6 +1110,9 @@ pub struct ModelListParams {
     /// Optional page size; defaults to a reasonable server-side value.
     #[ts(optional = nullable)]
     pub limit: Option<u32>,
+    /// When true, include models that are hidden from the default picker list.
+    #[ts(optional = nullable)]
+    pub include_hidden: Option<bool>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1121,6 +1124,7 @@ pub struct Model {
     pub upgrade: Option<String>,
     pub display_name: String,
     pub description: String,
+    pub hidden: bool,
     pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
     pub default_reasoning_effort: ReasoningEffort,
     #[serde(default = "default_input_modalities")]
@@ -131,7 +131,7 @@ Example with notification opt-out:
 - `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
 - `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
 - `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
-- `model/list` — list available models (with reasoning effort options and optional `upgrade` model ids).
+- `model/list` — list available models (set `includeHidden: true` to include entries with `hidden: true`), with reasoning effort options and optional `upgrade` model ids.
 - `experimentalFeature/list` — list feature flags with stage metadata (`beta`, `underDevelopment`, `stable`, etc.), enabled/default-enabled state, and cursor pagination. For non-beta flags, `displayName`/`description`/`announcement` are `null`.
 - `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
 - `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
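In the handler below, `include_hidden` is applied before pagination, so it composes with cursors. A sketch of draining every page, hidden models included, where `rpc` is a hypothetical request helper:

```ts
// Sketch: walk model/list pages until the server stops returning a cursor.
type ModelPage = {
  data: { id: string; hidden: boolean }[];
  nextCursor: string | null;
};

async function allModels(
  rpc: (method: string, params: unknown) => Promise<ModelPage>,
): Promise<ModelPage["data"]> {
  const models: ModelPage["data"] = [];
  let cursor: string | null = null;
  do {
    const page = await rpc("model/list", {
      limit: 100,
      cursor,
      includeHidden: true,
    });
    models.push(...page.data);
    cursor = page.nextCursor;
  } while (cursor !== null);
  return models;
}
```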
@@ -3592,10 +3592,15 @@ impl CodexMessageProcessor {
         request_id: ConnectionRequestId,
         params: ModelListParams,
     ) {
-        let ModelListParams { limit, cursor } = params;
+        let ModelListParams {
+            limit,
+            cursor,
+            include_hidden,
+        } = params;
         let mut config = (*config).clone();
         config.features.enable(Feature::RemoteModels);
-        let models = supported_models(thread_manager, &config).await;
+        let models =
+            supported_models(thread_manager, &config, include_hidden.unwrap_or(false)).await;
         let total = models.len();

         if total == 0 {
@@ -8,12 +8,16 @@ use codex_core::models_manager::manager::RefreshStrategy;
 use codex_protocol::openai_models::ModelPreset;
 use codex_protocol::openai_models::ReasoningEffortPreset;

-pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
+pub async fn supported_models(
+    thread_manager: Arc<ThreadManager>,
+    config: &Config,
+    include_hidden: bool,
+) -> Vec<Model> {
     thread_manager
         .list_models(config, RefreshStrategy::OnlineIfUncached)
         .await
         .into_iter()
-        .filter(|preset| preset.show_in_picker)
+        .filter(|preset| include_hidden || preset.show_in_picker)
         .map(model_from_preset)
         .collect()
 }
@@ -25,6 +29,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
         upgrade: preset.upgrade.map(|upgrade| upgrade.id),
         display_name: preset.display_name.to_string(),
         description: preset.description.to_string(),
+        hidden: !preset.show_in_picker,
         supported_reasoning_efforts: reasoning_efforts_from_preset(
             preset.supported_reasoning_efforts,
         ),
@@ -33,6 +33,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(100),
             cursor: None,
+            include_hidden: None,
         })
         .await?;

@@ -54,6 +55,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
                 upgrade: None,
                 display_name: "gpt-5.2-codex".to_string(),
                 description: "Latest frontier agentic coding model.".to_string(),
+                hidden: false,
                 supported_reasoning_efforts: vec![
                     ReasoningEffortOption {
                         reasoning_effort: ReasoningEffort::Low,
@@ -84,6 +86,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
                 upgrade: Some("gpt-5.2-codex".to_string()),
                 display_name: "gpt-5.1-codex-max".to_string(),
                 description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
+                hidden: false,
                 supported_reasoning_efforts: vec![
                     ReasoningEffortOption {
                         reasoning_effort: ReasoningEffort::Low,
@@ -114,6 +117,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
                 upgrade: Some("gpt-5.2-codex".to_string()),
                 display_name: "gpt-5.1-codex-mini".to_string(),
                 description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
+                hidden: false,
                 supported_reasoning_efforts: vec![
                     ReasoningEffortOption {
                         reasoning_effort: ReasoningEffort::Medium,
@@ -138,6 +142,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
                 description:
                     "Latest frontier model with improvements across knowledge, reasoning and coding"
                         .to_string(),
+                hidden: false,
                 supported_reasoning_efforts: vec![
                     ReasoningEffortOption {
                         reasoning_effort: ReasoningEffort::Low,
@@ -173,6 +178,38 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
     Ok(())
 }

+#[tokio::test]
+async fn list_models_includes_hidden_models() -> Result<()> {
+    let codex_home = TempDir::new()?;
+    write_models_cache(codex_home.path())?;
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+
+    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
+
+    let request_id = mcp
+        .send_list_models_request(ModelListParams {
+            limit: Some(100),
+            cursor: None,
+            include_hidden: Some(true),
+        })
+        .await?;
+
+    let response: JSONRPCResponse = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
+    )
+    .await??;
+
+    let ModelListResponse {
+        data: items,
+        next_cursor,
+    } = to_response::<ModelListResponse>(response)?;
+
+    assert!(items.iter().any(|item| item.hidden));
+    assert!(next_cursor.is_none());
+    Ok(())
+}
+
 #[tokio::test]
 async fn list_models_pagination_works() -> Result<()> {
     let codex_home = TempDir::new()?;
@@ -185,6 +222,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: None,
+            include_hidden: None,
         })
         .await?;

@@ -207,6 +245,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(next_cursor.clone()),
+            include_hidden: None,
         })
         .await?;

@@ -229,6 +268,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(third_cursor.clone()),
+            include_hidden: None,
         })
         .await?;

@@ -251,6 +291,7 @@ async fn list_models_pagination_works() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: Some(1),
             cursor: Some(fourth_cursor.clone()),
+            include_hidden: None,
        })
        .await?;

@@ -283,6 +324,7 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
         .send_list_models_request(ModelListParams {
             limit: None,
             cursor: Some("invalid".to_string()),
+            include_hidden: None,
         })
         .await?;
