// Mirror of https://github.com/openai/codex.git (synced 2026-04-30 19:32:04 +03:00).
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::Verbosity;

use crate::config::types::ReasoningSummaryFormat;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;
use crate::tools::spec::ConfigShellToolType;
use crate::truncate::TruncationPolicy;

/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

/// Base instructions used for the GPT-5 Codex family of models (see
/// `find_family_for_model` below for the slugs that select it).
const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");

/// Base instructions used for the general-purpose GPT-5.1 models.
const GPT_5_1_INSTRUCTIONS: &str = include_str!("../gpt_5_1_prompt.md");

/// Base instructions used for the GPT-5.1 Codex Max models.
const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../gpt-5.1-codex-max_prompt.md");

/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ModelFamily {
    /// The full model slug used to derive this model family, e.g.
    /// "gpt-4.1-2025-04-14".
    pub slug: String,

    /// The model family name, e.g. "gpt-4.1". Note this should be able to be
    /// used with [`crate::openai_model_info::get_model_info`].
    pub family: String,

    /// True if the model needs additional instructions on how to use the
    /// "virtual" `apply_patch` CLI.
    pub needs_special_apply_patch_instructions: bool,

    /// Whether the `reasoning` field can be set when making a request to this
    /// model family. Note it has `effort` and `summary` subfields (though
    /// `summary` is optional).
    pub supports_reasoning_summaries: bool,

    /// The reasoning effort to use for this model family when none is
    /// explicitly chosen.
    pub default_reasoning_effort: Option<ReasoningEffort>,

    /// Selects any special handling required for this family's reasoning
    /// summaries (see [`ReasoningSummaryFormat`]).
    pub reasoning_summary_format: ReasoningSummaryFormat,

    /// Whether this model supports parallel tool calls when using the
    /// Responses API.
    pub supports_parallel_tool_calls: bool,

    /// Present if the model performs better when `apply_patch` is provided as
    /// a tool call instead of just a bash command.
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,

    /// Base instructions to use when querying the model.
    pub base_instructions: String,

    /// Names of beta tools that should be exposed to this model family.
    pub experimental_supported_tools: Vec<String>,

    /// Percentage of the context window considered usable for inputs, after
    /// reserving headroom for system prompts, tool overhead, and model output.
    /// This is applied when computing the effective context window seen by
    /// consumers.
    pub effective_context_window_percent: i64,

    /// If the model family supports setting the verbosity level when using the
    /// Responses API.
    pub support_verbosity: bool,

    /// The default verbosity level for this model family when using the
    /// Responses API, if any.
    pub default_verbosity: Option<Verbosity>,

    /// Preferred shell tool type for this model family when features do not override it.
    pub shell_type: ConfigShellToolType,

    /// Truncation policy (byte- or token-budgeted) associated with this model
    /// family.
    pub truncation_policy: TruncationPolicy,
}

/// Builds `Some(ModelFamily)` for the given `$slug`/`$family`, starting from a
/// set of stock defaults and then applying any trailing `key: value` field
/// overrides supplied by the caller. The result is pre-wrapped in `Some` so
/// the match arms in `find_family_for_model` stay concise.
macro_rules! model_family {
    (
        // Required slug and family expressions, then zero or more
        // `field_name: expr` overrides (trailing comma allowed).
        $slug:expr, $family:expr $(, $key:ident : $value:expr )* $(,)?
    ) => {{
        // Shared default used by both `shell_type` and `truncation_policy`.
        let truncation_policy = TruncationPolicy::Bytes(10_000);
        // defaults
        // `mut` goes unused when the caller supplies no overrides.
        #[allow(unused_mut)]
        let mut mf = ModelFamily {
            slug: $slug.to_string(),
            family: $family.to_string(),
            needs_special_apply_patch_instructions: false,
            supports_reasoning_summaries: false,
            reasoning_summary_format: ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: BASE_INSTRUCTIONS.to_string(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
            shell_type: ConfigShellToolType::Default(truncation_policy),
            default_verbosity: None,
            default_reasoning_effort: None,
            truncation_policy,
        };

        // apply overrides
        $(
            mf.$key = $value;
        )*
        Some(mf)
    }};
}

/// Returns a `ModelFamily` for the given model slug, or `None` if the slug
|
|
/// does not match any known model family.
|
|
pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
|
|
if slug.starts_with("o3") {
|
|
model_family!(
|
|
slug, "o3",
|
|
supports_reasoning_summaries: true,
|
|
needs_special_apply_patch_instructions: true,
|
|
)
|
|
} else if slug.starts_with("o4-mini") {
|
|
model_family!(
|
|
slug, "o4-mini",
|
|
supports_reasoning_summaries: true,
|
|
needs_special_apply_patch_instructions: true,
|
|
)
|
|
} else if slug.starts_with("codex-mini-latest") {
|
|
model_family!(
|
|
slug, "codex-mini-latest",
|
|
supports_reasoning_summaries: true,
|
|
needs_special_apply_patch_instructions: true,
|
|
shell_type: ConfigShellToolType::Local,
|
|
)
|
|
} else if slug.starts_with("gpt-4.1") {
|
|
model_family!(
|
|
slug, "gpt-4.1",
|
|
needs_special_apply_patch_instructions: true,
|
|
)
|
|
} else if slug.starts_with("gpt-oss") || slug.starts_with("openai/gpt-oss") {
|
|
model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
|
|
} else if slug.starts_with("gpt-4o") {
|
|
model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
|
|
} else if slug.starts_with("gpt-3.5") {
|
|
model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
|
|
} else if slug.starts_with("test-gpt-5") {
|
|
let truncation_policy = TruncationPolicy::Tokens(10_000);
|
|
model_family!(
|
|
slug, slug,
|
|
supports_reasoning_summaries: true,
|
|
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
|
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
|
|
experimental_supported_tools: vec![
|
|
"grep_files".to_string(),
|
|
"list_dir".to_string(),
|
|
"read_file".to_string(),
|
|
"test_sync_tool".to_string(),
|
|
],
|
|
supports_parallel_tool_calls: true,
|
|
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
|
|
support_verbosity: true,
|
|
)
|
|
|
|
// Internal models.
|
|
} else if slug.starts_with("codex-exp-") {
|
|
let truncation_policy = TruncationPolicy::Tokens(10_000);
|
|
model_family!(
|
|
slug, slug,
|
|
supports_reasoning_summaries: true,
|
|
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
|
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
|
|
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
|
experimental_supported_tools: vec![
|
|
"grep_files".to_string(),
|
|
"list_dir".to_string(),
|
|
"read_file".to_string(),
|
|
],
|
|
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
|
|
supports_parallel_tool_calls: true,
|
|
support_verbosity: true,
|
|
truncation_policy: truncation_policy,
|
|
)
|
|
|
|
// Production models.
|
|
} else if slug.starts_with("gpt-5.1-codex-max") {
|
|
let truncation_policy = TruncationPolicy::Tokens(10_000);
|
|
model_family!(
|
|
slug, slug,
|
|
supports_reasoning_summaries: true,
|
|
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
|
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
|
|
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
|
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
|
|
supports_parallel_tool_calls: true,
|
|
support_verbosity: false,
|
|
truncation_policy: truncation_policy,
|
|
)
|
|
} else if slug.starts_with("gpt-5-codex")
|
|
|| slug.starts_with("gpt-5.1-codex")
|
|
|| slug.starts_with("codex-")
|
|
{
|
|
let truncation_policy = TruncationPolicy::Tokens(10_000);
|
|
model_family!(
|
|
slug, slug,
|
|
supports_reasoning_summaries: true,
|
|
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
|
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
|
|
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
|
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
|
|
supports_parallel_tool_calls: true,
|
|
support_verbosity: false,
|
|
truncation_policy: truncation_policy,
|
|
)
|
|
} else if slug.starts_with("gpt-5.1") {
|
|
let truncation_policy = TruncationPolicy::Tokens(10_000);
|
|
model_family!(
|
|
slug, "gpt-5.1",
|
|
supports_reasoning_summaries: true,
|
|
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
|
support_verbosity: true,
|
|
default_verbosity: Some(Verbosity::Low),
|
|
base_instructions: GPT_5_1_INSTRUCTIONS.to_string(),
|
|
default_reasoning_effort: Some(ReasoningEffort::Medium),
|
|
truncation_policy: TruncationPolicy::Bytes(10_000),
|
|
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
|
|
supports_parallel_tool_calls: true,
|
|
)
|
|
} else if slug.starts_with("gpt-5") {
|
|
model_family!(
|
|
slug, "gpt-5",
|
|
supports_reasoning_summaries: true,
|
|
needs_special_apply_patch_instructions: true,
|
|
shell_type: ConfigShellToolType::Default(TruncationPolicy::Bytes(10_000)),
|
|
support_verbosity: true,
|
|
truncation_policy: TruncationPolicy::Bytes(10_000),
|
|
)
|
|
} else {
|
|
None
|
|
}
|
|
}
|
|
|
|
pub fn derive_default_model_family(model: &str) -> ModelFamily {
|
|
let truncation_policy = TruncationPolicy::Bytes(10_000);
|
|
ModelFamily {
|
|
slug: model.to_string(),
|
|
family: model.to_string(),
|
|
needs_special_apply_patch_instructions: false,
|
|
supports_reasoning_summaries: false,
|
|
reasoning_summary_format: ReasoningSummaryFormat::None,
|
|
supports_parallel_tool_calls: false,
|
|
apply_patch_tool_type: None,
|
|
base_instructions: BASE_INSTRUCTIONS.to_string(),
|
|
experimental_supported_tools: Vec::new(),
|
|
effective_context_window_percent: 95,
|
|
support_verbosity: false,
|
|
shell_type: ConfigShellToolType::Default(truncation_policy),
|
|
default_verbosity: None,
|
|
default_reasoning_effort: None,
|
|
truncation_policy,
|
|
}
|
|
}
|