get plan tool by default

This commit is contained in:
easong-openai
2025-08-01 19:52:34 -07:00
parent 18619dbbc1
commit 658f2d677f
5 changed files with 57 additions and 16 deletions

View File

@@ -243,15 +243,19 @@ By default, `reasoning` is only set on requests to OpenAI models that are known
model_supports_reasoning_summaries = true
```
## include_plan_tool
## experimental_include_plan_tool
Controls whether to expose the experimental plan tool (named `update_plan`) to the model and include the corresponding guidance in the system prompt.
Default behavior:
- For known models (anything hardcoded in the models list), this is disabled by default.
- For unknown models (not in the known list), this is enabled by default so new models get the feature without a CLI update.
When enabled, the model can call `update_plan` to keep an up-to-date, step-by-step plan for the task and Codex will render plan updates in the UI. When disabled, the tool is not advertised to the model and the “Plan updates” section is omitted from the prompt; any unsolicited `update_plan` calls will be treated as unsupported.
```toml
# Enable the experimental plan tool and prompt instructions
include_plan_tool = true
experimental_include_plan_tool = true
```
## sandbox_mode

View File

@@ -37,7 +37,11 @@ pub(crate) async fn stream_chat_completions(
// Build messages array
let mut messages = Vec::<serde_json::Value>::new();
let full_instructions = prompt.get_full_instructions(model, include_plan_tool);
let instr_cfg = crate::client_common::InstructionsConfig::for_model(
model,
include_plan_tool,
);
let full_instructions = prompt.get_full_instructions(&instr_cfg);
messages.push(json!({"role": "system", "content": full_instructions}));
if let Some(instr) = &prompt.user_instructions {

View File

@@ -141,8 +141,11 @@ impl ModelClient {
let token = auth.get_token().await?;
let full_instructions =
prompt.get_full_instructions(&self.config.model, self.config.include_plan_tool);
let instr_cfg = crate::client_common::InstructionsConfig::for_model(
&self.config.model,
self.config.include_plan_tool,
);
let full_instructions = prompt.get_full_instructions(&instr_cfg);
let tools_json = create_tools_json_for_responses_api(
prompt,
&self.config.model,

View File

@@ -37,19 +37,35 @@ pub struct Prompt {
pub base_instructions_override: Option<String>,
}
/// Options that influence how the full instructions are composed for a request.
#[derive(Debug, Default, Clone)]
pub struct InstructionsConfig {
    /// Whether the plan-tool guidance should remain in the system prompt.
    pub include_plan_tool: bool,
    /// Additional static prompt sections appended after the base instructions.
    pub extra_sections: Vec<&'static str>,
}

impl InstructionsConfig {
    /// Build the composition options appropriate for `model`.
    ///
    /// Models in the GPT-4.1 family additionally receive the apply-patch
    /// tool instructions as an extra prompt section; all other models get
    /// no extra sections.
    pub fn for_model(model: &str, include_plan_tool: bool) -> Self {
        let extra_sections: Vec<&'static str> = if model.starts_with("gpt-4.1") {
            vec![APPLY_PATCH_TOOL_INSTRUCTIONS]
        } else {
            Vec::new()
        };
        Self {
            include_plan_tool,
            extra_sections,
        }
    }
}
impl Prompt {
pub(crate) fn get_full_instructions(
&self,
model: &str,
include_plan_tool: bool,
) -> Cow<'_, str> {
pub(crate) fn get_full_instructions(&self, cfg: &InstructionsConfig) -> Cow<'_, str> {
let mut base = self
.base_instructions_override
.as_deref()
.unwrap_or(BASE_INSTRUCTIONS)
.to_string();
if !include_plan_tool {
if !cfg.include_plan_tool {
// Remove the plan-tool section if present. Prefer explicit markers
// for robustness, but fall back to trimming from the "Plan updates"
// heading if markers are missing.
@@ -73,8 +89,8 @@ impl Prompt {
}
let mut sections: Vec<&str> = vec![&base];
if model.starts_with("gpt-4.1") {
sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
for s in &cfg.extra_sections {
sections.push(s);
}
Cow::Owned(sections.join("\n"))
}
@@ -226,7 +242,18 @@ mod tests {
..Default::default()
};
let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
let full = prompt.get_full_instructions("gpt-4.1", true);
let cfg = InstructionsConfig::for_model("gpt-4.1", true);
let full = prompt.get_full_instructions(&cfg);
assert_eq!(full, expected);
}
#[test]
fn plan_section_removed_when_disabled() {
let prompt = Prompt::default();
let cfg = InstructionsConfig::for_model("gpt-4.1", false);
let full = prompt.get_full_instructions(&cfg);
assert!(!full.contains("Plan updates"));
assert!(!full.contains("update_plan"));
assert!(full.contains(APPLY_PATCH_TOOL_INSTRUCTIONS));
}
}

View File

@@ -344,7 +344,8 @@ pub struct ConfigToml {
pub internal_originator: Option<String>,
/// Include an experimental plan tool that the model can use to update its current plan and status of each step.
pub include_plan_tool: Option<bool>,
/// This is experimental and may be removed in the future.
pub experimental_include_plan_tool: Option<bool>,
}
impl ConfigToml {
@@ -538,7 +539,9 @@ impl Config {
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
experimental_resume,
include_plan_tool: include_plan_tool.or(cfg.include_plan_tool).unwrap_or(false),
include_plan_tool: include_plan_tool
.or(cfg.experimental_include_plan_tool)
.unwrap_or_else(|| openai_model_info.is_none()),
internal_originator: cfg.internal_originator,
};
Ok(config)