Compare commits

..

3 Commits

Author SHA1 Message Date
dedrisian-oai
488ec061bf Release 0.44.0 2025-10-03 09:43:33 -07:00
dedrisian-oai
16b6951648 Nit: Pop model effort picker on esc (#4642)
Pressing Escape now pops only the effort picker instead of dismissing the
entire model-selection dialog.



https://github.com/user-attachments/assets/cef32291-cd07-4ac7-be8f-ce62b38145f9
2025-10-02 21:07:47 -07:00
dedrisian-oai
231c36f8d3 Move gpt-5-codex to top (#4641)
In /model picker
2025-10-03 03:34:58 +00:00
5 changed files with 40 additions and 13 deletions

View File

@@ -36,7 +36,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.0.0"
version = "0.44.0"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024

View File

@@ -83,7 +83,7 @@ pub enum CodexErr {
UsageLimitReached(UsageLimitReachedError),
#[error(
"To use Codex with your ChatGPT plan, [upgrade to Plus](https://openai.com/chatgpt/pricing)."
"To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
)]
UsageNotIncluded,
@@ -189,7 +189,7 @@ impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. [Upgrade to Pro](https://openai.com/chatgpt/pricing){}",
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing){}",
retry_suffix_after_or(self.resets_in_seconds)
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
@@ -199,7 +199,7 @@ impl std::fmt::Display for UsageLimitReachedError {
)
}
Some(PlanType::Known(KnownPlan::Free)) => {
"You've hit your usage limit. [Upgrade to Plus](https://openai.com/chatgpt/pricing) to continue using Codex."
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro))
@@ -336,7 +336,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. [Upgrade to Pro](https://openai.com/chatgpt/pricing) or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again later."
);
}
@@ -349,7 +349,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. [Upgrade to Plus](https://openai.com/chatgpt/pricing) to continue using Codex."
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
);
}
@@ -427,7 +427,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. [Upgrade to Pro](https://openai.com/chatgpt/pricing) or try again in 3 hours 32 minutes."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again in 3 hours 32 minutes."
);
}

View File

@@ -1,4 +1,3 @@
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::path::PathBuf;
@@ -1598,9 +1597,14 @@ impl ChatWidget {
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let presets: Vec<ModelPreset> = builtin_model_presets(auth_mode);
let mut grouped: BTreeMap<&str, Vec<ModelPreset>> = BTreeMap::new();
let mut grouped: Vec<(&str, Vec<ModelPreset>)> = Vec::new();
for preset in presets.into_iter() {
grouped.entry(preset.model).or_default().push(preset);
if let Some((_, entries)) = grouped.iter_mut().find(|(model, _)| *model == preset.model)
{
entries.push(preset);
} else {
grouped.push((preset.model, vec![preset]));
}
}
let mut items: Vec<SelectionItem> = Vec::new();
@@ -1629,7 +1633,7 @@ impl ChatWidget {
description,
is_current,
actions,
dismiss_on_select: true,
dismiss_on_select: false,
..Default::default()
});
}

View File

@@ -5,8 +5,8 @@ expression: popup
Select Model
Switch the model for this and future Codex CLI sessions
1. gpt-5 Broad world knowledge with strong general
1. gpt-5-codex (current) Optimized for coding tasks with many tools.
2. gpt-5 Broad world knowledge with strong general
reasoning.
2. gpt-5-codex (current) Optimized for coding tasks with many tools.
Press enter to confirm or esc to go back

View File

@@ -1026,6 +1026,29 @@ fn model_reasoning_selection_popup_snapshot() {
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn reasoning_popup_escape_returns_to_model_popup() {
    // Pressing Esc while the reasoning-level picker is open should pop back
    // to the model picker instead of dismissing the whole popup stack.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
    chat.config.model = "gpt-5".to_string();
    chat.open_model_popup();

    // Open the reasoning picker scoped to the gpt-5-codex presets only.
    let codex_presets: Vec<_> = builtin_model_presets(None)
        .into_iter()
        .filter(|p| p.model == "gpt-5-codex")
        .collect();
    chat.open_reasoning_popup("gpt-5-codex".to_string(), codex_presets);

    let rendered = render_bottom_popup(&chat, 80);
    assert!(rendered.contains("Select Reasoning Level"));

    chat.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE));

    // After Esc the model picker is showing again and the reasoning
    // picker is gone.
    let rendered = render_bottom_popup(&chat, 80);
    assert!(rendered.contains("Select Model"));
    assert!(!rendered.contains("Select Reasoning Level"));
}
#[test]
fn exec_history_extends_previous_when_consecutive() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();