mirror of
https://github.com/openai/codex.git
synced 2026-04-28 10:21:06 +03:00
11 KiB
PR #1969: Adjust error messages
- URL: https://github.com/openai/codex/pull/1969
- Author: pakrym-oai
- Created: 2025-08-07 22:39:28 UTC
- Updated: 2025-08-08 01:24:45 UTC
- Changes: +98/-17, Files changed: 5, Commits: 5
Description
Full Diff
diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs
index 34aecad17a..0caf1170a6 100644
--- a/codex-rs/core/src/client.rs
+++ b/codex-rs/core/src/client.rs
@@ -31,6 +31,7 @@ use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::Result;
+use crate::error::UsageLimitReachedError;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
@@ -195,7 +196,7 @@ impl ModelClient {
if let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
- && let Some(account_id) = auth.get_account_id().await
+ && let Some(account_id) = auth.get_account_id()
{
req_builder = req_builder.header("chatgpt-account-id", account_id);
}
@@ -263,7 +264,9 @@ impl ModelClient {
}) = body
{
if r#type == "usage_limit_reached" {
- return Err(CodexErr::UsageLimitReached);
+ return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
+ plan_type: auth.and_then(|a| a.get_plan_type()),
+ }));
} else if r#type == "usage_not_included" {
return Err(CodexErr::UsageNotIncluded);
}
diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs
index aaef73ded9..385361e8ff 100644
--- a/codex-rs/core/src/codex.rs
+++ b/codex-rs/core/src/codex.rs
@@ -1290,7 +1290,9 @@ async fn run_turn(
Ok(output) => return Ok(output),
Err(CodexErr::Interrupted) => return Err(CodexErr::Interrupted),
Err(CodexErr::EnvVar(var)) => return Err(CodexErr::EnvVar(var)),
- Err(e @ (CodexErr::UsageLimitReached | CodexErr::UsageNotIncluded)) => return Err(e),
+ Err(e @ (CodexErr::UsageLimitReached(_) | CodexErr::UsageNotIncluded)) => {
+ return Err(e);
+ }
Err(e) => {
// Use the configured provider-specific stream retry budget.
let max_retries = sess.client.get_provider().stream_max_retries();
diff --git a/codex-rs/core/src/error.rs b/codex-rs/core/src/error.rs
index f6394b71ce..7d6dc2cc8d 100644
--- a/codex-rs/core/src/error.rs
+++ b/codex-rs/core/src/error.rs
@@ -62,14 +62,16 @@ pub enum CodexErr {
#[error("unexpected status {0}: {1}")]
UnexpectedStatus(StatusCode, String),
- #[error("Usage limit has been reached")]
- UsageLimitReached,
+ #[error("{0}")]
+ UsageLimitReached(UsageLimitReachedError),
- #[error("Usage not included with the plan")]
+ #[error(
+ "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
+ )]
UsageNotIncluded,
#[error(
- "We’re currently experiencing high demand, which may cause temporary errors. We’re adding capacity in East and West Europe to restore normal service."
+ "We're currently experiencing high demand, which may cause temporary errors. We’re adding capacity in East and West Europe to restore normal service."
)]
InternalServerError,
@@ -115,6 +117,30 @@ pub enum CodexErr {
EnvVar(EnvVarError),
}
+#[derive(Debug)]
+pub struct UsageLimitReachedError {
+ pub plan_type: Option<String>,
+}
+
+impl std::fmt::Display for UsageLimitReachedError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if let Some(plan_type) = &self.plan_type
+ && plan_type == "plus"
+ {
+ write!(
+ f,
+ "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), or wait for limits to reset (every 5h and every week.)."
+ )?;
+ } else {
+ write!(
+ f,
+ "You've hit usage your usage limit. Limits reset every 5h and every week."
+ )?;
+ }
+ Ok(())
+ }
+}
+
#[derive(Debug)]
pub struct EnvVarError {
/// Name of the environment variable that is missing.
@@ -150,3 +176,39 @@ pub fn get_error_message_ui(e: &CodexErr) -> String {
_ => e.to_string(),
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn usage_limit_reached_error_formats_plus_plan() {
+ let err = UsageLimitReachedError {
+ plan_type: Some("plus".to_string()),
+ };
+ assert_eq!(
+ err.to_string(),
+ "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), or wait for limits to reset (every 5h and every week.)."
+ );
+ }
+
+ #[test]
+ fn usage_limit_reached_error_formats_default_when_none() {
+ let err = UsageLimitReachedError { plan_type: None };
+ assert_eq!(
+ err.to_string(),
+ "You've hit usage your usage limit. Limits reset every 5h and every week."
+ );
+ }
+
+ #[test]
+ fn usage_limit_reached_error_formats_default_for_other_plans() {
+ let err = UsageLimitReachedError {
+ plan_type: Some("pro".to_string()),
+ };
+ assert_eq!(
+ err.to_string(),
+ "You've hit usage your usage limit. Limits reset every 5h and every week."
+ );
+ }
+}
diff --git a/codex-rs/login/src/lib.rs b/codex-rs/login/src/lib.rs
index 7e693ccdf8..2a8f6749b4 100644
--- a/codex-rs/login/src/lib.rs
+++ b/codex-rs/login/src/lib.rs
@@ -68,8 +68,7 @@ impl CodexAuth {
}
pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
- #[expect(clippy::unwrap_used)]
- let auth_dot_json = self.auth_dot_json.lock().unwrap().clone();
+ let auth_dot_json: Option<AuthDotJson> = self.get_current_auth_json();
match auth_dot_json {
Some(AuthDotJson {
tokens: Some(mut tokens),
@@ -124,15 +123,23 @@ impl CodexAuth {
}
}
- pub async fn get_account_id(&self) -> Option<String> {
- match self.mode {
- AuthMode::ApiKey => None,
- AuthMode::ChatGPT => {
- let token_data = self.get_token_data().await.ok()?;
+ pub fn get_account_id(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.account_id.clone())
+ }
- token_data.account_id.clone()
- }
- }
+ pub fn get_plan_type(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.id_token.chatgpt_plan_type.as_ref().map(|p| p.as_string()))
+ }
+
+ fn get_current_auth_json(&self) -> Option<AuthDotJson> {
+ #[expect(clippy::unwrap_used)]
+ self.auth_dot_json.lock().unwrap().clone()
+ }
+
+ fn get_current_token_data(&self) -> Option<TokenData> {
+ self.get_current_auth_json().and_then(|t| t.tokens.clone())
}
/// Consider this private to integration tests.
diff --git a/codex-rs/login/src/token_data.rs b/codex-rs/login/src/token_data.rs
index 86ddaf5819..fb4d83950f 100644
--- a/codex-rs/login/src/token_data.rs
+++ b/codex-rs/login/src/token_data.rs
@@ -67,6 +67,13 @@ impl PlanType {
}
}
}
+
+ pub fn as_string(&self) -> String {
+ match self {
+ Self::Known(known) => format!("{known:?}").to_lowercase(),
+ Self::Unknown(s) => s.clone(),
+ }
+ }
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
Review Comments
codex-rs/core/src/error.rs
- Created: 2025-08-07 22:55:45 UTC | Link: https://github.com/openai/codex/pull/1969#discussion_r2261604888
@@ -115,6 +117,30 @@ pub enum CodexErr {
EnvVar(EnvVarError),
}
+#[derive(Debug)]
+pub struct UsageLimitReachedError {
+ pub plan_type: Option<String>,
+}
+
+impl std::fmt::Display for UsageLimitReachedError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if let Some(plan_type) = &self.plan_type
+ && plan_type == "plus"
+ {
+ write!(
+ f,
+ "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), or wait for limits to reset (every 5h and every week)"
Should we put a period at the end of this message, and also at the end of the one in the
`else` case? Feels a little weird for there to be one sentence with one and one without?
codex-rs/login/src/lib.rs
- Created: 2025-08-07 22:57:01 UTC | Link: https://github.com/openai/codex/pull/1969#discussion_r2261606060
@@ -132,15 +131,23 @@ impl CodexAuth {
}
}
- pub async fn get_account_id(&self) -> Option<String> {
- match self.mode {
- AuthMode::ApiKey => None,
- AuthMode::ChatGPT => {
- let token_data = self.get_token_data().await.ok()?;
+ pub fn get_account_id(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.account_id.clone())
+ }
- token_data.account_id.clone()
- }
- }
+ pub fn get_plan_type(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.id_token.chatgpt_plan_type.clone())
+ }
+
+ fn get_current_auth_json(&self) -> Option<AuthDotJson> {
+ #[expect(clippy::unwrap_used)]
+ self.auth_dot_json.lock().unwrap().clone()
+ }
I was in the middle of trying to make
`AuthDotJson` private to the login crate. Do we need to open this up?
- Created: 2025-08-08 01:23:09 UTC | Link: https://github.com/openai/codex/pull/1969#discussion_r2261742763
@@ -132,15 +131,23 @@ impl CodexAuth {
}
}
- pub async fn get_account_id(&self) -> Option<String> {
- match self.mode {
- AuthMode::ApiKey => None,
- AuthMode::ChatGPT => {
- let token_data = self.get_token_data().await.ok()?;
+ pub fn get_account_id(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.account_id.clone())
+ }
- token_data.account_id.clone()
- }
- }
+ pub fn get_plan_type(&self) -> Option<String> {
+ self.get_current_token_data()
+ .and_then(|t| t.id_token.chatgpt_plan_type.clone())
+ }
+
+ fn get_current_auth_json(&self) -> Option<AuthDotJson> {
+ #[expect(clippy::unwrap_used)]
+ self.auth_dot_json.lock().unwrap().clone()
+ }
Oh, I see this is private — just the ones above are `pub`.