mirror of https://github.com/openai/codex.git, synced 2026-04-28 02:11:08 +03:00

Commit: adding helper to prevent caching
@@ -21,8 +21,8 @@ use crate::client_common::ResponseEvent;
 use crate::client_common::ResponseStream;
 use crate::error::CodexErr;
 use crate::error::Result;
-use crate::flags::OPENAI_REQUEST_MAX_RETRIES;
 use crate::flags::OPENAI_STREAM_IDLE_TIMEOUT_MS;
+use crate::flags::openai_request_max_retries;
 use crate::models::ContentItem;
 use crate::models::ResponseItem;
 use crate::openai_tools::create_tools_json_for_chat_completions_api;
@@ -146,7 +146,7 @@ pub(crate) async fn stream_chat_completions(
                 return Err(CodexErr::UnexpectedStatus(status, body));
             }
 
-            if attempt > *OPENAI_REQUEST_MAX_RETRIES {
+            if attempt > openai_request_max_retries() {
                 return Err(CodexErr::RetryLimit(status));
             }
 
@@ -162,7 +162,7 @@ pub(crate) async fn stream_chat_completions(
                 tokio::time::sleep(delay).await;
             }
             Err(e) => {
                 if attempt > *OPENAI_REQUEST_MAX_RETRIES {
-                if attempt > *OPENAI_REQUEST_MAX_RETRIES {
+                if attempt > openai_request_max_retries() {
                     return Err(e.into());
                 }
                 let delay = backoff(attempt);
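
These two hunks patch the same retry pattern, and the ModelClient hunks below repeat it: bump the attempt counter, bail out once it exceeds the budget, otherwise back off and retry. A minimal self-contained sketch of that loop follows; `try_request`, the backoff formula, and the fallback default of 4 are illustrative stand-ins, not the actual codex-rs code.

use std::time::Duration;

// Inlined copy of the flags.rs helper so this sketch compiles on its own;
// the real helper falls back to a lazily cached default, not a literal.
fn openai_request_max_retries() -> u64 {
    std::env::var("OPENAI_REQUEST_MAX_RETRIES")
        .ok()
        .and_then(|s| s.parse::<u64>().ok())
        .unwrap_or(4)
}

// Hypothetical fallible operation standing in for the real HTTP request.
async fn try_request() -> Result<String, String> {
    Err("transient failure".to_string())
}

// Exponential backoff capped at 200ms * 2^6; the real formula may differ.
fn backoff(attempt: u64) -> Duration {
    Duration::from_millis(200 * (1u64 << attempt.min(6)))
}

#[tokio::main]
async fn main() {
    let mut attempt: u64 = 0;
    let result = loop {
        attempt += 1;
        match try_request().await {
            Ok(body) => break Ok(body),
            Err(e) => {
                // Re-reads the env var on every iteration, so a test that
                // changes OPENAI_REQUEST_MAX_RETRIES at runtime takes effect
                // immediately instead of hitting a process-wide cached value.
                if attempt > openai_request_max_retries() {
                    break Err(e);
                }
                tokio::time::sleep(backoff(attempt)).await;
            }
        }
    };
    println!("{result:?}");
}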
@@ -29,8 +29,8 @@ use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::error::CodexErr;
 use crate::error::Result;
 use crate::flags::CODEX_RS_SSE_FIXTURE;
-use crate::flags::OPENAI_REQUEST_MAX_RETRIES;
 use crate::flags::OPENAI_STREAM_IDLE_TIMEOUT_MS;
+use crate::flags::openai_request_max_retries;
 use crate::model_provider_info::ModelProviderInfo;
 use crate::model_provider_info::WireApi;
 use crate::models::ResponseItem;
@@ -171,7 +171,7 @@ impl ModelClient {
                 return Err(CodexErr::UnexpectedStatus(status, body));
             }
 
-            if attempt > *OPENAI_REQUEST_MAX_RETRIES {
+            if attempt > openai_request_max_retries() {
                 return Err(CodexErr::RetryLimit(status));
             }
 
@@ -188,7 +188,7 @@ impl ModelClient {
                 tokio::time::sleep(delay).await;
             }
             Err(e) => {
-                if attempt > *OPENAI_REQUEST_MAX_RETRIES {
+                if attempt > openai_request_max_retries() {
                     return Err(e.into());
                 }
                 let delay = backoff(attempt);
@@ -23,3 +23,25 @@ env_flags! {
     /// Fixture path for offline tests (see client.rs).
     pub CODEX_RS_SSE_FIXTURE: Option<&str> = None;
 }
+
+// -----------------------------------------------------------------------------
+// Test-friendly runtime override helpers
+// -----------------------------------------------------------------------------
+/// Return the effective retry budget for outbound OpenAI requests.
+///
+/// The `env_flags!` macro above initialises its values lazily and caches
+/// them for the remainder of the process. A number of our unit tests tweak
+/// `OPENAI_REQUEST_MAX_RETRIES` *at runtime* (e.g. set to 0/1) to exercise the
+/// retry/back‑off logic deterministically. When another test touches the flag
+/// first, the cached value "sticks" and later tests silently inherit it,
+/// leading to surprising flakes (see #???).
+///
+/// To make the behaviour deterministic we re‑read the raw environment variable
+/// on every call and fall back to the cached default when unset or invalid.
+#[inline]
+pub fn openai_request_max_retries() -> u64 {
+    match std::env::var("OPENAI_REQUEST_MAX_RETRIES") {
+        Ok(s) => s.parse::<u64>().unwrap_or(*OPENAI_REQUEST_MAX_RETRIES),
+        Err(_) => *OPENAI_REQUEST_MAX_RETRIES,
+    }
+}
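
To illustrate the behaviour the helper buys, here is a hypothetical unit test, not part of the commit. It assumes the helper lives alongside the `env_flags!` block in flags.rs; note that `std::env::set_var` mutates process-global state (and is `unsafe` under the Rust 2024 edition), so a real suite would serialize env-touching tests.

#[cfg(test)]
mod runtime_override_tests {
    use super::*;

    #[test]
    fn helper_sees_runtime_changes() {
        // Process-global mutation: a real suite would guard this with a
        // shared mutex to avoid cross-test interference.
        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
        assert_eq!(openai_request_max_retries(), 0);

        // A lazily cached flag would still report 0 here; the helper
        // re-reads the environment on every call.
        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "7");
        assert_eq!(openai_request_max_retries(), 7);

        // Clean up so later tests see an unset variable again.
        std::env::remove_var("OPENAI_REQUEST_MAX_RETRIES");
    }
}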