Compare commits

...

2 Commits

Author SHA1 Message Date
Eric Traut
8ad8b163c7 Clarify chatgpt_base_url and restore 2026-02-17 12:49:11 -08:00
Eric Traut
64e8880f72 Revert "chore(deps): bump rust-toolchain from 1.93.0 to 1.93.1 in /codex-rs (#11886)"
This reverts commit af3b1ae6cb.
2026-02-17 12:24:40 -08:00
9 changed files with 102 additions and 18 deletions

View File

@@ -33,6 +33,7 @@ use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::experimental_required_message;
use codex_core::AuthManager;
use codex_core::OPENAI_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::auth::ExternalAuthRefreshContext;
use codex_core::auth::ExternalAuthRefreshReason;
@@ -169,10 +170,16 @@ impl MessageProcessor {
auth_manager.set_external_auth_refresher(Arc::new(ExternalAuthRefreshBridge {
outgoing: outgoing.clone(),
}));
let thread_manager = Arc::new(ThreadManager::new(
let openai_models_provider = config
.model_providers
.get(OPENAI_PROVIDER_ID)
.cloned()
.unwrap_or_else(codex_core::ModelProviderInfo::create_openai_provider);
let thread_manager = Arc::new(ThreadManager::new_with_models_provider(
config.codex_home.clone(),
auth_manager.clone(),
SessionSource::VSCode,
openai_models_provider,
));
let cloud_requirements = Arc::new(RwLock::new(cloud_requirements));
let codex_message_processor = CodexMessageProcessor::new(CodexMessageProcessorArgs {

View File

@@ -73,6 +73,7 @@ pub use model_provider_info::DEFAULT_OLLAMA_PORT;
pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
pub use model_provider_info::OPENAI_PROVIDER_ID;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;

View File

@@ -27,6 +27,7 @@ const MAX_STREAM_MAX_RETRIES: u64 = 100;
const MAX_REQUEST_MAX_RETRIES: u64 = 100;
const OPENAI_PROVIDER_NAME: &str = "OpenAI";
pub const OPENAI_PROVIDER_ID: &str = "openai";
const CHAT_WIRE_API_REMOVED_ERROR: &str = "`wire_api = \"chat\"` is no longer supported.\nHow to fix: set `wire_api = \"responses\"` in your provider config.\nMore info: https://github.com/openai/codex/discussions/7782";
pub(crate) const LEGACY_OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
pub(crate) const OLLAMA_CHAT_PROVIDER_REMOVED_ERROR: &str = "`ollama-chat` is no longer supported.\nHow to fix: replace `ollama-chat` with `ollama` in `model_provider`, `oss_provider`, or `--local-provider`.\nMore info: https://github.com/openai/codex/discussions/7782";
@@ -276,7 +277,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
// open source ("oss") providers by default. Users are encouraged to add to
// `model_providers` in config.toml to add their own providers.
[
("openai", P::create_openai_provider()),
(OPENAI_PROVIDER_ID, P::create_openai_provider()),
(
OLLAMA_OSS_PROVIDER_ID,
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),

View File

@@ -58,6 +58,19 @@ impl ModelsManager {
///
/// Uses `codex_home` to store cached model metadata and initializes with built-in presets.
/// Delegates to `new_with_provider`, supplying the default OpenAI provider for remote
/// model refreshes so existing callers keep their previous behavior.
pub fn new(codex_home: PathBuf, auth_manager: Arc<AuthManager>) -> Self {
Self::new_with_provider(
codex_home,
auth_manager,
// Default provider used when the caller does not pass an explicit one.
ModelProviderInfo::create_openai_provider(),
)
}
/// Construct a manager with an explicit provider used for remote model refreshes.
pub fn new_with_provider(
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
provider: ModelProviderInfo,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
Self {
@@ -66,7 +79,7 @@ impl ModelsManager {
auth_manager,
etag: RwLock::new(None),
cache_manager,
provider: ModelProviderInfo::create_openai_provider(),
provider,
}
}
@@ -322,16 +335,7 @@ impl ModelsManager {
auth_manager: Arc<AuthManager>,
provider: ModelProviderInfo,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
Self {
local_models: builtin_model_presets(auth_manager.auth_mode()),
remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
auth_manager,
etag: RwLock::new(None),
cache_manager,
provider,
}
Self::new_with_provider(codex_home, auth_manager, provider)
}
/// Get model identifier without consulting remote state or cache.

View File

@@ -141,6 +141,20 @@ impl ThreadManager {
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
session_source: SessionSource,
) -> Self {
Self::new_with_models_provider(
codex_home,
auth_manager,
session_source,
ModelProviderInfo::create_openai_provider(),
)
}
pub fn new_with_models_provider(
codex_home: PathBuf,
auth_manager: Arc<AuthManager>,
session_source: SessionSource,
provider: ModelProviderInfo,
) -> Self {
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let skills_manager = Arc::new(SkillsManager::new(codex_home.clone()));
@@ -149,7 +163,11 @@ impl ThreadManager {
state: Arc::new(ThreadManagerState {
threads: Arc::new(RwLock::new(HashMap::new())),
thread_created_tx,
models_manager: Arc::new(ModelsManager::new(codex_home, auth_manager.clone())),
models_manager: Arc::new(ModelsManager::new_with_provider(
codex_home,
auth_manager.clone(),
provider,
)),
skills_manager,
file_watcher,
auth_manager,
@@ -562,11 +580,16 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
mod tests {
use super::*;
use crate::codex::make_session_and_context;
use crate::models_manager::manager::RefreshStrategy;
use assert_matches::assert_matches;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelsResponse;
use core_test_support::responses::mount_models_once;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use wiremock::MockServer;
fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
@@ -672,4 +695,31 @@ mod tests {
serde_json::to_value(&expected).unwrap()
);
}
// Verifies that a provider passed to `ThreadManager::new_with_models_provider`
// is the one actually used for remote model refreshes: the mock server below
// must receive exactly one request when a refresh is forced.
#[tokio::test]
async fn new_with_models_provider_uses_custom_provider_for_model_refresh() {
// Mock HTTP server that serves the models endpoint once (empty model list).
let server = MockServer::start().await;
let models_mock = mount_models_once(&server, ModelsResponse { models: vec![] }).await;
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
// Start from the default OpenAI provider but redirect its base URL to the mock.
let provider = ModelProviderInfo {
base_url: Some(server.uri()),
..ModelProviderInfo::create_openai_provider()
};
// Isolated codex home so cached model metadata cannot leak between tests.
let codex_home = tempdir().expect("create temp codex home");
let manager = ThreadManager::new_with_models_provider(
codex_home.path().to_path_buf(),
auth_manager,
SessionSource::Exec,
provider,
);
// Force an online refresh; result is ignored — only the request count matters.
let _ = manager.list_models(RefreshStrategy::OnlineIfUncached).await;
assert_eq!(
models_mock.requests().len(),
1,
"expected model refresh to use custom provider base URL"
);
}
}

View File

@@ -18,6 +18,7 @@ use codex_core::AuthManager;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::NewThread;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::OPENAI_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::auth::enforce_login_restrictions;
use codex_core::check_execpolicy_for_warnings;
@@ -367,10 +368,16 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
true,
config.cli_auth_credentials_store_mode,
);
let thread_manager = Arc::new(ThreadManager::new(
let openai_models_provider = config
.model_providers
.get(OPENAI_PROVIDER_ID)
.cloned()
.unwrap_or_else(codex_core::ModelProviderInfo::create_openai_provider);
let thread_manager = Arc::new(ThreadManager::new_with_models_provider(
config.codex_home.clone(),
auth_manager.clone(),
SessionSource::Exec,
openai_models_provider,
));
let default_model = thread_manager
.get_models_manager()

View File

@@ -2,6 +2,7 @@ use std::collections::HashMap;
use std::path::PathBuf;
use codex_core::AuthManager;
use codex_core::OPENAI_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::default_client::USER_AGENT_SUFFIX;
@@ -57,10 +58,16 @@ impl MessageProcessor {
false,
config.cli_auth_credentials_store_mode,
);
let thread_manager = Arc::new(ThreadManager::new(
let openai_models_provider = config
.model_providers
.get(OPENAI_PROVIDER_ID)
.cloned()
.unwrap_or_else(codex_core::ModelProviderInfo::create_openai_provider);
let thread_manager = Arc::new(ThreadManager::new_with_models_provider(
config.codex_home.clone(),
auth_manager,
SessionSource::Mcp,
openai_models_provider,
));
Self {
outgoing,

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "1.93.1"
channel = "1.93.0"
components = ["clippy", "rustfmt", "rust-src"]

View File

@@ -34,6 +34,7 @@ use codex_ansi_escape::ansi_escape_line;
use codex_app_server_protocol::ConfigLayerSource;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::OPENAI_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
@@ -1022,10 +1023,16 @@ impl App {
let harness_overrides =
normalize_harness_overrides_for_cwd(harness_overrides, &config.cwd)?;
let thread_manager = Arc::new(ThreadManager::new(
let openai_models_provider = config
.model_providers
.get(OPENAI_PROVIDER_ID)
.cloned()
.unwrap_or_else(codex_core::ModelProviderInfo::create_openai_provider);
let thread_manager = Arc::new(ThreadManager::new_with_models_provider(
config.codex_home.clone(),
auth_manager.clone(),
SessionSource::Cli,
openai_models_provider,
));
let mut model = thread_manager
.get_models_manager()