mirror of
https://github.com/openai/codex.git
synced 2026-03-18 11:56:35 +03:00
Compare commits
8 Commits
pr14989
...
codex/spli
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2bb42bc018 | ||
|
|
e02add131a | ||
|
|
e679f59205 | ||
|
|
80bfeea4c9 | ||
|
|
1cf68f940c | ||
|
|
0f406c3de0 | ||
|
|
8b3fc35e0b | ||
|
|
38a28973a8 |
15
.github/workflows/rust-ci.yml
vendored
15
.github/workflows/rust-ci.yml
vendored
@@ -86,8 +86,7 @@ jobs:
|
||||
- uses: dtolnay/rust-toolchain@1.93.0
|
||||
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: cargo-shear
|
||||
version: 1.5.1
|
||||
tool: cargo-shear@1.5.1
|
||||
- name: cargo shear
|
||||
run: cargo shear
|
||||
|
||||
@@ -291,8 +290,7 @@ jobs:
|
||||
if: ${{ env.USE_SCCACHE == 'true' }}
|
||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: sccache
|
||||
version: 0.7.5
|
||||
tool: sccache@0.7.5
|
||||
|
||||
- name: Configure sccache backend
|
||||
if: ${{ env.USE_SCCACHE == 'true' }}
|
||||
@@ -421,8 +419,7 @@ jobs:
|
||||
if: ${{ matrix.profile == 'release' }}
|
||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: cargo-chef
|
||||
version: 0.1.71
|
||||
tool: cargo-chef@0.1.71
|
||||
|
||||
- name: Pre-warm dependency cache (cargo-chef)
|
||||
if: ${{ matrix.profile == 'release' }}
|
||||
@@ -593,8 +590,7 @@ jobs:
|
||||
if: ${{ env.USE_SCCACHE == 'true' }}
|
||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: sccache
|
||||
version: 0.7.5
|
||||
tool: sccache@0.7.5
|
||||
|
||||
- name: Configure sccache backend
|
||||
if: ${{ env.USE_SCCACHE == 'true' }}
|
||||
@@ -628,8 +624,7 @@ jobs:
|
||||
|
||||
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: nextest
|
||||
version: 0.9.103
|
||||
tool: nextest@0.9.103
|
||||
|
||||
- name: Enable unprivileged user namespaces (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
|
||||
29
codex-rs/Cargo.lock
generated
29
codex-rs/Cargo.lock
generated
@@ -1786,10 +1786,12 @@ name = "codex-config"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"base64 0.22.1",
|
||||
"codex-app-server-protocol",
|
||||
"codex-execpolicy",
|
||||
"codex-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"core-foundation 0.9.4",
|
||||
"futures",
|
||||
"multimap",
|
||||
"pretty_assertions",
|
||||
@@ -1802,6 +1804,7 @@ dependencies = [
|
||||
"toml 0.9.11+spec-1.1.0",
|
||||
"toml_edit 0.24.0+spec-1.1.0",
|
||||
"tracing",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1846,6 +1849,7 @@ dependencies = [
|
||||
"codex-git",
|
||||
"codex-hooks",
|
||||
"codex-keyring-store",
|
||||
"codex-models",
|
||||
"codex-network-proxy",
|
||||
"codex-otel",
|
||||
"codex-protocol",
|
||||
@@ -1866,7 +1870,6 @@ dependencies = [
|
||||
"codex-utils-stream-parser",
|
||||
"codex-utils-string",
|
||||
"codex-windows-sandbox",
|
||||
"core-foundation 0.9.4",
|
||||
"core_test_support",
|
||||
"csv",
|
||||
"ctor 0.6.3",
|
||||
@@ -1926,7 +1929,6 @@ dependencies = [
|
||||
"walkdir",
|
||||
"which",
|
||||
"wildmatch",
|
||||
"windows-sys 0.52.0",
|
||||
"wiremock",
|
||||
"zip",
|
||||
"zstd",
|
||||
@@ -2193,6 +2195,29 @@ dependencies = [
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-models"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"codex-api",
|
||||
"codex-protocol",
|
||||
"http 1.4.0",
|
||||
"maplit",
|
||||
"pretty_assertions",
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
"toml 0.9.11+spec-1.1.0",
|
||||
"tracing",
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-network-proxy"
|
||||
version = "0.0.0"
|
||||
|
||||
@@ -32,6 +32,7 @@ members = [
|
||||
"linux-sandbox",
|
||||
"lmstudio",
|
||||
"login",
|
||||
"models",
|
||||
"mcp-server",
|
||||
"network-proxy",
|
||||
"ollama",
|
||||
@@ -114,6 +115,7 @@ codex-keyring-store = { path = "keyring-store" }
|
||||
codex-linux-sandbox = { path = "linux-sandbox" }
|
||||
codex-lmstudio = { path = "lmstudio" }
|
||||
codex-login = { path = "login" }
|
||||
codex-models = { path = "models" }
|
||||
codex-mcp-server = { path = "mcp-server" }
|
||||
codex-network-proxy = { path = "network-proxy" }
|
||||
codex-ollama = { path = "ollama" }
|
||||
|
||||
@@ -4,10 +4,14 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lib]
|
||||
doctest = false
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64 = { workspace = true }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-execpolicy = { workspace = true }
|
||||
codex-protocol = { workspace = true }
|
||||
@@ -24,6 +28,16 @@ toml = { workspace = true }
|
||||
toml_edit = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
core-foundation = "0.9"
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
windows-sys = { version = "0.52", features = [
|
||||
"Win32_Foundation",
|
||||
"Win32_System_Com",
|
||||
"Win32_UI_Shell",
|
||||
] }
|
||||
|
||||
[dev-dependencies]
|
||||
anyhow = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use super::LoaderOverrides;
|
||||
use crate::LoaderOverrides;
|
||||
use crate::config_error_from_toml;
|
||||
use crate::io_error_from_config_error;
|
||||
#[cfg(target_os = "macos")]
|
||||
use super::macos::ManagedAdminConfigLayer;
|
||||
use crate::macos::ManagedAdminConfigLayer;
|
||||
#[cfg(target_os = "macos")]
|
||||
use super::macos::load_managed_admin_config_layer;
|
||||
use codex_config::config_error_from_toml;
|
||||
use codex_config::io_error_from_config_error;
|
||||
use crate::macos::load_managed_admin_config_layer;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
@@ -16,26 +16,26 @@ use toml::Value as TomlValue;
|
||||
const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(super) struct MangedConfigFromFile {
|
||||
pub struct ManagedConfigFromFile {
|
||||
pub managed_config: TomlValue,
|
||||
pub file: AbsolutePathBuf,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(super) struct ManagedConfigFromMdm {
|
||||
pub struct ManagedConfigFromMdm {
|
||||
pub managed_config: TomlValue,
|
||||
pub raw_toml: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(super) struct LoadedConfigLayers {
|
||||
pub struct LoadedConfigLayers {
|
||||
/// If present, data read from a file such as `/etc/codex/managed_config.toml`.
|
||||
pub managed_config: Option<MangedConfigFromFile>,
|
||||
pub managed_config: Option<ManagedConfigFromFile>,
|
||||
/// If present, data read from managed preferences (macOS only).
|
||||
pub managed_config_from_mdm: Option<ManagedConfigFromMdm>,
|
||||
}
|
||||
|
||||
pub(super) async fn load_config_layers_internal(
|
||||
pub async fn load_config_layers_internal(
|
||||
codex_home: &Path,
|
||||
overrides: LoaderOverrides,
|
||||
) -> io::Result<LoadedConfigLayers> {
|
||||
@@ -59,7 +59,7 @@ pub(super) async fn load_config_layers_internal(
|
||||
let managed_config =
|
||||
read_config_from_path(&managed_config_path, /*log_missing_as_info*/ false)
|
||||
.await?
|
||||
.map(|managed_config| MangedConfigFromFile {
|
||||
.map(|managed_config| ManagedConfigFromFile {
|
||||
managed_config,
|
||||
file: managed_config_path.clone(),
|
||||
});
|
||||
@@ -88,7 +88,7 @@ fn map_managed_admin_layer(layer: ManagedAdminConfigLayer) -> ManagedConfigFromM
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn read_config_from_path(
|
||||
async fn read_config_from_path(
|
||||
path: impl AsRef<Path>,
|
||||
log_missing_as_info: bool,
|
||||
) -> io::Result<Option<TomlValue>> {
|
||||
@@ -120,8 +120,7 @@ pub(super) async fn read_config_from_path(
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the default managed config path.
|
||||
pub(super) fn managed_config_default_path(codex_home: &Path) -> PathBuf {
|
||||
fn managed_config_default_path(codex_home: &Path) -> PathBuf {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let _ = codex_home;
|
||||
@@ -3,6 +3,10 @@ mod config_requirements;
|
||||
mod constraint;
|
||||
mod diagnostics;
|
||||
mod fingerprint;
|
||||
mod layer_io;
|
||||
mod loader;
|
||||
#[cfg(target_os = "macos")]
|
||||
mod macos;
|
||||
mod merge;
|
||||
mod overrides;
|
||||
mod requirements_exec_policy;
|
||||
@@ -44,6 +48,15 @@ pub use diagnostics::format_config_error;
|
||||
pub use diagnostics::format_config_error_with_source;
|
||||
pub use diagnostics::io_error_from_config_error;
|
||||
pub use fingerprint::version_for_toml;
|
||||
pub use layer_io::LoadedConfigLayers;
|
||||
pub use layer_io::ManagedConfigFromFile;
|
||||
pub use layer_io::ManagedConfigFromMdm;
|
||||
pub use layer_io::load_config_layers_internal;
|
||||
pub use loader::load_managed_admin_requirements;
|
||||
pub use loader::load_requirements_from_legacy_scheme;
|
||||
pub use loader::load_requirements_toml;
|
||||
pub use loader::system_config_toml_file;
|
||||
pub use loader::system_requirements_toml_file;
|
||||
pub use merge::merge_toml_values;
|
||||
pub use overrides::build_cli_overrides_layer;
|
||||
pub use requirements_exec_policy::RequirementsExecPolicy;
|
||||
|
||||
236
codex-rs/config/src/loader.rs
Normal file
236
codex-rs/config/src/loader.rs
Normal file
@@ -0,0 +1,236 @@
|
||||
use crate::ConfigRequirementsToml;
|
||||
use crate::ConfigRequirementsWithSources;
|
||||
use crate::LoadedConfigLayers;
|
||||
use crate::RequirementSource;
|
||||
#[cfg(target_os = "macos")]
|
||||
use crate::macos::load_managed_admin_requirements_toml;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use serde::Deserialize;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
#[cfg(windows)]
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[cfg(unix)]
|
||||
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
|
||||
|
||||
#[cfg(windows)]
|
||||
const DEFAULT_PROGRAM_DATA_DIR_WINDOWS: &str = r"C:\ProgramData";
|
||||
|
||||
pub async fn load_requirements_toml(
|
||||
config_requirements_toml: &mut ConfigRequirementsWithSources,
|
||||
requirements_toml_file: impl AsRef<Path>,
|
||||
) -> io::Result<()> {
|
||||
let requirements_toml_file =
|
||||
AbsolutePathBuf::from_absolute_path(requirements_toml_file.as_ref())?;
|
||||
match tokio::fs::read_to_string(&requirements_toml_file).await {
|
||||
Ok(contents) => {
|
||||
let requirements_config: ConfigRequirementsToml =
|
||||
toml::from_str(&contents).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Error parsing requirements file {}: {err}",
|
||||
requirements_toml_file.as_ref().display(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
config_requirements_toml.merge_unset_fields(
|
||||
RequirementSource::SystemRequirementsToml {
|
||||
file: requirements_toml_file.clone(),
|
||||
},
|
||||
requirements_config,
|
||||
);
|
||||
}
|
||||
Err(err) if err.kind() == io::ErrorKind::NotFound => {}
|
||||
Err(err) => {
|
||||
return Err(io::Error::new(
|
||||
err.kind(),
|
||||
format!(
|
||||
"Failed to read requirements file {}: {err}",
|
||||
requirements_toml_file.as_ref().display(),
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn load_managed_admin_requirements(
|
||||
config_requirements_toml: &mut ConfigRequirementsWithSources,
|
||||
managed_config_requirements_base64: Option<&str>,
|
||||
) -> io::Result<()> {
|
||||
#[cfg(target_os = "macos")]
|
||||
{
|
||||
load_managed_admin_requirements_toml(
|
||||
config_requirements_toml,
|
||||
managed_config_requirements_base64,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "macos"))]
|
||||
{
|
||||
let _ = config_requirements_toml;
|
||||
let _ = managed_config_requirements_base64;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(Path::new("/etc/codex/requirements.toml"))
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
pub fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
windows_system_requirements_toml_file()
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(Path::new(SYSTEM_CONFIG_TOML_FILE_UNIX))
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
pub fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
windows_system_config_toml_file()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_codex_system_dir() -> PathBuf {
|
||||
let program_data = windows_program_data_dir_from_known_folder().unwrap_or_else(|err| {
|
||||
tracing::warn!(
|
||||
error = %err,
|
||||
"Failed to resolve ProgramData known folder; using default path"
|
||||
);
|
||||
PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS)
|
||||
});
|
||||
program_data.join("OpenAI").join("Codex")
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
let requirements_toml_file = windows_codex_system_dir().join("requirements.toml");
|
||||
AbsolutePathBuf::try_from(requirements_toml_file)
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
let config_toml_file = windows_codex_system_dir().join("config.toml");
|
||||
AbsolutePathBuf::try_from(config_toml_file)
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_program_data_dir_from_known_folder() -> io::Result<PathBuf> {
|
||||
use std::ffi::OsString;
|
||||
use std::os::windows::ffi::OsStringExt;
|
||||
use windows_sys::Win32::System::Com::CoTaskMemFree;
|
||||
use windows_sys::Win32::UI::Shell::FOLDERID_ProgramData;
|
||||
use windows_sys::Win32::UI::Shell::KF_FLAG_DEFAULT;
|
||||
use windows_sys::Win32::UI::Shell::SHGetKnownFolderPath;
|
||||
|
||||
let mut path_ptr = std::ptr::null_mut::<u16>();
|
||||
let known_folder_flags = u32::try_from(KF_FLAG_DEFAULT).map_err(|_| {
|
||||
io::Error::other(format!(
|
||||
"KF_FLAG_DEFAULT did not fit in u32: {KF_FLAG_DEFAULT}"
|
||||
))
|
||||
})?;
|
||||
let hr = unsafe {
|
||||
SHGetKnownFolderPath(&FOLDERID_ProgramData, known_folder_flags, 0, &mut path_ptr)
|
||||
};
|
||||
if hr != 0 {
|
||||
return Err(io::Error::other(format!(
|
||||
"SHGetKnownFolderPath(FOLDERID_ProgramData) failed with HRESULT {hr:#010x}"
|
||||
)));
|
||||
}
|
||||
if path_ptr.is_null() {
|
||||
return Err(io::Error::other(
|
||||
"SHGetKnownFolderPath(FOLDERID_ProgramData) returned a null pointer",
|
||||
));
|
||||
}
|
||||
|
||||
let path = unsafe {
|
||||
let mut len = 0usize;
|
||||
while *path_ptr.add(len) != 0 {
|
||||
len += 1;
|
||||
}
|
||||
let wide = std::slice::from_raw_parts(path_ptr, len);
|
||||
let path = PathBuf::from(OsString::from_wide(wide));
|
||||
CoTaskMemFree(path_ptr.cast());
|
||||
path
|
||||
};
|
||||
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
pub async fn load_requirements_from_legacy_scheme(
|
||||
config_requirements_toml: &mut ConfigRequirementsWithSources,
|
||||
loaded_config_layers: LoadedConfigLayers,
|
||||
) -> io::Result<()> {
|
||||
let LoadedConfigLayers {
|
||||
managed_config,
|
||||
managed_config_from_mdm,
|
||||
} = loaded_config_layers;
|
||||
|
||||
for (source, config) in managed_config_from_mdm
|
||||
.map(|config| {
|
||||
(
|
||||
RequirementSource::LegacyManagedConfigTomlFromMdm,
|
||||
config.managed_config,
|
||||
)
|
||||
})
|
||||
.into_iter()
|
||||
.chain(managed_config.map(|config| {
|
||||
(
|
||||
RequirementSource::LegacyManagedConfigTomlFromFile { file: config.file },
|
||||
config.managed_config,
|
||||
)
|
||||
}))
|
||||
{
|
||||
let legacy_config: LegacyManagedConfigToml =
|
||||
config.try_into().map_err(|err: toml::de::Error| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("Failed to parse config requirements as TOML: {err}"),
|
||||
)
|
||||
})?;
|
||||
|
||||
let requirements = ConfigRequirementsToml::from(legacy_config);
|
||||
config_requirements_toml.merge_unset_fields(source, requirements);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
|
||||
struct LegacyManagedConfigToml {
|
||||
approval_policy: Option<AskForApproval>,
|
||||
sandbox_mode: Option<SandboxMode>,
|
||||
}
|
||||
|
||||
impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
|
||||
fn from(legacy: LegacyManagedConfigToml) -> Self {
|
||||
let mut config_requirements_toml = ConfigRequirementsToml::default();
|
||||
|
||||
let LegacyManagedConfigToml {
|
||||
approval_policy,
|
||||
sandbox_mode,
|
||||
} = legacy;
|
||||
if let Some(approval_policy) = approval_policy {
|
||||
config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]);
|
||||
}
|
||||
if let Some(sandbox_mode) = sandbox_mode {
|
||||
let required_mode = sandbox_mode.into();
|
||||
let mut allowed_modes = vec![crate::SandboxModeRequirement::ReadOnly];
|
||||
if required_mode != crate::SandboxModeRequirement::ReadOnly {
|
||||
allowed_modes.push(required_mode);
|
||||
}
|
||||
config_requirements_toml.allowed_sandbox_modes = Some(allowed_modes);
|
||||
}
|
||||
config_requirements_toml
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use super::ConfigRequirementsToml;
|
||||
use super::ConfigRequirementsWithSources;
|
||||
use super::RequirementSource;
|
||||
use crate::ConfigRequirementsToml;
|
||||
use crate::ConfigRequirementsWithSources;
|
||||
use crate::RequirementSource;
|
||||
use base64::Engine;
|
||||
use base64::prelude::BASE64_STANDARD;
|
||||
use core_foundation::base::TCFType;
|
||||
@@ -16,19 +16,19 @@ const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
|
||||
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(super) struct ManagedAdminConfigLayer {
|
||||
pub struct ManagedAdminConfigLayer {
|
||||
pub config: TomlValue,
|
||||
pub raw_toml: String,
|
||||
}
|
||||
|
||||
pub(super) fn managed_preferences_requirements_source() -> RequirementSource {
|
||||
fn managed_preferences_requirements_source() -> RequirementSource {
|
||||
RequirementSource::MdmManagedPreferences {
|
||||
domain: MANAGED_PREFERENCES_APPLICATION_ID.to_string(),
|
||||
key: MANAGED_PREFERENCES_REQUIREMENTS_KEY.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn load_managed_admin_config_layer(
|
||||
pub async fn load_managed_admin_config_layer(
|
||||
override_base64: Option<&str>,
|
||||
) -> io::Result<Option<ManagedAdminConfigLayer>> {
|
||||
if let Some(encoded) = override_base64 {
|
||||
@@ -61,7 +61,7 @@ fn load_managed_admin_config() -> io::Result<Option<ManagedAdminConfigLayer>> {
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub(crate) async fn load_managed_admin_requirements_toml(
|
||||
pub async fn load_managed_admin_requirements_toml(
|
||||
target: &mut ConfigRequirementsWithSources,
|
||||
override_base64: Option<&str>,
|
||||
) -> io::Result<()> {
|
||||
@@ -41,6 +41,7 @@ codex-file-search = { workspace = true }
|
||||
codex-git = { workspace = true }
|
||||
codex-hooks = { workspace = true }
|
||||
codex-keyring-store = { workspace = true }
|
||||
codex-models = { workspace = true }
|
||||
codex-network-proxy = { workspace = true }
|
||||
codex-otel = { workspace = true }
|
||||
codex-artifacts = { workspace = true }
|
||||
@@ -123,7 +124,6 @@ landlock = { workspace = true }
|
||||
seccompiler = { workspace = true }
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
core-foundation = "0.9"
|
||||
keyring = { workspace = true, features = ["apple-native"] }
|
||||
|
||||
# Build OpenSSL from source for musl builds.
|
||||
@@ -136,11 +136,6 @@ openssl-sys = { workspace = true, features = ["vendored"] }
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
keyring = { workspace = true, features = ["windows-native"] }
|
||||
windows-sys = { version = "0.52", features = [
|
||||
"Win32_Foundation",
|
||||
"Win32_System_Com",
|
||||
"Win32_UI_Shell",
|
||||
] }
|
||||
|
||||
[target.'cfg(any(target_os = "freebsd", target_os = "openbsd"))'.dependencies]
|
||||
keyring = { workspace = true, features = ["sync-secret-service"] }
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
use codex_models::ModelProviderInfo;
|
||||
use codex_models::WireApi;
|
||||
use codex_otel::AuthEnvTelemetryMetadata;
|
||||
|
||||
use crate::auth::CODEX_API_KEY_ENV_VAR;
|
||||
use crate::auth::OPENAI_API_KEY_ENV_VAR;
|
||||
use crate::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
|
||||
use crate::model_provider_info::ModelProviderInfo;
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq)]
|
||||
pub(crate) struct AuthEnvTelemetry {
|
||||
@@ -64,7 +65,7 @@ mod tests {
|
||||
env_key: Some("sk-should-not-leak".to_string()),
|
||||
env_key_instructions: None,
|
||||
experimental_bearer_token: None,
|
||||
wire_api: crate::model_provider_info::WireApi::Responses,
|
||||
wire_api: WireApi::Responses,
|
||||
query_params: None,
|
||||
http_headers: None,
|
||||
env_http_headers: None,
|
||||
|
||||
@@ -541,10 +541,10 @@ impl ModelClient {
|
||||
Some(manager) => manager.auth().await,
|
||||
None => None,
|
||||
};
|
||||
let api_provider = self
|
||||
.state
|
||||
.provider
|
||||
.to_api_provider(auth.as_ref().map(CodexAuth::auth_mode))?;
|
||||
let api_provider = self.state.provider.to_api_provider(matches!(
|
||||
auth.as_ref().map(CodexAuth::auth_mode),
|
||||
Some(crate::auth::AuthMode::Chatgpt)
|
||||
));
|
||||
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
|
||||
Ok(CurrentClientSetup {
|
||||
auth,
|
||||
|
||||
@@ -1,27 +1,18 @@
|
||||
mod layer_io;
|
||||
#[cfg(target_os = "macos")]
|
||||
mod macos;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config_loader::layer_io::LoadedConfigLayers;
|
||||
use crate::git_info::resolve_root_git_project_for_trust;
|
||||
use codex_app_server_protocol::ConfigLayerSource;
|
||||
use codex_config::CONFIG_TOML_FILE;
|
||||
use codex_config::ConfigRequirementsWithSources;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use codex_protocol::config_types::TrustLevel;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_absolute_path::AbsolutePathBufGuard;
|
||||
use dunce::canonicalize as normalize_path;
|
||||
use serde::Deserialize;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
#[cfg(windows)]
|
||||
use std::path::PathBuf;
|
||||
use toml::Value as TomlValue;
|
||||
|
||||
pub use codex_config::AppRequirementToml;
|
||||
@@ -38,6 +29,7 @@ pub use codex_config::ConfigRequirements;
|
||||
pub use codex_config::ConfigRequirementsToml;
|
||||
pub use codex_config::ConstrainedWithSource;
|
||||
pub use codex_config::FeatureRequirementsToml;
|
||||
use codex_config::LoadedConfigLayers;
|
||||
pub use codex_config::LoaderOverrides;
|
||||
pub use codex_config::McpServerIdentity;
|
||||
pub use codex_config::McpServerRequirement;
|
||||
@@ -55,18 +47,16 @@ pub(crate) use codex_config::config_error_from_toml;
|
||||
pub use codex_config::format_config_error;
|
||||
pub use codex_config::format_config_error_with_source;
|
||||
pub(crate) use codex_config::io_error_from_config_error;
|
||||
use codex_config::load_config_layers_internal;
|
||||
use codex_config::load_managed_admin_requirements;
|
||||
use codex_config::load_requirements_from_legacy_scheme;
|
||||
pub(crate) use codex_config::load_requirements_toml;
|
||||
pub use codex_config::merge_toml_values;
|
||||
use codex_config::system_config_toml_file;
|
||||
use codex_config::system_requirements_toml_file;
|
||||
#[cfg(test)]
|
||||
pub(crate) use codex_config::version_for_toml;
|
||||
|
||||
/// On Unix systems, load default settings from this file path, if present.
|
||||
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
|
||||
/// as skills/ and rules/ will also be honored.
|
||||
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
|
||||
|
||||
#[cfg(windows)]
|
||||
const DEFAULT_PROGRAM_DATA_DIR_WINDOWS: &str = r"C:\ProgramData";
|
||||
|
||||
const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
|
||||
|
||||
pub(crate) async fn first_layer_config_error(layers: &ConfigLayerStack) -> Option<ConfigError> {
|
||||
@@ -125,8 +115,7 @@ pub async fn load_config_layers_state(
|
||||
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
macos::load_managed_admin_requirements_toml(
|
||||
load_managed_admin_requirements(
|
||||
&mut config_requirements_toml,
|
||||
overrides
|
||||
.macos_managed_config_requirements_base64
|
||||
@@ -140,7 +129,7 @@ pub async fn load_config_layers_state(
|
||||
|
||||
// Make a best-effort to support the legacy `managed_config.toml` as a
|
||||
// requirements specification.
|
||||
let loaded_config_layers = layer_io::load_config_layers_internal(codex_home, overrides).await?;
|
||||
let loaded_config_layers = load_config_layers_internal(codex_home, overrides).await?;
|
||||
load_requirements_from_legacy_scheme(
|
||||
&mut config_requirements_toml,
|
||||
loaded_config_layers.clone(),
|
||||
@@ -343,185 +332,6 @@ async fn load_config_toml_for_required_layer(
|
||||
Ok(create_entry(toml_value))
|
||||
}
|
||||
|
||||
/// If available, apply requirements from the platform system
|
||||
/// `requirements.toml` location to `config_requirements_toml` by filling in
|
||||
/// any unset fields.
|
||||
async fn load_requirements_toml(
|
||||
config_requirements_toml: &mut ConfigRequirementsWithSources,
|
||||
requirements_toml_file: impl AsRef<Path>,
|
||||
) -> io::Result<()> {
|
||||
let requirements_toml_file =
|
||||
AbsolutePathBuf::from_absolute_path(requirements_toml_file.as_ref())?;
|
||||
match tokio::fs::read_to_string(&requirements_toml_file).await {
|
||||
Ok(contents) => {
|
||||
let requirements_config: ConfigRequirementsToml =
|
||||
toml::from_str(&contents).map_err(|e| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Error parsing requirements file {}: {e}",
|
||||
requirements_toml_file.as_ref().display(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
config_requirements_toml.merge_unset_fields(
|
||||
RequirementSource::SystemRequirementsToml {
|
||||
file: requirements_toml_file.clone(),
|
||||
},
|
||||
requirements_config,
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
if e.kind() != io::ErrorKind::NotFound {
|
||||
return Err(io::Error::new(
|
||||
e.kind(),
|
||||
format!(
|
||||
"Failed to read requirements file {}: {e}",
|
||||
requirements_toml_file.as_ref().display(),
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(Path::new("/etc/codex/requirements.toml"))
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
windows_system_requirements_toml_file()
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(Path::new(SYSTEM_CONFIG_TOML_FILE_UNIX))
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
windows_system_config_toml_file()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_codex_system_dir() -> PathBuf {
|
||||
let program_data = windows_program_data_dir_from_known_folder().unwrap_or_else(|err| {
|
||||
tracing::warn!(
|
||||
error = %err,
|
||||
"Failed to resolve ProgramData known folder; using default path"
|
||||
);
|
||||
PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS)
|
||||
});
|
||||
program_data.join("OpenAI").join("Codex")
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_system_requirements_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
let requirements_toml_file = windows_codex_system_dir().join("requirements.toml");
|
||||
AbsolutePathBuf::try_from(requirements_toml_file)
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_system_config_toml_file() -> io::Result<AbsolutePathBuf> {
|
||||
let config_toml_file = windows_codex_system_dir().join("config.toml");
|
||||
AbsolutePathBuf::try_from(config_toml_file)
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn windows_program_data_dir_from_known_folder() -> io::Result<PathBuf> {
|
||||
use std::ffi::OsString;
|
||||
use std::os::windows::ffi::OsStringExt;
|
||||
use windows_sys::Win32::System::Com::CoTaskMemFree;
|
||||
use windows_sys::Win32::UI::Shell::FOLDERID_ProgramData;
|
||||
use windows_sys::Win32::UI::Shell::KF_FLAG_DEFAULT;
|
||||
use windows_sys::Win32::UI::Shell::SHGetKnownFolderPath;
|
||||
|
||||
let mut path_ptr = std::ptr::null_mut::<u16>();
|
||||
let known_folder_flags = u32::try_from(KF_FLAG_DEFAULT).map_err(|_| {
|
||||
io::Error::other(format!(
|
||||
"KF_FLAG_DEFAULT did not fit in u32: {KF_FLAG_DEFAULT}"
|
||||
))
|
||||
})?;
|
||||
// Known folder IDs reference:
|
||||
// https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid
|
||||
// SAFETY: SHGetKnownFolderPath initializes path_ptr with a CoTaskMem-allocated,
|
||||
// null-terminated UTF-16 string on success.
|
||||
let hr = unsafe {
|
||||
SHGetKnownFolderPath(&FOLDERID_ProgramData, known_folder_flags, 0, &mut path_ptr)
|
||||
};
|
||||
if hr != 0 {
|
||||
return Err(io::Error::other(format!(
|
||||
"SHGetKnownFolderPath(FOLDERID_ProgramData) failed with HRESULT {hr:#010x}"
|
||||
)));
|
||||
}
|
||||
if path_ptr.is_null() {
|
||||
return Err(io::Error::other(
|
||||
"SHGetKnownFolderPath(FOLDERID_ProgramData) returned a null pointer",
|
||||
));
|
||||
}
|
||||
|
||||
// SAFETY: path_ptr is a valid null-terminated UTF-16 string allocated by
|
||||
// SHGetKnownFolderPath and must be freed with CoTaskMemFree.
|
||||
let path = unsafe {
|
||||
let mut len = 0usize;
|
||||
while *path_ptr.add(len) != 0 {
|
||||
len += 1;
|
||||
}
|
||||
let wide = std::slice::from_raw_parts(path_ptr, len);
|
||||
let path = PathBuf::from(OsString::from_wide(wide));
|
||||
CoTaskMemFree(path_ptr.cast());
|
||||
path
|
||||
};
|
||||
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
async fn load_requirements_from_legacy_scheme(
|
||||
config_requirements_toml: &mut ConfigRequirementsWithSources,
|
||||
loaded_config_layers: LoadedConfigLayers,
|
||||
) -> io::Result<()> {
|
||||
// In this implementation, earlier layers cannot be overwritten by later
|
||||
// layers, so list managed_config_from_mdm first because it has the highest
|
||||
// precedence.
|
||||
let LoadedConfigLayers {
|
||||
managed_config,
|
||||
managed_config_from_mdm,
|
||||
} = loaded_config_layers;
|
||||
|
||||
for (source, config) in managed_config_from_mdm
|
||||
.map(|config| {
|
||||
(
|
||||
RequirementSource::LegacyManagedConfigTomlFromMdm,
|
||||
config.managed_config,
|
||||
)
|
||||
})
|
||||
.into_iter()
|
||||
.chain(managed_config.map(|c| {
|
||||
(
|
||||
RequirementSource::LegacyManagedConfigTomlFromFile { file: c.file },
|
||||
c.managed_config,
|
||||
)
|
||||
}))
|
||||
{
|
||||
let legacy_config: LegacyManagedConfigToml =
|
||||
config.try_into().map_err(|err: toml::de::Error| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("Failed to parse config requirements as TOML: {err}"),
|
||||
)
|
||||
})?;
|
||||
|
||||
let new_requirements_toml = ConfigRequirementsToml::from(legacy_config);
|
||||
config_requirements_toml.merge_unset_fields(source, new_requirements_toml);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reads `project_root_markers` from the [toml::Value] produced by merging
|
||||
/// `config.toml` from the config layers in the stack preceding
|
||||
/// [ConfigLayerSource::Project].
|
||||
@@ -895,51 +705,12 @@ async fn load_project_layers(
|
||||
Ok(layers)
|
||||
}
|
||||
|
||||
/// The legacy mechanism for specifying admin-enforced configuration is to read
/// from a file like `/etc/codex/managed_config.toml` that has the same
/// structure as `config.toml` where fields like `approval_policy` can specify
/// exactly one value rather than a list of allowed values.
///
/// If present, re-interpret `managed_config.toml` as a `requirements.toml`
/// where each specified field is treated as a constraint allowing only that
/// value.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
struct LegacyManagedConfigToml {
    // The single approval policy the admin mandated, if any.
    approval_policy: Option<AskForApproval>,
    // The single sandbox mode the admin mandated, if any. The backfill in
    // `From<LegacyManagedConfigToml>` additionally allows read-only.
    sandbox_mode: Option<SandboxMode>,
}
|
||||
|
||||
impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
|
||||
fn from(legacy: LegacyManagedConfigToml) -> Self {
|
||||
let mut config_requirements_toml = ConfigRequirementsToml::default();
|
||||
|
||||
let LegacyManagedConfigToml {
|
||||
approval_policy,
|
||||
sandbox_mode,
|
||||
} = legacy;
|
||||
if let Some(approval_policy) = approval_policy {
|
||||
config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]);
|
||||
}
|
||||
if let Some(sandbox_mode) = sandbox_mode {
|
||||
let required_mode: SandboxModeRequirement = sandbox_mode.into();
|
||||
// Allowing read-only is a requirement for Codex to function correctly.
|
||||
// So in this backfill path, we append read-only if it's not already specified.
|
||||
let mut allowed_modes = vec![SandboxModeRequirement::ReadOnly];
|
||||
if required_mode != SandboxModeRequirement::ReadOnly {
|
||||
allowed_modes.push(required_mode);
|
||||
}
|
||||
config_requirements_toml.allowed_sandbox_modes = Some(allowed_modes);
|
||||
}
|
||||
config_requirements_toml
|
||||
}
|
||||
}
|
||||
|
||||
// Cannot name this `mod tests` because of tests.rs in this folder.
|
||||
#[cfg(test)]
|
||||
mod unit_tests {
|
||||
use super::*;
|
||||
#[cfg(windows)]
|
||||
use std::path::Path;
|
||||
use codex_config::ManagedConfigFromFile;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
@@ -979,65 +750,81 @@ foo = "xyzzy"
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn legacy_managed_config_backfill_includes_read_only_sandbox_mode() {
|
||||
let legacy = LegacyManagedConfigToml {
|
||||
approval_policy: None,
|
||||
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
|
||||
#[tokio::test]
|
||||
async fn legacy_managed_config_backfill_includes_read_only_sandbox_mode() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = AbsolutePathBuf::try_from(tmp.path().join("managed_config.toml"))
|
||||
.expect("managed path");
|
||||
let loaded_layers = LoadedConfigLayers {
|
||||
managed_config: Some(ManagedConfigFromFile {
|
||||
managed_config: toml::toml! {
|
||||
sandbox_mode = "workspace-write"
|
||||
}
|
||||
.into(),
|
||||
file: managed_path.clone(),
|
||||
}),
|
||||
managed_config_from_mdm: None,
|
||||
};
|
||||
|
||||
let requirements = ConfigRequirementsToml::from(legacy);
|
||||
let mut requirements_with_sources = ConfigRequirementsWithSources::default();
|
||||
load_requirements_from_legacy_scheme(&mut requirements_with_sources, loaded_layers)
|
||||
.await
|
||||
.expect("load legacy requirements");
|
||||
let requirements: ConfigRequirements = requirements_with_sources
|
||||
.try_into()
|
||||
.expect("requirements parse");
|
||||
|
||||
assert_eq!(
|
||||
requirements.allowed_sandbox_modes,
|
||||
Some(vec![
|
||||
SandboxModeRequirement::ReadOnly,
|
||||
SandboxModeRequirement::WorkspaceWrite
|
||||
])
|
||||
requirements.sandbox_policy.get(),
|
||||
&SandboxPolicy::new_read_only_policy()
|
||||
);
|
||||
assert!(
|
||||
requirements
|
||||
.sandbox_policy
|
||||
.can_set(&SandboxPolicy::new_workspace_write_policy())
|
||||
.is_ok()
|
||||
);
|
||||
assert_eq!(
|
||||
requirements
|
||||
.sandbox_policy
|
||||
.can_set(&SandboxPolicy::DangerFullAccess),
|
||||
Err(codex_config::ConstraintError::InvalidValue {
|
||||
field_name: "sandbox_mode",
|
||||
candidate: "DangerFullAccess".into(),
|
||||
allowed: "[ReadOnly, WorkspaceWrite]".into(),
|
||||
requirement_source: RequirementSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_path,
|
||||
},
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[test]
|
||||
fn windows_system_requirements_toml_file_uses_expected_suffix() {
|
||||
let expected = windows_program_data_dir_from_known_folder()
|
||||
.unwrap_or_else(|_| PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS))
|
||||
.join("OpenAI")
|
||||
.join("Codex")
|
||||
.join("requirements.toml");
|
||||
assert_eq!(
|
||||
windows_system_requirements_toml_file()
|
||||
.expect("requirements.toml path")
|
||||
.as_path(),
|
||||
expected.as_path()
|
||||
);
|
||||
assert!(
|
||||
windows_system_requirements_toml_file()
|
||||
system_requirements_toml_file()
|
||||
.expect("requirements.toml path")
|
||||
.as_path()
|
||||
.ends_with(Path::new("OpenAI").join("Codex").join("requirements.toml"))
|
||||
.ends_with(
|
||||
std::path::Path::new("OpenAI")
|
||||
.join("Codex")
|
||||
.join("requirements.toml")
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[test]
|
||||
fn windows_system_config_toml_file_uses_expected_suffix() {
|
||||
let expected = windows_program_data_dir_from_known_folder()
|
||||
.unwrap_or_else(|_| PathBuf::from(DEFAULT_PROGRAM_DATA_DIR_WINDOWS))
|
||||
.join("OpenAI")
|
||||
.join("Codex")
|
||||
.join("config.toml");
|
||||
assert_eq!(
|
||||
windows_system_config_toml_file()
|
||||
.expect("config.toml path")
|
||||
.as_path(),
|
||||
expected.as_path()
|
||||
);
|
||||
assert!(
|
||||
windows_system_config_toml_file()
|
||||
system_config_toml_file()
|
||||
.expect("config.toml path")
|
||||
.as_path()
|
||||
.ends_with(Path::new("OpenAI").join("Codex").join("config.toml"))
|
||||
.ends_with(
|
||||
std::path::Path::new("OpenAI")
|
||||
.join("Codex")
|
||||
.join("config.toml")
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,6 +191,15 @@ impl From<CancelErr> for CodexErr {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<codex_models::EnvVarError> for CodexErr {
|
||||
fn from(error: codex_models::EnvVarError) -> Self {
|
||||
Self::EnvVar(EnvVarError {
|
||||
var: error.var,
|
||||
instructions: error.instructions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl CodexErr {
|
||||
pub fn is_retryable(&self) -> bool {
|
||||
match self {
|
||||
|
||||
@@ -1,356 +1 @@
|
||||
//! Registry of model providers supported by Codex.
|
||||
//!
|
||||
//! Providers can be defined in two places:
|
||||
//! 1. Built-in defaults compiled into the binary so Codex works out-of-the-box.
|
||||
//! 2. User-defined entries inside `~/.codex/config.toml` under the `model_providers`
|
||||
//! key. These override or extend the defaults at runtime.
|
||||
|
||||
use crate::auth::AuthMode;
|
||||
use crate::error::EnvVarError;
|
||||
use codex_api::Provider as ApiProvider;
|
||||
use codex_api::provider::RetryConfig as ApiRetryConfig;
|
||||
use http::HeaderMap;
|
||||
use http::header::HeaderName;
|
||||
use http::header::HeaderValue;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::time::Duration;
|
||||
|
||||
const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000;
|
||||
const DEFAULT_STREAM_MAX_RETRIES: u64 = 5;
|
||||
const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
|
||||
pub(crate) const DEFAULT_WEBSOCKET_CONNECT_TIMEOUT_MS: u64 = 15_000;
|
||||
/// Hard cap for user-configured `stream_max_retries`.
|
||||
const MAX_STREAM_MAX_RETRIES: u64 = 100;
|
||||
/// Hard cap for user-configured `request_max_retries`.
|
||||
const MAX_REQUEST_MAX_RETRIES: u64 = 100;
|
||||
|
||||
const OPENAI_PROVIDER_NAME: &str = "OpenAI";
|
||||
pub const OPENAI_PROVIDER_ID: &str = "openai";
|
||||
const CHAT_WIRE_API_REMOVED_ERROR: &str = "`wire_api = \"chat\"` is no longer supported.\nHow to fix: set `wire_api = \"responses\"` in your provider config.\nMore info: https://github.com/openai/codex/discussions/7782";
|
||||
pub(crate) const LEGACY_OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
|
||||
pub(crate) const OLLAMA_CHAT_PROVIDER_REMOVED_ERROR: &str = "`ollama-chat` is no longer supported.\nHow to fix: replace `ollama-chat` with `ollama` in `model_provider`, `oss_provider`, or `--local-provider`.\nMore info: https://github.com/openai/codex/discussions/7782";
|
||||
|
||||
/// Wire protocol that the provider speaks.
///
/// A `chat` variant existed historically; deserializing `"chat"` now fails
/// with a migration message (see the `Deserialize` impl and
/// `CHAT_WIRE_API_REMOVED_ERROR`).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum WireApi {
    /// The Responses API exposed by OpenAI at `/v1/responses`.
    #[default]
    Responses,
}
|
||||
|
||||
impl fmt::Display for WireApi {
    /// Renders the wire API in the same lowercase form serde uses.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Responses => f.write_str("responses"),
        }
    }
}
|
||||
|
||||
impl<'de> Deserialize<'de> for WireApi {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let value = String::deserialize(deserializer)?;
|
||||
match value.as_str() {
|
||||
"responses" => Ok(Self::Responses),
|
||||
"chat" => Err(serde::de::Error::custom(CHAT_WIRE_API_REMOVED_ERROR)),
|
||||
_ => Err(serde::de::Error::unknown_variant(&value, &["responses"])),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializable representation of a provider definition.
///
/// Instances come either from built-in defaults compiled into the binary or
/// from `model_providers` entries in the user's `config.toml`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ModelProviderInfo {
    /// Friendly display name.
    pub name: String,
    /// Base URL for the provider's OpenAI-compatible API.
    /// When `None`, a default is chosen in `to_api_provider` based on auth mode.
    pub base_url: Option<String>,
    /// Environment variable that stores the user's API key for this provider.
    pub env_key: Option<String>,

    /// Optional instructions to help the user get a valid value for the
    /// variable and set it.
    pub env_key_instructions: Option<String>,

    /// Value to use with `Authorization: Bearer <token>` header. Use of this
    /// config is discouraged in favor of `env_key` for security reasons, but
    /// this may be necessary when using this programmatically.
    pub experimental_bearer_token: Option<String>,

    /// Which wire protocol this provider expects. Defaults to `responses`.
    #[serde(default)]
    pub wire_api: WireApi,

    /// Optional query parameters to append to the base URL.
    pub query_params: Option<HashMap<String, String>>,

    /// Additional HTTP headers to include in requests to this provider where
    /// the (key, value) pairs are the header name and value.
    pub http_headers: Option<HashMap<String, String>>,

    /// Optional HTTP headers to include in requests to this provider where the
    /// (key, value) pairs are the header name and _environment variable_ whose
    /// value should be used. If the environment variable is not set, or the
    /// value is empty, the header will not be included in the request.
    pub env_http_headers: Option<HashMap<String, String>>,

    /// Maximum number of times to retry a failed HTTP request to this provider.
    /// Capped at `MAX_REQUEST_MAX_RETRIES`; see `request_max_retries()`.
    pub request_max_retries: Option<u64>,

    /// Number of times to retry reconnecting a dropped streaming response before failing.
    /// Capped at `MAX_STREAM_MAX_RETRIES`; see `stream_max_retries()`.
    pub stream_max_retries: Option<u64>,

    /// Idle timeout (in milliseconds) to wait for activity on a streaming response before treating
    /// the connection as lost.
    pub stream_idle_timeout_ms: Option<u64>,

    /// Maximum time (in milliseconds) to wait for a websocket connection attempt before treating
    /// it as failed.
    pub websocket_connect_timeout_ms: Option<u64>,

    /// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
    /// user is presented with login screen on first run, and login preference and token/key
    /// are stored in auth.json. If false (which is the default), login screen is skipped,
    /// and API key (if needed) comes from the "env_key" environment variable.
    #[serde(default)]
    pub requires_openai_auth: bool,

    /// Whether this provider supports the Responses API WebSocket transport.
    #[serde(default)]
    pub supports_websockets: bool,
}
|
||||
|
||||
impl ModelProviderInfo {
    /// Builds the header map for outgoing requests from the static
    /// `http_headers` plus any `env_http_headers` whose environment variable
    /// is set to a non-empty value. Invalid header names/values are silently
    /// skipped rather than failing the whole request.
    fn build_header_map(&self) -> crate::error::Result<HeaderMap> {
        let capacity = self.http_headers.as_ref().map_or(0, HashMap::len)
            + self.env_http_headers.as_ref().map_or(0, HashMap::len);
        let mut headers = HeaderMap::with_capacity(capacity);
        if let Some(extra) = &self.http_headers {
            for (k, v) in extra {
                if let (Ok(name), Ok(value)) = (HeaderName::try_from(k), HeaderValue::try_from(v)) {
                    headers.insert(name, value);
                }
            }
        }

        if let Some(env_headers) = &self.env_http_headers {
            for (header, env_var) in env_headers {
                // Only attach the header when the env var exists and is non-blank.
                if let Ok(val) = std::env::var(env_var)
                    && !val.trim().is_empty()
                    && let (Ok(name), Ok(value)) =
                        (HeaderName::try_from(header), HeaderValue::try_from(val))
                {
                    headers.insert(name, value);
                }
            }
        }

        Ok(headers)
    }

    /// Converts this config-level provider into the transport-level
    /// [ApiProvider], resolving the base URL (ChatGPT backend vs. OpenAI API
    /// depending on `auth_mode` when no explicit `base_url` is set), headers,
    /// and retry policy.
    pub(crate) fn to_api_provider(
        &self,
        auth_mode: Option<AuthMode>,
    ) -> crate::error::Result<ApiProvider> {
        let default_base_url = if matches!(auth_mode, Some(AuthMode::Chatgpt)) {
            "https://chatgpt.com/backend-api/codex"
        } else {
            "https://api.openai.com/v1"
        };
        let base_url = self
            .base_url
            .clone()
            .unwrap_or_else(|| default_base_url.to_string());

        let headers = self.build_header_map()?;
        let retry = ApiRetryConfig {
            max_attempts: self.request_max_retries(),
            base_delay: Duration::from_millis(200),
            // 429s are not retried here; 5xx and transport failures are.
            retry_429: false,
            retry_5xx: true,
            retry_transport: true,
        };

        Ok(ApiProvider {
            name: self.name.clone(),
            base_url,
            query_params: self.query_params.clone(),
            headers,
            retry,
            stream_idle_timeout: self.stream_idle_timeout(),
        })
    }

    /// If `env_key` is Some, returns the API key for this provider if present
    /// (and non-empty) in the environment. If `env_key` is required but
    /// cannot be found, returns an error.
    pub fn api_key(&self) -> crate::error::Result<Option<String>> {
        match &self.env_key {
            Some(env_key) => {
                // Blank (whitespace-only) values count as missing.
                let api_key = std::env::var(env_key)
                    .ok()
                    .filter(|v| !v.trim().is_empty())
                    .ok_or_else(|| {
                        crate::error::CodexErr::EnvVar(EnvVarError {
                            var: env_key.clone(),
                            instructions: self.env_key_instructions.clone(),
                        })
                    })?;
                Ok(Some(api_key))
            }
            None => Ok(None),
        }
    }

    /// Effective maximum number of request retries for this provider.
    /// Falls back to the global default and is clamped to the hard cap.
    pub fn request_max_retries(&self) -> u64 {
        self.request_max_retries
            .unwrap_or(DEFAULT_REQUEST_MAX_RETRIES)
            .min(MAX_REQUEST_MAX_RETRIES)
    }

    /// Effective maximum number of stream reconnection attempts for this provider.
    /// Falls back to the global default and is clamped to the hard cap.
    pub fn stream_max_retries(&self) -> u64 {
        self.stream_max_retries
            .unwrap_or(DEFAULT_STREAM_MAX_RETRIES)
            .min(MAX_STREAM_MAX_RETRIES)
    }

    /// Effective idle timeout for streaming responses.
    pub fn stream_idle_timeout(&self) -> Duration {
        self.stream_idle_timeout_ms
            .map(Duration::from_millis)
            .unwrap_or(Duration::from_millis(DEFAULT_STREAM_IDLE_TIMEOUT_MS))
    }

    /// Effective timeout for websocket connect attempts.
    pub fn websocket_connect_timeout(&self) -> Duration {
        self.websocket_connect_timeout_ms
            .map(Duration::from_millis)
            .unwrap_or(Duration::from_millis(DEFAULT_WEBSOCKET_CONNECT_TIMEOUT_MS))
    }

    /// Builds the built-in OpenAI provider entry: Responses wire API, OpenAI
    /// auth required, websockets supported, with a version header plus
    /// org/project headers sourced from the environment when present.
    pub fn create_openai_provider(base_url: Option<String>) -> ModelProviderInfo {
        ModelProviderInfo {
            name: OPENAI_PROVIDER_NAME.into(),
            base_url,
            env_key: None,
            env_key_instructions: None,
            experimental_bearer_token: None,
            wire_api: WireApi::Responses,
            query_params: None,
            http_headers: Some(
                [("version".to_string(), env!("CARGO_PKG_VERSION").to_string())]
                    .into_iter()
                    .collect(),
            ),
            env_http_headers: Some(
                [
                    (
                        "OpenAI-Organization".to_string(),
                        "OPENAI_ORGANIZATION".to_string(),
                    ),
                    ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
                ]
                .into_iter()
                .collect(),
            ),
            // Use global defaults for retry/timeout unless overridden in config.toml.
            request_max_retries: None,
            stream_max_retries: None,
            stream_idle_timeout_ms: None,
            websocket_connect_timeout_ms: None,
            requires_openai_auth: true,
            supports_websockets: true,
        }
    }

    /// True when this provider's display name is the built-in OpenAI name.
    pub fn is_openai(&self) -> bool {
        self.name == OPENAI_PROVIDER_NAME
    }
}
|
||||
|
||||
pub const DEFAULT_LMSTUDIO_PORT: u16 = 1234;
|
||||
pub const DEFAULT_OLLAMA_PORT: u16 = 11434;
|
||||
|
||||
pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
|
||||
pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
|
||||
|
||||
/// Built-in default provider list.
|
||||
pub fn built_in_model_providers(
|
||||
openai_base_url: Option<String>,
|
||||
) -> HashMap<String, ModelProviderInfo> {
|
||||
use ModelProviderInfo as P;
|
||||
let openai_provider = P::create_openai_provider(openai_base_url);
|
||||
|
||||
// We do not want to be in the business of adjucating which third-party
|
||||
// providers are bundled with Codex CLI, so we only include the OpenAI and
|
||||
// open source ("oss") providers by default. Users are encouraged to add to
|
||||
// `model_providers` in config.toml to add their own providers.
|
||||
[
|
||||
(OPENAI_PROVIDER_ID, openai_provider),
|
||||
(
|
||||
OLLAMA_OSS_PROVIDER_ID,
|
||||
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),
|
||||
),
|
||||
(
|
||||
LMSTUDIO_OSS_PROVIDER_ID,
|
||||
create_oss_provider(DEFAULT_LMSTUDIO_PORT, WireApi::Responses),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn create_oss_provider(default_provider_port: u16, wire_api: WireApi) -> ModelProviderInfo {
|
||||
// These CODEX_OSS_ environment variables are experimental: we may
|
||||
// switch to reading values from config.toml instead.
|
||||
let default_codex_oss_base_url = format!(
|
||||
"http://localhost:{codex_oss_port}/v1",
|
||||
codex_oss_port = std::env::var("CODEX_OSS_PORT")
|
||||
.ok()
|
||||
.filter(|value| !value.trim().is_empty())
|
||||
.and_then(|value| value.parse::<u16>().ok())
|
||||
.unwrap_or(default_provider_port)
|
||||
);
|
||||
|
||||
let codex_oss_base_url = std::env::var("CODEX_OSS_BASE_URL")
|
||||
.ok()
|
||||
.filter(|v| !v.trim().is_empty())
|
||||
.unwrap_or(default_codex_oss_base_url);
|
||||
create_oss_provider_with_base_url(&codex_oss_base_url, wire_api)
|
||||
}
|
||||
|
||||
/// Builds the "gpt-oss" provider entry for a local open-source server at
/// `base_url`: no API key, no OpenAI auth, no websockets, and all
/// retry/timeout settings left to the global defaults.
pub fn create_oss_provider_with_base_url(base_url: &str, wire_api: WireApi) -> ModelProviderInfo {
    ModelProviderInfo {
        name: "gpt-oss".into(),
        base_url: Some(base_url.into()),
        env_key: None,
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: None,
        stream_max_retries: None,
        stream_idle_timeout_ms: None,
        websocket_connect_timeout_ms: None,
        requires_openai_auth: false,
        supports_websockets: false,
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "model_provider_info_tests.rs"]
|
||||
mod tests;
|
||||
pub use codex_models::model_provider_info::*;
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use super::cache::ModelsCacheManager;
|
||||
use crate::api_bridge::auth_provider_from_auth;
|
||||
use crate::api_bridge::map_api_error;
|
||||
use crate::auth::AuthManager;
|
||||
@@ -10,18 +9,19 @@ use crate::config::Config;
|
||||
use crate::default_client::build_reqwest_client;
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result as CoreResult;
|
||||
use crate::model_provider_info::ModelProviderInfo;
|
||||
use crate::models_manager::collaboration_mode_presets::CollaborationModesConfig;
|
||||
use crate::models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
|
||||
use crate::models_manager::model_info;
|
||||
use crate::response_debug_context::extract_response_debug_context;
|
||||
use crate::response_debug_context::telemetry_transport_error_message;
|
||||
use crate::util::FeedbackRequestTags;
|
||||
use crate::util::emit_feedback_request_tags_with_auth_env;
|
||||
use codex_api::ModelsClient;
|
||||
use codex_api::RequestTelemetry;
|
||||
use codex_api::ReqwestTransport;
|
||||
use codex_api::TransportError;
|
||||
use codex_models::ModelProviderInfo;
|
||||
use codex_models::models_manager::cache::ModelsCacheManager;
|
||||
use codex_models::models_manager::collaboration_mode_presets::CollaborationModesConfig;
|
||||
use codex_models::models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
|
||||
use codex_models::response_debug_context::extract_response_debug_context;
|
||||
use codex_models::response_debug_context::telemetry_transport_error_message;
|
||||
use codex_otel::TelemetryAuthMode;
|
||||
use codex_protocol::config_types::CollaborationModeMask;
|
||||
use codex_protocol::openai_models::ModelInfo;
|
||||
@@ -433,7 +433,9 @@ impl ModelsManager {
|
||||
codex_otel::start_global_timer("codex.remote_models.fetch_update.duration_ms", &[]);
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let auth_mode = auth.as_ref().map(CodexAuth::auth_mode);
|
||||
let api_provider = self.provider.to_api_provider(auth_mode)?;
|
||||
let api_provider = self
|
||||
.provider
|
||||
.to_api_provider(matches!(auth_mode, Some(AuthMode::Chatgpt)));
|
||||
let api_auth = auth_provider_from_auth(auth.clone(), &self.provider)?;
|
||||
let auth_env = collect_auth_env_telemetry(
|
||||
&self.provider,
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
pub mod cache;
|
||||
pub mod collaboration_mode_presets;
|
||||
pub mod manager;
|
||||
pub mod model_info;
|
||||
pub mod model_presets;
|
||||
pub mod cache {
|
||||
pub use codex_models::models_manager::cache::*;
|
||||
}
|
||||
pub mod collaboration_mode_presets {
|
||||
pub use codex_models::models_manager::collaboration_mode_presets::*;
|
||||
}
|
||||
pub mod model_presets {
|
||||
pub use codex_models::models_manager::model_presets::*;
|
||||
}
|
||||
|
||||
/// Convert the client version string to a whole version string (e.g. "1.2.3-alpha.4" -> "1.2.3").
|
||||
pub fn client_version_to_whole() -> String {
|
||||
format!(
|
||||
"{}.{}.{}",
|
||||
env!("CARGO_PKG_VERSION_MAJOR"),
|
||||
env!("CARGO_PKG_VERSION_MINOR"),
|
||||
env!("CARGO_PKG_VERSION_PATCH")
|
||||
)
|
||||
codex_models::models_manager::client_version_to_whole()
|
||||
}
|
||||
|
||||
@@ -348,7 +348,7 @@ pub(crate) async fn handle_start(
|
||||
let provider = sess.provider().await;
|
||||
let auth = sess.services.auth_manager.auth().await;
|
||||
let realtime_api_key = realtime_api_key(auth.as_ref(), &provider)?;
|
||||
let mut api_provider = provider.to_api_provider(Some(crate::auth::AuthMode::ApiKey))?;
|
||||
let mut api_provider = provider.to_api_provider(/*use_chatgpt_base_url*/ false);
|
||||
let config = sess.get_config().await;
|
||||
if let Some(realtime_ws_base_url) = &config.experimental_realtime_ws_base_url {
|
||||
api_provider.base_url = realtime_ws_base_url.clone();
|
||||
|
||||
@@ -1,167 +1 @@
|
||||
use base64::Engine;
|
||||
use codex_api::TransportError;
|
||||
use codex_api::error::ApiError;
|
||||
|
||||
const REQUEST_ID_HEADER: &str = "x-request-id";
|
||||
const OAI_REQUEST_ID_HEADER: &str = "x-oai-request-id";
|
||||
const CF_RAY_HEADER: &str = "cf-ray";
|
||||
const AUTH_ERROR_HEADER: &str = "x-openai-authorization-error";
|
||||
const X_ERROR_JSON_HEADER: &str = "x-error-json";
|
||||
|
||||
// Debug identifiers extracted from HTTP error response headers, used to
// enrich telemetry without including response bodies.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub(crate) struct ResponseDebugContext {
    // From `x-request-id`, falling back to `x-oai-request-id`.
    pub(crate) request_id: Option<String>,
    // Cloudflare ray id from the `cf-ray` header.
    pub(crate) cf_ray: Option<String>,
    // Raw value of `x-openai-authorization-error`, if present.
    pub(crate) auth_error: Option<String>,
    // `error.code` decoded from the base64 JSON in `x-error-json`.
    pub(crate) auth_error_code: Option<String>,
}
|
||||
|
||||
pub(crate) fn extract_response_debug_context(transport: &TransportError) -> ResponseDebugContext {
|
||||
let mut context = ResponseDebugContext::default();
|
||||
|
||||
let TransportError::Http {
|
||||
headers, body: _, ..
|
||||
} = transport
|
||||
else {
|
||||
return context;
|
||||
};
|
||||
|
||||
let extract_header = |name: &str| {
|
||||
headers
|
||||
.as_ref()
|
||||
.and_then(|headers| headers.get(name))
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.map(str::to_string)
|
||||
};
|
||||
|
||||
context.request_id =
|
||||
extract_header(REQUEST_ID_HEADER).or_else(|| extract_header(OAI_REQUEST_ID_HEADER));
|
||||
context.cf_ray = extract_header(CF_RAY_HEADER);
|
||||
context.auth_error = extract_header(AUTH_ERROR_HEADER);
|
||||
context.auth_error_code = extract_header(X_ERROR_JSON_HEADER).and_then(|encoded| {
|
||||
let decoded = base64::engine::general_purpose::STANDARD
|
||||
.decode(encoded)
|
||||
.ok()?;
|
||||
let parsed = serde_json::from_slice::<serde_json::Value>(&decoded).ok()?;
|
||||
parsed
|
||||
.get("error")
|
||||
.and_then(|error| error.get("code"))
|
||||
.and_then(serde_json::Value::as_str)
|
||||
.map(str::to_string)
|
||||
});
|
||||
|
||||
context
|
||||
}
|
||||
|
||||
pub(crate) fn extract_response_debug_context_from_api_error(
|
||||
error: &ApiError,
|
||||
) -> ResponseDebugContext {
|
||||
match error {
|
||||
ApiError::Transport(transport) => extract_response_debug_context(transport),
|
||||
_ => ResponseDebugContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn telemetry_transport_error_message(error: &TransportError) -> String {
|
||||
match error {
|
||||
TransportError::Http { status, .. } => format!("http {}", status.as_u16()),
|
||||
TransportError::RetryLimit => "retry limit reached".to_string(),
|
||||
TransportError::Timeout => "timeout".to_string(),
|
||||
TransportError::Network(err) => err.to_string(),
|
||||
TransportError::Build(err) => err.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn telemetry_api_error_message(error: &ApiError) -> String {
|
||||
match error {
|
||||
ApiError::Transport(transport) => telemetry_transport_error_message(transport),
|
||||
ApiError::Api { status, .. } => format!("api error {}", status.as_u16()),
|
||||
ApiError::Stream(err) => err.to_string(),
|
||||
ApiError::ContextWindowExceeded => "context window exceeded".to_string(),
|
||||
ApiError::QuotaExceeded => "quota exceeded".to_string(),
|
||||
ApiError::UsageNotIncluded => "usage not included".to_string(),
|
||||
ApiError::Retryable { .. } => "retryable error".to_string(),
|
||||
ApiError::RateLimit(_) => "rate limit".to_string(),
|
||||
ApiError::InvalidRequest { .. } => "invalid request".to_string(),
|
||||
ApiError::ServerOverloaded => "server overloaded".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for header extraction and telemetry message formatting.
#[cfg(test)]
mod tests {
    use super::ResponseDebugContext;
    use super::extract_response_debug_context;
    use super::telemetry_api_error_message;
    use super::telemetry_transport_error_message;
    use codex_api::TransportError;
    use codex_api::error::ApiError;
    use http::HeaderMap;
    use http::HeaderValue;
    use http::StatusCode;
    use pretty_assertions::assert_eq;

    // All four debug headers present: every field of the context is filled,
    // including the base64/JSON-decoded `error.code` from `x-error-json`.
    #[test]
    fn extract_response_debug_context_decodes_identity_headers() {
        let mut headers = HeaderMap::new();
        headers.insert("x-oai-request-id", HeaderValue::from_static("req-auth"));
        headers.insert("cf-ray", HeaderValue::from_static("ray-auth"));
        headers.insert(
            "x-openai-authorization-error",
            HeaderValue::from_static("missing_authorization_header"),
        );
        // base64 of {"error":{"code":"token_expired"}}
        headers.insert(
            "x-error-json",
            HeaderValue::from_static("eyJlcnJvciI6eyJjb2RlIjoidG9rZW5fZXhwaXJlZCJ9fQ=="),
        );

        let context = extract_response_debug_context(&TransportError::Http {
            status: StatusCode::UNAUTHORIZED,
            url: Some("https://chatgpt.com/backend-api/codex/models".to_string()),
            headers: Some(headers),
            body: Some(r#"{"error":{"message":"plain text error"},"status":401}"#.to_string()),
        });

        assert_eq!(
            context,
            ResponseDebugContext {
                request_id: Some("req-auth".to_string()),
                cf_ray: Some("ray-auth".to_string()),
                auth_error: Some("missing_authorization_header".to_string()),
                auth_error_code: Some("token_expired".to_string()),
            }
        );
    }

    // HTTP errors must be reduced to the status code only — the (potentially
    // sensitive) response body never reaches telemetry.
    #[test]
    fn telemetry_error_messages_omit_http_bodies() {
        let transport = TransportError::Http {
            status: StatusCode::UNAUTHORIZED,
            url: Some("https://chatgpt.com/backend-api/codex/responses".to_string()),
            headers: None,
            body: Some(r#"{"error":{"message":"secret token leaked"}}"#.to_string()),
        };

        assert_eq!(telemetry_transport_error_message(&transport), "http 401");
        assert_eq!(
            telemetry_api_error_message(&ApiError::Transport(transport)),
            "http 401"
        );
    }

    // Non-HTTP errors carry no bodies, so their own messages pass through.
    #[test]
    fn telemetry_error_messages_preserve_non_http_details() {
        let network = TransportError::Network("dns lookup failed".to_string());
        let build = TransportError::Build("invalid header value".to_string());
        let stream = ApiError::Stream("socket closed".to_string());

        assert_eq!(
            telemetry_transport_error_message(&network),
            "dns lookup failed"
        );
        assert_eq!(
            telemetry_transport_error_message(&build),
            "invalid header value"
        );
        assert_eq!(telemetry_api_error_message(&stream), "socket closed");
    }
}
|
||||
pub use codex_models::response_debug_context::*;
|
||||
|
||||
9
codex-rs/models/BUILD.bazel
Normal file
9
codex-rs/models/BUILD.bazel
Normal file
@@ -0,0 +1,9 @@
|
||||
load("//:defs.bzl", "codex_rust_crate")
|
||||
|
||||
codex_rust_crate(
|
||||
name = "models",
|
||||
crate_name = "codex_models",
|
||||
compile_data = glob(["templates/**"]) + [
|
||||
"//codex-rs/core:models.json",
|
||||
],
|
||||
)
|
||||
31
codex-rs/models/Cargo.toml
Normal file
31
codex-rs/models/Cargo.toml
Normal file
@@ -0,0 +1,31 @@
|
||||
[package]
|
||||
name = "codex-models"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lib]
|
||||
doctest = false
|
||||
name = "codex_models"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64 = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
codex-api = { workspace = true }
|
||||
codex-protocol = { workspace = true }
|
||||
http = { workspace = true }
|
||||
schemars = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = ["fs", "sync", "time"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
maplit = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
18
codex-rs/models/src/lib.rs
Normal file
18
codex-rs/models/src/lib.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
pub mod model_provider_info;
|
||||
pub mod models_manager;
|
||||
pub mod response_debug_context;
|
||||
|
||||
pub use model_provider_info::DEFAULT_LMSTUDIO_PORT;
|
||||
pub use model_provider_info::DEFAULT_OLLAMA_PORT;
|
||||
pub use model_provider_info::DEFAULT_WEBSOCKET_CONNECT_TIMEOUT_MS;
|
||||
pub use model_provider_info::EnvVarError;
|
||||
pub use model_provider_info::LEGACY_OLLAMA_CHAT_PROVIDER_ID;
|
||||
pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
|
||||
pub use model_provider_info::ModelProviderInfo;
|
||||
pub use model_provider_info::OLLAMA_CHAT_PROVIDER_REMOVED_ERROR;
|
||||
pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
|
||||
pub use model_provider_info::OPENAI_PROVIDER_ID;
|
||||
pub use model_provider_info::WireApi;
|
||||
pub use model_provider_info::built_in_model_providers;
|
||||
pub use model_provider_info::create_oss_provider;
|
||||
pub use model_provider_info::create_oss_provider_with_base_url;
|
||||
329
codex-rs/models/src/model_provider_info.rs
Normal file
329
codex-rs/models/src/model_provider_info.rs
Normal file
@@ -0,0 +1,329 @@
|
||||
//! Registry of model providers supported by Codex.
|
||||
//!
|
||||
//! Providers can be defined in two places:
|
||||
//! 1. Built-in defaults compiled into the binary so Codex works out-of-the-box.
|
||||
//! 2. User-defined entries inside `~/.codex/config.toml` under the `model_providers`
|
||||
//! key. These override or extend the defaults at runtime.
|
||||
|
||||
use codex_api::Provider as ApiProvider;
|
||||
use codex_api::provider::RetryConfig as ApiRetryConfig;
|
||||
use http::HeaderMap;
|
||||
use http::header::HeaderName;
|
||||
use http::header::HeaderValue;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::time::Duration;
|
||||
|
||||
const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000;
|
||||
const DEFAULT_STREAM_MAX_RETRIES: u64 = 5;
|
||||
const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
|
||||
pub const DEFAULT_WEBSOCKET_CONNECT_TIMEOUT_MS: u64 = 15_000;
|
||||
const MAX_STREAM_MAX_RETRIES: u64 = 100;
|
||||
const MAX_REQUEST_MAX_RETRIES: u64 = 100;
|
||||
|
||||
const OPENAI_PROVIDER_NAME: &str = "OpenAI";
|
||||
pub const OPENAI_PROVIDER_ID: &str = "openai";
|
||||
const CHAT_WIRE_API_REMOVED_ERROR: &str = "`wire_api = \"chat\"` is no longer supported.\nHow to fix: set `wire_api = \"responses\"` in your provider config.\nMore info: https://github.com/openai/codex/discussions/7782";
|
||||
pub const LEGACY_OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
|
||||
pub const OLLAMA_CHAT_PROVIDER_REMOVED_ERROR: &str = "`ollama-chat` is no longer supported.\nHow to fix: replace `ollama-chat` with `ollama` in `model_provider`, `oss_provider`, or `--local-provider`.\nMore info: https://github.com/openai/codex/discussions/7782";
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct EnvVarError {
|
||||
pub var: String,
|
||||
pub instructions: Option<String>,
|
||||
}
|
||||
|
||||
impl fmt::Display for EnvVarError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Missing environment variable: `{}`.", self.var)?;
|
||||
if let Some(instructions) = &self.instructions {
|
||||
write!(f, " {instructions}")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for EnvVarError {}
|
||||
|
||||
/// Wire protocol that the provider speaks.
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum WireApi {
|
||||
/// The Responses API exposed by OpenAI at `/v1/responses`.
|
||||
#[default]
|
||||
Responses,
|
||||
}
|
||||
|
||||
impl fmt::Display for WireApi {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let value = match self {
|
||||
Self::Responses => "responses",
|
||||
};
|
||||
f.write_str(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for WireApi {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let value = String::deserialize(deserializer)?;
|
||||
match value.as_str() {
|
||||
"responses" => Ok(Self::Responses),
|
||||
"chat" => Err(serde::de::Error::custom(CHAT_WIRE_API_REMOVED_ERROR)),
|
||||
_ => Err(serde::de::Error::unknown_variant(&value, &["responses"])),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializable representation of a provider definition.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema)]
|
||||
#[schemars(deny_unknown_fields)]
|
||||
pub struct ModelProviderInfo {
|
||||
/// Friendly display name.
|
||||
pub name: String,
|
||||
/// Base URL for the provider's OpenAI-compatible API.
|
||||
pub base_url: Option<String>,
|
||||
/// Environment variable that stores the user's API key for this provider.
|
||||
pub env_key: Option<String>,
|
||||
/// Optional instructions to help the user get a valid value for the variable and set it.
|
||||
pub env_key_instructions: Option<String>,
|
||||
/// Value to use with `Authorization: Bearer <token>`.
|
||||
pub experimental_bearer_token: Option<String>,
|
||||
/// Which wire protocol this provider expects.
|
||||
#[serde(default)]
|
||||
pub wire_api: WireApi,
|
||||
/// Optional query parameters to append to the base URL.
|
||||
pub query_params: Option<HashMap<String, String>>,
|
||||
/// Additional HTTP headers to include in requests to this provider.
|
||||
pub http_headers: Option<HashMap<String, String>>,
|
||||
/// Additional HTTP headers whose values come from environment variables.
|
||||
pub env_http_headers: Option<HashMap<String, String>>,
|
||||
/// Maximum number of times to retry a failed HTTP request to this provider.
|
||||
pub request_max_retries: Option<u64>,
|
||||
/// Number of times to retry reconnecting a dropped streaming response before failing.
|
||||
pub stream_max_retries: Option<u64>,
|
||||
/// Idle timeout for streaming responses.
|
||||
pub stream_idle_timeout_ms: Option<u64>,
|
||||
/// Maximum time to wait for a websocket connection attempt.
|
||||
pub websocket_connect_timeout_ms: Option<u64>,
|
||||
/// Whether the provider requires OpenAI auth.
|
||||
#[serde(default)]
|
||||
pub requires_openai_auth: bool,
|
||||
/// Whether the provider supports websocket transport.
|
||||
#[serde(default)]
|
||||
pub supports_websockets: bool,
|
||||
}
|
||||
|
||||
impl ModelProviderInfo {
|
||||
fn build_header_map(&self) -> HeaderMap {
|
||||
let capacity = self.http_headers.as_ref().map_or(0, HashMap::len)
|
||||
+ self.env_http_headers.as_ref().map_or(0, HashMap::len);
|
||||
let mut headers = HeaderMap::with_capacity(capacity);
|
||||
if let Some(extra) = &self.http_headers {
|
||||
for (k, v) in extra {
|
||||
if let (Ok(name), Ok(value)) = (HeaderName::try_from(k), HeaderValue::try_from(v)) {
|
||||
headers.insert(name, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(env_headers) = &self.env_http_headers {
|
||||
for (header, env_var) in env_headers {
|
||||
if let Ok(val) = std::env::var(env_var)
|
||||
&& !val.trim().is_empty()
|
||||
&& let (Ok(name), Ok(value)) =
|
||||
(HeaderName::try_from(header), HeaderValue::try_from(val))
|
||||
{
|
||||
headers.insert(name, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
headers
|
||||
}
|
||||
|
||||
pub fn to_api_provider(&self, use_chatgpt_base_url: bool) -> ApiProvider {
|
||||
let default_base_url = if use_chatgpt_base_url {
|
||||
"https://chatgpt.com/backend-api/codex"
|
||||
} else {
|
||||
"https://api.openai.com/v1"
|
||||
};
|
||||
let base_url = self
|
||||
.base_url
|
||||
.clone()
|
||||
.unwrap_or_else(|| default_base_url.to_string());
|
||||
|
||||
let retry = ApiRetryConfig {
|
||||
max_attempts: self.request_max_retries(),
|
||||
base_delay: Duration::from_millis(200),
|
||||
retry_429: false,
|
||||
retry_5xx: true,
|
||||
retry_transport: true,
|
||||
};
|
||||
|
||||
ApiProvider {
|
||||
name: self.name.clone(),
|
||||
base_url,
|
||||
query_params: self.query_params.clone(),
|
||||
headers: self.build_header_map(),
|
||||
retry,
|
||||
stream_idle_timeout: self.stream_idle_timeout(),
|
||||
}
|
||||
}
|
||||
|
||||
/// If `env_key` is Some, returns the API key for this provider if present
|
||||
/// (and non-empty) in the environment.
|
||||
pub fn api_key(&self) -> Result<Option<String>, EnvVarError> {
|
||||
match &self.env_key {
|
||||
Some(env_key) => {
|
||||
let api_key = std::env::var(env_key)
|
||||
.ok()
|
||||
.filter(|v| !v.trim().is_empty())
|
||||
.ok_or_else(|| EnvVarError {
|
||||
var: env_key.clone(),
|
||||
instructions: self.env_key_instructions.clone(),
|
||||
})?;
|
||||
Ok(Some(api_key))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn request_max_retries(&self) -> u64 {
|
||||
self.request_max_retries
|
||||
.unwrap_or(DEFAULT_REQUEST_MAX_RETRIES)
|
||||
.min(MAX_REQUEST_MAX_RETRIES)
|
||||
}
|
||||
|
||||
pub fn stream_max_retries(&self) -> u64 {
|
||||
self.stream_max_retries
|
||||
.unwrap_or(DEFAULT_STREAM_MAX_RETRIES)
|
||||
.min(MAX_STREAM_MAX_RETRIES)
|
||||
}
|
||||
|
||||
pub fn stream_idle_timeout(&self) -> Duration {
|
||||
self.stream_idle_timeout_ms
|
||||
.map(Duration::from_millis)
|
||||
.unwrap_or(Duration::from_millis(DEFAULT_STREAM_IDLE_TIMEOUT_MS))
|
||||
}
|
||||
|
||||
pub fn websocket_connect_timeout(&self) -> Duration {
|
||||
self.websocket_connect_timeout_ms
|
||||
.map(Duration::from_millis)
|
||||
.unwrap_or(Duration::from_millis(DEFAULT_WEBSOCKET_CONNECT_TIMEOUT_MS))
|
||||
}
|
||||
|
||||
pub fn create_openai_provider(base_url: Option<String>) -> ModelProviderInfo {
|
||||
ModelProviderInfo {
|
||||
name: OPENAI_PROVIDER_NAME.into(),
|
||||
base_url,
|
||||
env_key: None,
|
||||
env_key_instructions: None,
|
||||
experimental_bearer_token: None,
|
||||
wire_api: WireApi::Responses,
|
||||
query_params: None,
|
||||
http_headers: Some(
|
||||
[("version".to_string(), env!("CARGO_PKG_VERSION").to_string())]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
),
|
||||
env_http_headers: Some(
|
||||
[
|
||||
(
|
||||
"OpenAI-Organization".to_string(),
|
||||
"OPENAI_ORGANIZATION".to_string(),
|
||||
),
|
||||
("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
|
||||
]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
),
|
||||
request_max_retries: None,
|
||||
stream_max_retries: None,
|
||||
stream_idle_timeout_ms: None,
|
||||
websocket_connect_timeout_ms: None,
|
||||
requires_openai_auth: true,
|
||||
supports_websockets: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_openai(&self) -> bool {
|
||||
self.name == OPENAI_PROVIDER_NAME
|
||||
}
|
||||
}
|
||||
|
||||
pub const DEFAULT_LMSTUDIO_PORT: u16 = 1234;
|
||||
pub const DEFAULT_OLLAMA_PORT: u16 = 11434;
|
||||
|
||||
pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
|
||||
pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
|
||||
|
||||
/// Built-in default provider list.
|
||||
pub fn built_in_model_providers(
|
||||
openai_base_url: Option<String>,
|
||||
) -> HashMap<String, ModelProviderInfo> {
|
||||
use ModelProviderInfo as P;
|
||||
|
||||
let openai_provider = P::create_openai_provider(openai_base_url);
|
||||
[
|
||||
(OPENAI_PROVIDER_ID, openai_provider),
|
||||
(
|
||||
OLLAMA_OSS_PROVIDER_ID,
|
||||
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),
|
||||
),
|
||||
(
|
||||
LMSTUDIO_OSS_PROVIDER_ID,
|
||||
create_oss_provider(DEFAULT_LMSTUDIO_PORT, WireApi::Responses),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn create_oss_provider(default_provider_port: u16, wire_api: WireApi) -> ModelProviderInfo {
|
||||
let default_codex_oss_base_url = format!(
|
||||
"http://localhost:{codex_oss_port}/v1",
|
||||
codex_oss_port = std::env::var("CODEX_OSS_PORT")
|
||||
.ok()
|
||||
.filter(|value| !value.trim().is_empty())
|
||||
.and_then(|value| value.parse::<u16>().ok())
|
||||
.unwrap_or(default_provider_port)
|
||||
);
|
||||
|
||||
let codex_oss_base_url = std::env::var("CODEX_OSS_BASE_URL")
|
||||
.ok()
|
||||
.filter(|v| !v.trim().is_empty())
|
||||
.unwrap_or(default_codex_oss_base_url);
|
||||
create_oss_provider_with_base_url(&codex_oss_base_url, wire_api)
|
||||
}
|
||||
|
||||
pub fn create_oss_provider_with_base_url(base_url: &str, wire_api: WireApi) -> ModelProviderInfo {
|
||||
ModelProviderInfo {
|
||||
name: "gpt-oss".into(),
|
||||
base_url: Some(base_url.into()),
|
||||
env_key: None,
|
||||
env_key_instructions: None,
|
||||
experimental_bearer_token: None,
|
||||
wire_api,
|
||||
query_params: None,
|
||||
http_headers: None,
|
||||
env_http_headers: None,
|
||||
request_max_retries: None,
|
||||
stream_max_retries: None,
|
||||
stream_idle_timeout_ms: None,
|
||||
websocket_connect_timeout_ms: None,
|
||||
requires_openai_auth: false,
|
||||
supports_websockets: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "model_provider_info_tests.rs"]
|
||||
mod tests;
|
||||
@@ -11,27 +11,24 @@ use tokio::fs;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
|
||||
/// Manages loading and saving of models cache to disk.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ModelsCacheManager {
|
||||
pub struct ModelsCacheManager {
|
||||
cache_path: PathBuf,
|
||||
cache_ttl: Duration,
|
||||
}
|
||||
|
||||
impl ModelsCacheManager {
|
||||
/// Create a new cache manager with the given path and TTL.
|
||||
pub(crate) fn new(cache_path: PathBuf, cache_ttl: Duration) -> Self {
|
||||
pub fn new(cache_path: PathBuf, cache_ttl: Duration) -> Self {
|
||||
Self {
|
||||
cache_path,
|
||||
cache_ttl,
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to load a fresh cache entry. Returns `None` if the cache doesn't exist or is stale.
|
||||
pub(crate) async fn load_fresh(&self, expected_version: &str) -> Option<ModelsCache> {
|
||||
pub async fn load_fresh(&self, expected_version: &str) -> Option<ModelsCache> {
|
||||
info!(
|
||||
cache_path = %self.cache_path.display(),
|
||||
expected_version,
|
||||
cache_path = %self.cache_path.display(),
|
||||
expected_version,
|
||||
"models cache: attempting load_fresh"
|
||||
);
|
||||
let cache = match self.load().await {
|
||||
@@ -73,8 +70,7 @@ impl ModelsCacheManager {
|
||||
Some(cache)
|
||||
}
|
||||
|
||||
/// Persist the cache to disk, creating parent directories as needed.
|
||||
pub(crate) async fn persist_cache(
|
||||
pub async fn persist_cache(
|
||||
&self,
|
||||
models: &[ModelInfo],
|
||||
etag: Option<String>,
|
||||
@@ -91,8 +87,7 @@ impl ModelsCacheManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Renew the cache TTL by updating the fetched_at timestamp to now.
|
||||
pub(crate) async fn renew_cache_ttl(&self) -> io::Result<()> {
|
||||
pub async fn renew_cache_ttl(&self) -> io::Result<()> {
|
||||
let mut cache = match self.load().await? {
|
||||
Some(cache) => cache,
|
||||
None => return Err(io::Error::new(ErrorKind::NotFound, "cache not found")),
|
||||
@@ -123,14 +118,12 @@ impl ModelsCacheManager {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Set the cache TTL.
|
||||
pub(crate) fn set_ttl(&mut self, ttl: Duration) {
|
||||
pub fn set_ttl(&mut self, ttl: Duration) {
|
||||
self.cache_ttl = ttl;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Manipulate cache file for testing. Allows setting a custom fetched_at timestamp.
|
||||
pub(crate) async fn manipulate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
||||
pub async fn manipulate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce(&mut DateTime<Utc>),
|
||||
{
|
||||
@@ -143,8 +136,7 @@ impl ModelsCacheManager {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Mutate the full cache contents for testing.
|
||||
pub(crate) async fn mutate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
||||
pub async fn mutate_cache_for_test<F>(&self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce(&mut ModelsCache),
|
||||
{
|
||||
@@ -157,19 +149,17 @@ impl ModelsCacheManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialized snapshot of models and metadata cached on disk.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub(crate) struct ModelsCache {
|
||||
pub(crate) fetched_at: DateTime<Utc>,
|
||||
pub struct ModelsCache {
|
||||
pub fetched_at: DateTime<Utc>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) etag: Option<String>,
|
||||
pub etag: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) client_version: Option<String>,
|
||||
pub(crate) models: Vec<ModelInfo>,
|
||||
pub client_version: Option<String>,
|
||||
pub models: Vec<ModelInfo>,
|
||||
}
|
||||
|
||||
impl ModelsCache {
|
||||
/// Returns `true` when the cache entry has not exceeded the configured TTL.
|
||||
fn is_fresh(&self, ttl: Duration) -> bool {
|
||||
if ttl.is_zero() {
|
||||
return false;
|
||||
@@ -10,18 +10,12 @@ const KNOWN_MODE_NAMES_PLACEHOLDER: &str = "{{KNOWN_MODE_NAMES}}";
|
||||
const REQUEST_USER_INPUT_AVAILABILITY_PLACEHOLDER: &str = "{{REQUEST_USER_INPUT_AVAILABILITY}}";
|
||||
const ASKING_QUESTIONS_GUIDANCE_PLACEHOLDER: &str = "{{ASKING_QUESTIONS_GUIDANCE}}";
|
||||
|
||||
/// Stores feature flags that control collaboration-mode behavior.
|
||||
///
|
||||
/// Keep mode-related flags here so new collaboration-mode capabilities can be
|
||||
/// added without large cross-cutting diffs to constructor and call-site
|
||||
/// signatures.
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct CollaborationModesConfig {
|
||||
/// Enables `request_user_input` availability in Default mode.
|
||||
pub default_mode_request_user_input: bool,
|
||||
}
|
||||
|
||||
pub(crate) fn builtin_collaboration_mode_presets(
|
||||
pub fn builtin_collaboration_mode_presets(
|
||||
collaboration_modes_config: CollaborationModesConfig,
|
||||
) -> Vec<CollaborationModeMask> {
|
||||
vec![plan_preset(), default_preset(collaboration_modes_config)]
|
||||
13
codex-rs/models/src/models_manager/mod.rs
Normal file
13
codex-rs/models/src/models_manager/mod.rs
Normal file
@@ -0,0 +1,13 @@
|
||||
pub mod cache;
|
||||
pub mod collaboration_mode_presets;
|
||||
pub mod model_presets;
|
||||
|
||||
/// Convert the client version string to a whole version string.
|
||||
pub fn client_version_to_whole() -> String {
|
||||
format!(
|
||||
"{}.{}.{}",
|
||||
env!("CARGO_PKG_VERSION_MAJOR"),
|
||||
env!("CARGO_PKG_VERSION_MINOR"),
|
||||
env!("CARGO_PKG_VERSION_PATCH")
|
||||
)
|
||||
}
|
||||
165
codex-rs/models/src/response_debug_context.rs
Normal file
165
codex-rs/models/src/response_debug_context.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
use base64::Engine;
|
||||
use codex_api::TransportError;
|
||||
use codex_api::error::ApiError;
|
||||
|
||||
const REQUEST_ID_HEADER: &str = "x-request-id";
|
||||
const OAI_REQUEST_ID_HEADER: &str = "x-oai-request-id";
|
||||
const CF_RAY_HEADER: &str = "cf-ray";
|
||||
const AUTH_ERROR_HEADER: &str = "x-openai-authorization-error";
|
||||
const X_ERROR_JSON_HEADER: &str = "x-error-json";
|
||||
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct ResponseDebugContext {
|
||||
pub request_id: Option<String>,
|
||||
pub cf_ray: Option<String>,
|
||||
pub auth_error: Option<String>,
|
||||
pub auth_error_code: Option<String>,
|
||||
}
|
||||
|
||||
pub fn extract_response_debug_context(transport: &TransportError) -> ResponseDebugContext {
|
||||
let mut context = ResponseDebugContext::default();
|
||||
|
||||
let TransportError::Http {
|
||||
headers, body: _, ..
|
||||
} = transport
|
||||
else {
|
||||
return context;
|
||||
};
|
||||
|
||||
let extract_header = |name: &str| {
|
||||
headers
|
||||
.as_ref()
|
||||
.and_then(|headers| headers.get(name))
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.map(str::to_string)
|
||||
};
|
||||
|
||||
context.request_id =
|
||||
extract_header(REQUEST_ID_HEADER).or_else(|| extract_header(OAI_REQUEST_ID_HEADER));
|
||||
context.cf_ray = extract_header(CF_RAY_HEADER);
|
||||
context.auth_error = extract_header(AUTH_ERROR_HEADER);
|
||||
context.auth_error_code = extract_header(X_ERROR_JSON_HEADER).and_then(|encoded| {
|
||||
let decoded = base64::engine::general_purpose::STANDARD
|
||||
.decode(encoded)
|
||||
.ok()?;
|
||||
let parsed = serde_json::from_slice::<serde_json::Value>(&decoded).ok()?;
|
||||
parsed
|
||||
.get("error")
|
||||
.and_then(|error| error.get("code"))
|
||||
.and_then(serde_json::Value::as_str)
|
||||
.map(str::to_string)
|
||||
});
|
||||
|
||||
context
|
||||
}
|
||||
|
||||
pub fn extract_response_debug_context_from_api_error(error: &ApiError) -> ResponseDebugContext {
|
||||
match error {
|
||||
ApiError::Transport(transport) => extract_response_debug_context(transport),
|
||||
_ => ResponseDebugContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn telemetry_transport_error_message(error: &TransportError) -> String {
|
||||
match error {
|
||||
TransportError::Http { status, .. } => format!("http {}", status.as_u16()),
|
||||
TransportError::RetryLimit => "retry limit reached".to_string(),
|
||||
TransportError::Timeout => "timeout".to_string(),
|
||||
TransportError::Network(err) => err.to_string(),
|
||||
TransportError::Build(err) => err.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn telemetry_api_error_message(error: &ApiError) -> String {
|
||||
match error {
|
||||
ApiError::Transport(transport) => telemetry_transport_error_message(transport),
|
||||
ApiError::Api { status, .. } => format!("api error {}", status.as_u16()),
|
||||
ApiError::Stream(err) => err.to_string(),
|
||||
ApiError::ContextWindowExceeded => "context window exceeded".to_string(),
|
||||
ApiError::QuotaExceeded => "quota exceeded".to_string(),
|
||||
ApiError::UsageNotIncluded => "usage not included".to_string(),
|
||||
ApiError::Retryable { .. } => "retryable error".to_string(),
|
||||
ApiError::RateLimit(_) => "rate limit".to_string(),
|
||||
ApiError::InvalidRequest { .. } => "invalid request".to_string(),
|
||||
ApiError::ServerOverloaded => "server overloaded".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::ResponseDebugContext;
|
||||
use super::extract_response_debug_context;
|
||||
use super::telemetry_api_error_message;
|
||||
use super::telemetry_transport_error_message;
|
||||
use codex_api::TransportError;
|
||||
use codex_api::error::ApiError;
|
||||
use http::HeaderMap;
|
||||
use http::HeaderValue;
|
||||
use http::StatusCode;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn extract_response_debug_context_decodes_identity_headers() {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert("x-oai-request-id", HeaderValue::from_static("req-auth"));
|
||||
headers.insert("cf-ray", HeaderValue::from_static("ray-auth"));
|
||||
headers.insert(
|
||||
"x-openai-authorization-error",
|
||||
HeaderValue::from_static("missing_authorization_header"),
|
||||
);
|
||||
headers.insert(
|
||||
"x-error-json",
|
||||
HeaderValue::from_static("eyJlcnJvciI6eyJjb2RlIjoidG9rZW5fZXhwaXJlZCJ9fQ=="),
|
||||
);
|
||||
|
||||
let context = extract_response_debug_context(&TransportError::Http {
|
||||
status: StatusCode::UNAUTHORIZED,
|
||||
url: Some("https://chatgpt.com/backend-api/codex/models".to_string()),
|
||||
headers: Some(headers),
|
||||
body: Some(r#"{"error":{"message":"plain text error"},"status":401}"#.to_string()),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
context,
|
||||
ResponseDebugContext {
|
||||
request_id: Some("req-auth".to_string()),
|
||||
cf_ray: Some("ray-auth".to_string()),
|
||||
auth_error: Some("missing_authorization_header".to_string()),
|
||||
auth_error_code: Some("token_expired".to_string()),
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn telemetry_error_messages_omit_http_bodies() {
|
||||
let transport = TransportError::Http {
|
||||
status: StatusCode::UNAUTHORIZED,
|
||||
url: Some("https://chatgpt.com/backend-api/codex/responses".to_string()),
|
||||
headers: None,
|
||||
body: Some(r#"{"error":{"message":"secret token leaked"}}"#.to_string()),
|
||||
};
|
||||
|
||||
assert_eq!(telemetry_transport_error_message(&transport), "http 401");
|
||||
assert_eq!(
|
||||
telemetry_api_error_message(&ApiError::Transport(transport)),
|
||||
"http 401"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn telemetry_error_messages_preserve_non_http_details() {
|
||||
let network = TransportError::Network("dns lookup failed".to_string());
|
||||
let build = TransportError::Build("invalid header value".to_string());
|
||||
let stream = ApiError::Stream("socket closed".to_string());
|
||||
|
||||
assert_eq!(
|
||||
telemetry_transport_error_message(&network),
|
||||
"dns lookup failed"
|
||||
);
|
||||
assert_eq!(
|
||||
telemetry_transport_error_message(&build),
|
||||
"invalid header value"
|
||||
);
|
||||
assert_eq!(telemetry_api_error_message(&stream), "socket closed");
|
||||
}
|
||||
}
|
||||
11
codex-rs/models/templates/collaboration_mode/default.md
Normal file
11
codex-rs/models/templates/collaboration_mode/default.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Collaboration Mode: Default
|
||||
|
||||
You are now in Default mode. Any previous instructions for other modes (e.g. Plan mode) are no longer active.
|
||||
|
||||
Your active mode changes only when new developer instructions with a different `<collaboration_mode>...</collaboration_mode>` change it; user requests or tool descriptions do not change mode by themselves. Known mode names are {{KNOWN_MODE_NAMES}}.
|
||||
|
||||
## request_user_input availability
|
||||
|
||||
{{REQUEST_USER_INPUT_AVAILABILITY}}
|
||||
|
||||
{{ASKING_QUESTIONS_GUIDANCE}}
|
||||
128
codex-rs/models/templates/collaboration_mode/plan.md
Normal file
128
codex-rs/models/templates/collaboration_mode/plan.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# Plan Mode (Conversational)
|
||||
|
||||
You work in 3 phases, and you should *chat your way* to a great plan before finalizing it. A great plan is very detailed—intent- and implementation-wise—so that it can be handed to another engineer or agent to be implemented right away. It must be **decision complete**, where the implementer does not need to make any decisions.
|
||||
|
||||
## Mode rules (strict)
|
||||
|
||||
You are in **Plan Mode** until a developer message explicitly ends it.
|
||||
|
||||
Plan Mode is not changed by user intent, tone, or imperative language. If a user asks for execution while still in Plan Mode, treat it as a request to **plan the execution**, not perform it.
|
||||
|
||||
## Plan Mode vs update_plan tool
|
||||
|
||||
Plan Mode is a collaboration mode that can involve requesting user input and eventually issuing a `<proposed_plan>` block.
|
||||
|
||||
Separately, `update_plan` is a checklist/progress/TODOs tool; it does not enter or exit Plan Mode. Do not confuse it with Plan Mode or try to use it while in Plan Mode. If you try to use `update_plan` in Plan Mode, it will return an error.
|
||||
|
||||
## Execution vs. mutation in Plan Mode
|
||||
|
||||
You may explore and execute **non-mutating** actions that improve the plan. You must not perform **mutating** actions.
|
||||
|
||||
### Allowed (non-mutating, plan-improving)
|
||||
|
||||
Actions that gather truth, reduce ambiguity, or validate feasibility without changing repo-tracked state. Examples:
|
||||
|
||||
* Reading or searching files, configs, schemas, types, manifests, and docs
|
||||
* Static analysis, inspection, and repo exploration
|
||||
* Dry-run style commands when they do not edit repo-tracked files
|
||||
* Tests, builds, or checks that may write to caches or build artifacts (for example, `target/`, `.cache/`, or snapshots) so long as they do not edit repo-tracked files
|
||||
|
||||
### Not allowed (mutating, plan-executing)
|
||||
|
||||
Actions that implement the plan or change repo-tracked state. Examples:
|
||||
|
||||
* Editing or writing files
|
||||
* Running formatters or linters that rewrite files
|
||||
* Applying patches, migrations, or codegen that updates repo-tracked files
|
||||
* Side-effectful commands whose purpose is to carry out the plan rather than refine it
|
||||
|
||||
When in doubt: if the action would reasonably be described as "doing the work" rather than "planning the work," do not do it.
|
||||
|
||||
## PHASE 1 — Ground in the environment (explore first, ask second)
|
||||
|
||||
Begin by grounding yourself in the actual environment. Eliminate unknowns in the prompt by discovering facts, not by asking the user. Resolve all questions that can be answered through exploration or inspection. Identify missing or ambiguous details only if they cannot be derived from the environment. Silent exploration between turns is allowed and encouraged.
|
||||
|
||||
Before asking the user any question, perform at least one targeted non-mutating exploration pass (for example: search relevant files, inspect likely entrypoints/configs, confirm current implementation shape), unless no local environment/repo is available.
|
||||
|
||||
Exception: you may ask clarifying questions about the user's prompt before exploring, ONLY if there are obvious ambiguities or contradictions in the prompt itself. However, if ambiguity might be resolved by exploring, always prefer exploring first.
|
||||
|
||||
Do not ask questions that can be answered from the repo or system (for example, "where is this struct?" or "which UI component should we use?" when exploration can make it clear). Only ask once you have exhausted reasonable non-mutating exploration.
|
||||
|
||||
## PHASE 2 — Intent chat (what they actually want)
|
||||
|
||||
* Keep asking until you can clearly state: goal + success criteria, audience, in/out of scope, constraints, current state, and the key preferences/tradeoffs.
|
||||
* Bias toward questions over guessing: if any high-impact ambiguity remains, do NOT plan yet—ask.
|
||||
|
||||
## PHASE 3 — Implementation chat (what/how we’ll build)
|
||||
|
||||
* Once intent is stable, keep asking until the spec is decision complete: approach, interfaces (APIs/schemas/I/O), data flow, edge cases/failure modes, testing + acceptance criteria, rollout/monitoring, and any migrations/compat constraints.
|
||||
|
||||
## Asking questions
|
||||
|
||||
Critical rules:
|
||||
|
||||
* Strongly prefer using the `request_user_input` tool to ask any questions.
|
||||
* Offer only meaningful multiple‑choice options; don’t include filler choices that are obviously wrong or irrelevant.
|
||||
* In rare cases where an unavoidable, important question can’t be expressed with reasonable multiple‑choice options (due to extreme ambiguity), you may ask it directly without the tool.
|
||||
|
||||
You SHOULD ask many questions, but each question must:
|
||||
|
||||
* materially change the spec/plan, OR
|
||||
* confirm/lock an assumption, OR
|
||||
* choose between meaningful tradeoffs.
|
||||
* not be answerable by non-mutating commands.
|
||||
|
||||
Use the `request_user_input` tool only for decisions that materially change the plan, for confirming important assumptions, or for information that cannot be discovered via non-mutating exploration.
|
||||
|
||||
## Two kinds of unknowns (treat differently)
|
||||
|
||||
1. **Discoverable facts** (repo/system truth): explore first.
|
||||
|
||||
* Before asking, run targeted searches and check likely sources of truth (configs/manifests/entrypoints/schemas/types/constants).
|
||||
* Ask only if: multiple plausible candidates; nothing found but you need a missing identifier/context; or ambiguity is actually product intent.
|
||||
* If asking, present concrete candidates (paths/service names) + recommend one.
|
||||
* Never ask questions you can answer from your environment (e.g., “where is this struct”).
|
||||
|
||||
2. **Preferences/tradeoffs** (not discoverable): ask early.
|
||||
|
||||
* These are intent or implementation preferences that cannot be derived from exploration.
|
||||
* Provide 2–4 mutually exclusive options + a recommended default.
|
||||
* If unanswered, proceed with the recommended option and record it as an assumption in the final plan.
|
||||
|
||||
## Finalization rule
|
||||
|
||||
Only output the final plan when it is decision complete and leaves no decisions to the implementer.
|
||||
|
||||
When you present the official plan, wrap it in a `<proposed_plan>` block so the client can render it specially:
|
||||
|
||||
1) The opening tag must be on its own line.
|
||||
2) Start the plan content on the next line (no text on the same line as the tag).
|
||||
3) The closing tag must be on its own line.
|
||||
4) Use Markdown inside the block.
|
||||
5) Keep the tags exactly as `<proposed_plan>` and `</proposed_plan>` (do not translate or rename them), even if the plan content is in another language.
|
||||
|
||||
Example:
|
||||
|
||||
<proposed_plan>
|
||||
plan content
|
||||
</proposed_plan>
|
||||
|
||||
Plan content should be human- and agent-digestible. The final plan must be plan-only, concise by default, and include:
|
||||
|
||||
* A clear title
|
||||
* A brief summary section
|
||||
* Important changes or additions to public APIs/interfaces/types
|
||||
* Test cases and scenarios
|
||||
* Explicit assumptions and defaults chosen where needed
|
||||
|
||||
When possible, prefer a compact structure with 3-5 short sections, usually: Summary, Key Changes or Implementation Changes, Test Plan, and Assumptions. Do not include a separate Scope section unless scope boundaries are genuinely important to avoid mistakes.
|
||||
|
||||
Prefer grouped implementation bullets by subsystem or behavior over file-by-file inventories. Mention files only when needed to disambiguate a non-obvious change, and avoid naming more than 3 paths unless extra specificity is necessary to prevent mistakes. Prefer behavior-level descriptions over symbol-by-symbol removal lists. For v1 feature-addition plans, do not invent detailed schema, validation, precedence, fallback, or wire-shape policy unless the request establishes it or it is needed to prevent a concrete implementation mistake; prefer the intended capability and minimum interface/behavior changes.
|
||||
|
||||
Keep bullets short and avoid explanatory sub-bullets unless they are needed to prevent ambiguity. Prefer the minimum detail needed for implementation safety, not exhaustive coverage. Within each section, compress related changes into a few high-signal bullets and omit branch-by-branch logic, repeated invariants, and long lists of unaffected behavior unless they are necessary to prevent a likely implementation mistake. Avoid repeated repo facts and irrelevant edge-case or rollout detail. For straightforward refactors, keep the plan to a compact summary, key edits, tests, and assumptions. If the user asks for more detail, then expand.
|
||||
|
||||
Do not ask "should I proceed?" in the final output. The user can easily switch out of Plan mode and request implementation if you have included a `<proposed_plan>` block in your response. Alternatively, they can decide to stay in Plan mode and continue refining the plan.
|
||||
|
||||
Only produce at most one `<proposed_plan>` block per turn, and only when you are presenting a complete spec.
|
||||
|
||||
If the user stays in Plan mode and asks for revisions after a prior `<proposed_plan>`, any new `<proposed_plan>` must be a complete replacement.
|
||||
Reference in New Issue
Block a user