chore: migrate from Config::load_from_base_config_with_overrides to ConfigBuilder (#8276)

https://github.com/openai/codex/pull/8235 introduced `ConfigBuilder` and
this PR updates all non-test call sites to use it instead of
`Config::load_from_base_config_with_overrides()`.

This is important because `load_from_base_config_with_overrides()` uses
an empty `ConfigRequirements`, which is a reasonable default for testing
because it keeps tests from being influenced by the settings on the
host. This method is now guarded by `#[cfg(test)]` so it cannot be used
by business logic.

Because `ConfigBuilder::build()` is `async`, many of the test methods
had to be migrated to be `async`, as well. On the bright side, this made
it possible to eliminate a bunch of `block_on_future()` stuff.
This commit is contained in:
Michael Bolin
2025-12-18 16:12:52 -08:00
committed by GitHub
parent 2d9826098e
commit 3d4ced3ff5
42 changed files with 1081 additions and 1176 deletions

View File

@@ -2134,8 +2134,8 @@ mod tests {
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender();
async fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
let config = chat_widget.config_ref().clone();
let current_model = chat_widget.get_model_family().get_model_slug().to_string();
let server = Arc::new(ConversationManager::with_models_provider(
@@ -2173,12 +2173,12 @@ mod tests {
}
}
fn make_test_app_with_channels() -> (
async fn make_test_app_with_channels() -> (
App,
tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
tokio::sync::mpsc::UnboundedReceiver<Op>,
) {
let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender();
let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender().await;
let config = chat_widget.config_ref().clone();
let current_model = chat_widget.get_model_family().get_model_slug().to_string();
let server = Arc::new(ConversationManager::with_models_provider(
@@ -2224,8 +2224,8 @@ mod tests {
codex_core::openai_models::model_presets::all_model_presets().clone()
}
#[test]
fn model_migration_prompt_only_shows_for_deprecated_models() {
#[tokio::test]
async fn model_migration_prompt_only_shows_for_deprecated_models() {
let seen = BTreeMap::new();
assert!(should_show_model_migration_prompt(
"gpt-5",
@@ -2259,8 +2259,8 @@ mod tests {
));
}
#[test]
fn model_migration_prompt_respects_hide_flag_and_self_target() {
#[tokio::test]
async fn model_migration_prompt_respects_hide_flag_and_self_target() {
let mut seen = BTreeMap::new();
seen.insert("gpt-5".to_string(), "gpt-5.1".to_string());
assert!(!should_show_model_migration_prompt(
@@ -2277,9 +2277,9 @@ mod tests {
));
}
#[test]
fn update_reasoning_effort_updates_config() {
let mut app = make_test_app();
#[tokio::test]
async fn update_reasoning_effort_updates_config() {
let mut app = make_test_app().await;
app.config.model_reasoning_effort = Some(ReasoningEffortConfig::Medium);
app.chat_widget
.set_reasoning_effort(Some(ReasoningEffortConfig::Medium));
@@ -2296,9 +2296,9 @@ mod tests {
);
}
#[test]
fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
let mut app = make_test_app();
#[tokio::test]
async fn backtrack_selection_with_duplicate_history_targets_unique_turn() {
let mut app = make_test_app().await;
let user_cell = |text: &str| -> Arc<dyn HistoryCell> {
Arc::new(UserHistoryCell {
@@ -2363,12 +2363,12 @@ mod tests {
assert_eq!(prefill, "follow-up (edited)");
}
#[test]
fn transcript_selection_moves_with_scroll() {
#[tokio::test]
async fn transcript_selection_moves_with_scroll() {
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
let mut app = make_test_app();
let mut app = make_test_app().await;
app.transcript_total_lines = 3;
let area = Rect {
@@ -2427,7 +2427,7 @@ mod tests {
#[tokio::test]
async fn new_session_requests_shutdown_for_previous_conversation() {
let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels();
let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels().await;
let conversation_id = ConversationId::new();
let event = SessionConfiguredEvent {
@@ -2461,13 +2461,13 @@ mod tests {
}
}
#[test]
fn session_summary_skip_zero_usage() {
#[tokio::test]
async fn session_summary_skip_zero_usage() {
assert!(session_summary(TokenUsage::default(), None).is_none());
}
#[test]
fn render_lines_to_ansi_pads_user_rows_to_full_width() {
#[tokio::test]
async fn render_lines_to_ansi_pads_user_rows_to_full_width() {
let line: Line<'static> = Line::from("hi");
let lines = vec![line];
let line_meta = vec![TranscriptLineMeta::CellLine {
@@ -2482,8 +2482,8 @@ mod tests {
assert!(rendered[0].contains("hi"));
}
#[test]
fn session_summary_includes_resume_hint() {
#[tokio::test]
async fn session_summary_includes_resume_hint() {
let usage = TokenUsage {
input_tokens: 10,
output_tokens: 2,

File diff suppressed because it is too large Load Diff

View File

@@ -1514,8 +1514,7 @@ mod tests {
use crate::exec_cell::ExecCall;
use crate::exec_cell::ExecCell;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::ConfigBuilder;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::openai_models::models_manager::ModelsManager;
@@ -1532,14 +1531,13 @@ mod tests {
use mcp_types::TextContent;
use mcp_types::Tool;
use mcp_types::ToolInputSchema;
fn test_config() -> Config {
Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
std::env::temp_dir(),
)
.expect("config")
async fn test_config() -> Config {
let codex_home = std::env::temp_dir();
ConfigBuilder::default()
.codex_home(codex_home.clone())
.build()
.await
.expect("config")
}
fn render_lines(lines: &[Line<'static>]) -> Vec<String> {
@@ -1558,9 +1556,9 @@ mod tests {
render_lines(&cell.transcript_lines(u16::MAX))
}
#[test]
fn mcp_tools_output_masks_sensitive_values() {
let mut config = test_config();
#[tokio::test]
async fn mcp_tools_output_masks_sensitive_values() {
let mut config = test_config().await;
let mut env = HashMap::new();
env.insert("TOKEN".to_string(), "secret".to_string());
let stdio_config = McpServerConfig {
@@ -2391,9 +2389,9 @@ mod tests {
assert_eq!(rendered, vec!["• Detailed reasoning goes here."]);
}
#[test]
fn reasoning_summary_block_respects_config_overrides() {
let mut config = test_config();
#[tokio::test]
async fn reasoning_summary_block_respects_config_overrides() {
let mut config = test_config().await;
config.model = Some("gpt-3.5-turbo".to_string());
config.model_supports_reasoning_summaries = Some(true);
config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);

View File

@@ -625,21 +625,23 @@ fn should_show_login_screen(login_status: LoginStatus, config: &Config) -> bool
#[cfg(test)]
mod tests {
use super::*;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::ConfigBuilder;
use codex_core::config::ProjectConfig;
use serial_test::serial;
use tempfile::TempDir;
#[test]
async fn build_config(temp_dir: &TempDir) -> std::io::Result<Config> {
ConfigBuilder::default()
.codex_home(temp_dir.path().to_path_buf())
.build()
.await
}
#[tokio::test]
#[serial]
fn windows_skips_trust_prompt_without_sandbox() -> std::io::Result<()> {
async fn windows_skips_trust_prompt_without_sandbox() -> std::io::Result<()> {
let temp_dir = TempDir::new()?;
let mut config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)?;
let mut config = build_config(&temp_dir).await?;
config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
config.active_project = ProjectConfig { trust_level: None };
config.set_windows_sandbox_globally(false);
@@ -658,15 +660,11 @@ mod tests {
}
Ok(())
}
#[test]
#[tokio::test]
#[serial]
fn windows_shows_trust_prompt_with_sandbox() -> std::io::Result<()> {
async fn windows_shows_trust_prompt_with_sandbox() -> std::io::Result<()> {
let temp_dir = TempDir::new()?;
let mut config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)?;
let mut config = build_config(&temp_dir).await?;
config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
config.active_project = ProjectConfig { trust_level: None };
config.set_windows_sandbox_globally(true);
@@ -685,15 +683,11 @@ mod tests {
}
Ok(())
}
#[test]
fn untrusted_project_skips_trust_prompt() -> std::io::Result<()> {
#[tokio::test]
async fn untrusted_project_skips_trust_prompt() -> std::io::Result<()> {
use codex_protocol::config_types::TrustLevel;
let temp_dir = TempDir::new()?;
let mut config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)?;
let mut config = build_config(&temp_dir).await?;
config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
config.active_project = ProjectConfig {
trust_level: Some(TrustLevel::Untrusted),

View File

@@ -1059,7 +1059,6 @@ mod tests {
use crossterm::event::KeyModifiers;
use insta::assert_snapshot;
use serde_json::json;
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
@@ -1106,14 +1105,6 @@ mod tests {
}
}
fn block_on_future<F: Future<Output = T>, T>(future: F) -> T {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(future)
}
#[test]
fn preview_uses_first_message_input_text() {
let head = vec![
@@ -1267,8 +1258,8 @@ mod tests {
assert_snapshot!("resume_picker_table", snapshot);
}
#[test]
fn resume_picker_screen_snapshot() {
#[tokio::test]
async fn resume_picker_screen_snapshot() {
use crate::custom_terminal::Terminal;
use crate::test_backend::VT100Backend;
use uuid::Uuid;
@@ -1360,14 +1351,15 @@ mod tests {
None,
);
let page = block_on_future(RolloutRecorder::list_conversations(
let page = RolloutRecorder::list_conversations(
&state.codex_home,
PAGE_SIZE,
None,
INTERACTIVE_SESSION_SOURCES,
Some(&[String::from("openai")]),
"openai",
))
)
.await
.expect("list conversations");
let rows = rows_from_items(page.items);
@@ -1526,8 +1518,8 @@ mod tests {
assert!(guard[0].search_token.is_none());
}
#[test]
fn page_navigation_uses_view_rows() {
#[tokio::test]
async fn page_navigation_uses_view_rows() {
let loader: PageLoader = Arc::new(|_| {});
let mut state = PickerState::new(
PathBuf::from("/tmp"),
@@ -1551,33 +1543,27 @@ mod tests {
state.update_view_rows(5);
assert_eq!(state.selected, 0);
block_on_future(async {
state
.handle_key(KeyEvent::new(KeyCode::PageDown, KeyModifiers::NONE))
.await
.unwrap();
});
state
.handle_key(KeyEvent::new(KeyCode::PageDown, KeyModifiers::NONE))
.await
.unwrap();
assert_eq!(state.selected, 5);
block_on_future(async {
state
.handle_key(KeyEvent::new(KeyCode::PageDown, KeyModifiers::NONE))
.await
.unwrap();
});
state
.handle_key(KeyEvent::new(KeyCode::PageDown, KeyModifiers::NONE))
.await
.unwrap();
assert_eq!(state.selected, 10);
block_on_future(async {
state
.handle_key(KeyEvent::new(KeyCode::PageUp, KeyModifiers::NONE))
.await
.unwrap();
});
state
.handle_key(KeyEvent::new(KeyCode::PageUp, KeyModifiers::NONE))
.await
.unwrap();
assert_eq!(state.selected, 5);
}
#[test]
fn up_at_bottom_does_not_scroll_when_visible() {
#[tokio::test]
async fn up_at_bottom_does_not_scroll_when_visible() {
let loader: PageLoader = Arc::new(|_| {});
let mut state = PickerState::new(
PathBuf::from("/tmp"),
@@ -1606,12 +1592,10 @@ mod tests {
let initial_top = state.scroll_top;
assert_eq!(initial_top, state.filtered_rows.len().saturating_sub(5));
block_on_future(async {
state
.handle_key(KeyEvent::new(KeyCode::Up, KeyModifiers::NONE))
.await
.unwrap();
});
state
.handle_key(KeyEvent::new(KeyCode::Up, KeyModifiers::NONE))
.await
.unwrap();
assert_eq!(state.scroll_top, initial_top);
assert_eq!(state.selected, state.filtered_rows.len().saturating_sub(2));

View File

@@ -6,8 +6,7 @@ use chrono::TimeZone;
use chrono::Utc;
use codex_core::AuthManager;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::ConfigBuilder;
use codex_core::openai_models::model_family::ModelFamily;
use codex_core::openai_models::models_manager::ModelsManager;
use codex_core::protocol::CreditsSnapshot;
@@ -22,13 +21,12 @@ use ratatui::prelude::*;
use std::path::PathBuf;
use tempfile::TempDir;
fn test_config(temp_home: &TempDir) -> Config {
Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
temp_home.path().to_path_buf(),
)
.expect("load config")
async fn test_config(temp_home: &TempDir) -> Config {
ConfigBuilder::default()
.codex_home(temp_home.path().to_path_buf())
.build()
.await
.expect("load config")
}
fn test_auth_manager(config: &Config) -> AuthManager {
@@ -84,10 +82,10 @@ fn reset_at_from(captured_at: &chrono::DateTime<chrono::Local>, seconds: i64) ->
.timestamp()
}
#[test]
fn status_snapshot_includes_reasoning_details() {
#[tokio::test]
async fn status_snapshot_includes_reasoning_details() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
@@ -155,10 +153,10 @@ fn status_snapshot_includes_reasoning_details() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_includes_monthly_limit() {
#[tokio::test]
async fn status_snapshot_includes_monthly_limit() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.model_provider_id = "openai".to_string();
config.cwd = PathBuf::from("/workspace/tests");
@@ -212,10 +210,10 @@ fn status_snapshot_includes_monthly_limit() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_shows_unlimited_credits() {
#[tokio::test]
async fn status_snapshot_shows_unlimited_credits() {
let temp_home = TempDir::new().expect("temp home");
let config = test_config(&temp_home);
let config = test_config(&temp_home).await;
let auth_manager = test_auth_manager(&config);
let usage = TokenUsage::default();
let captured_at = chrono::Local
@@ -256,10 +254,10 @@ fn status_snapshot_shows_unlimited_credits() {
);
}
#[test]
fn status_snapshot_shows_positive_credits() {
#[tokio::test]
async fn status_snapshot_shows_positive_credits() {
let temp_home = TempDir::new().expect("temp home");
let config = test_config(&temp_home);
let config = test_config(&temp_home).await;
let auth_manager = test_auth_manager(&config);
let usage = TokenUsage::default();
let captured_at = chrono::Local
@@ -300,10 +298,10 @@ fn status_snapshot_shows_positive_credits() {
);
}
#[test]
fn status_snapshot_hides_zero_credits() {
#[tokio::test]
async fn status_snapshot_hides_zero_credits() {
let temp_home = TempDir::new().expect("temp home");
let config = test_config(&temp_home);
let config = test_config(&temp_home).await;
let auth_manager = test_auth_manager(&config);
let usage = TokenUsage::default();
let captured_at = chrono::Local
@@ -342,10 +340,10 @@ fn status_snapshot_hides_zero_credits() {
);
}
#[test]
fn status_snapshot_hides_when_has_no_credits_flag() {
#[tokio::test]
async fn status_snapshot_hides_when_has_no_credits_flag() {
let temp_home = TempDir::new().expect("temp home");
let config = test_config(&temp_home);
let config = test_config(&temp_home).await;
let auth_manager = test_auth_manager(&config);
let usage = TokenUsage::default();
let captured_at = chrono::Local
@@ -384,10 +382,10 @@ fn status_snapshot_hides_when_has_no_credits_flag() {
);
}
#[test]
fn status_card_token_usage_excludes_cached_tokens() {
#[tokio::test]
async fn status_card_token_usage_excludes_cached_tokens() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -427,10 +425,10 @@ fn status_card_token_usage_excludes_cached_tokens() {
);
}
#[test]
fn status_snapshot_truncates_in_narrow_terminal() {
#[tokio::test]
async fn status_snapshot_truncates_in_narrow_terminal() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
@@ -487,10 +485,10 @@ fn status_snapshot_truncates_in_narrow_terminal() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_shows_missing_limits_message() {
#[tokio::test]
async fn status_snapshot_shows_missing_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -532,10 +530,10 @@ fn status_snapshot_shows_missing_limits_message() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_includes_credits_and_limits() {
#[tokio::test]
async fn status_snapshot_includes_credits_and_limits() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -596,10 +594,10 @@ fn status_snapshot_includes_credits_and_limits() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_shows_empty_limits_message() {
#[tokio::test]
async fn status_snapshot_shows_empty_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -648,10 +646,10 @@ fn status_snapshot_shows_empty_limits_message() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_shows_stale_limits_message() {
#[tokio::test]
async fn status_snapshot_shows_stale_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex-max".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -709,10 +707,10 @@ fn status_snapshot_shows_stale_limits_message() {
assert_snapshot!(sanitized);
}
#[test]
fn status_snapshot_cached_limits_hide_credits_without_flag() {
#[tokio::test]
async fn status_snapshot_cached_limits_hide_credits_without_flag() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model = Some("gpt-5.1-codex".to_string());
config.cwd = PathBuf::from("/workspace/tests");
@@ -774,10 +772,10 @@ fn status_snapshot_cached_limits_hide_credits_without_flag() {
assert_snapshot!(sanitized);
}
#[test]
fn status_context_window_uses_last_usage() {
#[tokio::test]
async fn status_context_window_uses_last_usage() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
let mut config = test_config(&temp_home).await;
config.model_context_window = Some(272_000);
let auth_manager = test_auth_manager(&config);