diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index eb4eccd897..649b7ac33d 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -873,6 +873,7 @@ dependencies = [ "supports-color", "textwrap 0.16.2", "tokio", + "toml 0.8.23", "tracing", "tracing-appender", "tracing-subscriber", @@ -4814,6 +4815,7 @@ dependencies = [ "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", + "toml_write", "winnow", ] @@ -4826,6 +4828,12 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + [[package]] name = "toml_writer" version = "1.0.2" diff --git a/codex-rs/config.md b/codex-rs/config.md index c7dfe42a75..fe42cb63f1 100644 --- a/codex-rs/config.md +++ b/codex-rs/config.md @@ -3,6 +3,7 @@ Codex supports several mechanisms for setting config values: - Config-specific command-line flags, such as `--model o3` (highest precedence). +- Convenience provider flags, such as `--ollama` (equivalent to `-c model_provider=ollama`). - A generic `-c`/`--config` flag that takes a `key=value` pair, such as `--config model="o3"`. - The key can contain dots to set a value deeper than the root, e.g. `--config model_providers.openai.wire_api="chat"`. - Values can contain objects, such as `--config shell_environment_policy.include_only=["PATH", "HOME", "USER"]`. @@ -56,6 +57,13 @@ name = "Ollama" base_url = "http://localhost:11434/v1" ``` +Alternatively, you can pass `--ollama` on the CLI, which is equivalent to `-c model_provider=ollama`. +When using `--ollama`, Codex will verify that an Ollama server is running locally and +will create a `[model_providers.ollama]` entry in your `config.toml` with sensible defaults +(`base_url = "http://localhost:11434/v1"`, `wire_api = "chat"`) if one does not already exist. +If no running Ollama server is detected, Codex will print instructions to install/start Ollama +and exit: https://github.com/ollama/ollama?tab=readme-ov-file#ollama + Or a third-party provider (using a distinct environment variable for the API key): ```toml diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs index b43dc56ba0..4487c41439 100644 --- a/codex-rs/core/src/config.rs +++ b/codex-rs/core/src/config.rs @@ -11,6 +11,7 @@ use crate::config_types::Tui; use crate::config_types::UriBasedFileOpener; use crate::flags::OPENAI_DEFAULT_MODEL; use crate::model_provider_info::ModelProviderInfo; +use crate::model_provider_info::WireApi; use crate::model_provider_info::built_in_model_providers; use crate::openai_model_info::get_model_info; use crate::protocol::AskForApproval; @@ -187,6 +188,130 @@ impl Config { } } +/// Ensure that when the `--ollama` flag is used, there is a configured +/// `model_providers.ollama` entry in `config.toml` and that an Ollama server is +/// reachable. If a provider entry is missing but a local server is running, +/// this function will create the provider entry with sensible defaults. +/// +/// Returns `Ok(())` when the provider exists and the server is reachable. If +/// the server is not reachable, returns an error that callers can surface to +/// the end user (and should exit the process). +pub async fn ensure_ollama_provider_configured_and_running() -> std::io::Result<()> { + use std::time::Duration; + + // Resolve the Codex home directory and parse the current config (if + // present). 
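+    // Note: `load_config_as_toml` returns an empty TOML table when
+    // `config.toml` does not exist yet, so a missing file behaves the same as
+    // one without a `model_providers.ollama` entry.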
+ let codex_home = find_codex_home()?; + let mut root_value = load_config_as_toml(&codex_home)?; + + // Determine whether an ollama provider entry already exists and select a + // base URL to probe for the health check. If a provider entry exists and + // specifies a base_url, use it; otherwise fall back to the default. + let (has_ollama_entry, base_url_to_use) = match &root_value { + TomlValue::Table(tbl) => { + let model_providers = tbl.get("model_providers"); + match model_providers { + Some(TomlValue::Table(mp_tbl)) => match mp_tbl.get("ollama") { + Some(TomlValue::Table(ollama_tbl)) => { + // Use the configured base_url when present; otherwise default. + let base_url = ollama_tbl + .get("base_url") + .and_then(|v| v.as_str()) + .unwrap_or("http://localhost:11434/v1") + .to_string(); + (true, base_url) + } + _ => (false, "http://localhost:11434/v1".to_string()), + }, + _ => (false, "http://localhost:11434/v1".to_string()), + } + } + _ => (false, "http://localhost:11434/v1".to_string()), + }; + + // Select a URL to probe to determine if an Ollama server is reachable. + // Prefer the OpenAI-compatible endpoint when the base_url ends with /v1, + // fall back to the native Ollama tags endpoint otherwise. + let probe_path = if base_url_to_use.trim_end_matches('/').ends_with("/v1") { + "models" + } else { + "api/tags" + }; + let probe_url = format!( + "{}/{}", + base_url_to_use.trim_end_matches('/'), + probe_path.trim_start_matches('/') + ); + + // Probe the server with a short timeout. + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(2)) + .build() + .map_err(std::io::Error::other)?; + let resp = client.get(probe_url).send().await; + let server_ok = match resp { + Ok(r) => r.status().is_success(), + Err(_) => false, + }; + + if !server_ok { + return Err(std::io::Error::other( + "No running Ollama server detected. Please install/start Ollama: https://github.com/ollama/ollama?tab=readme-ov-file#ollama", + )); + } + + // If no provider entry existed, create one with sensible defaults and + // persist back to config.toml. + if !has_ollama_entry { + use toml::value::Table as TomlTable; + + // Ensure we have a root table to operate on. + if !matches!(root_value, TomlValue::Table(_)) { + root_value = TomlValue::Table(TomlTable::new()); + } + let root_tbl = match &mut root_value { + TomlValue::Table(tbl) => tbl, + _ => unreachable!(), + }; + + // Create or fetch the model_providers table. + let mp_val = root_tbl + .entry("model_providers".to_string()) + .or_insert_with(|| TomlValue::Table(TomlTable::new())); + let mp_tbl = match mp_val { + TomlValue::Table(t) => t, + _ => { + *mp_val = TomlValue::Table(TomlTable::new()); + match mp_val { + TomlValue::Table(t) => t, + _ => unreachable!(), + } + } + }; + + // Insert the ollama provider entry. + let mut ollama_tbl = TomlTable::new(); + ollama_tbl.insert("name".to_string(), TomlValue::String("Ollama".to_string())); + ollama_tbl.insert( + "base_url".to_string(), + TomlValue::String("http://localhost:11434/v1".to_string()), + ); + ollama_tbl.insert( + "wire_api".to_string(), + TomlValue::String("chat".to_string()), + ); + mp_tbl.insert("ollama".to_string(), TomlValue::Table(ollama_tbl)); + + // Persist the updated TOML back to disk. 
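+        // For illustration, the entry inserted above serializes to roughly the
+        // following TOML (the same defaults documented in config.md):
+        //
+        //   [model_providers.ollama]
+        //   name = "Ollama"
+        //   base_url = "http://localhost:11434/v1"
+        //   wire_api = "chat"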
+        std::fs::create_dir_all(&codex_home)?;
+        let config_path = codex_home.join("config.toml");
+        let updated = toml::to_string_pretty(&root_value).map_err(std::io::Error::other)?;
+        std::fs::write(config_path, updated)?;
+    }
+
+    Ok(())
+}
+
 /// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns
 /// an empty TOML table when the file does not exist.
 fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
@@ -428,6 +553,28 @@ impl Config {
             .or(config_profile.model_provider)
             .or(cfg.model_provider)
             .unwrap_or_else(|| "openai".to_string());
+        // If the user explicitly selected the `ollama` provider but it is not
+        // defined in `config.toml`, inject a sensible default so the flag works
+        // out of the box without requiring manual configuration.
+        if model_provider_id == "ollama" && !model_providers.contains_key("ollama") {
+            model_providers.insert(
+                "ollama".to_string(),
+                ModelProviderInfo {
+                    name: "Ollama".to_string(),
+                    base_url: Some("http://localhost:11434/v1".to_string()),
+                    env_key: None,
+                    env_key_instructions: None,
+                    wire_api: WireApi::Chat,
+                    query_params: None,
+                    http_headers: None,
+                    env_http_headers: None,
+                    request_max_retries: None,
+                    stream_max_retries: None,
+                    stream_idle_timeout_ms: None,
+                    requires_auth: false,
+                },
+            );
+        }
         let model_provider = model_providers
             .get(&model_provider_id)
             .ok_or_else(|| {
diff --git a/codex-rs/exec/src/cli.rs b/codex-rs/exec/src/cli.rs
index 53af25c7e9..23bb6225cd 100644
--- a/codex-rs/exec/src/cli.rs
+++ b/codex-rs/exec/src/cli.rs
@@ -14,6 +14,12 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
+    /// Convenience flag to select the local Ollama provider.
+    /// Equivalent to -c model_provider=ollama; verifies a local Ollama server is running and
+    /// creates a model_providers.ollama entry in config.toml if missing.
+    #[arg(long = "ollama", default_value_t = false)]
+    pub ollama: bool,
+
     /// Select the sandbox policy to use when executing model-generated shell
     /// commands.
     #[arg(long = "sandbox", short = 's')]
diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs
index ce4d7f65cc..809de76251 100644
--- a/codex-rs/exec/src/lib.rs
+++ b/codex-rs/exec/src/lib.rs
@@ -35,6 +35,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
     let Cli {
         images,
         model,
+        ollama,
         config_profile,
         full_auto,
         dangerously_bypass_approvals_and_sandbox,
@@ -114,6 +115,15 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         sandbox_mode_cli_arg.map(Into::<SandboxMode>::into)
     };
 
+    // When the user opts into the Ollama provider via `--ollama`, ensure we
+    // have a configured provider entry and that a local server is running.
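+    // On failure the returned error already carries the install/start
+    // instructions for Ollama, so it is printed as-is and the process exits
+    // non-zero before any configuration is loaded.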
+    if ollama {
+        if let Err(e) = codex_core::config::ensure_ollama_provider_configured_and_running().await {
+            eprintln!("{e}");
+            std::process::exit(1);
+        }
+    }
+
     // Load configuration and determine approval policy
     let overrides = ConfigOverrides {
         model,
@@ -123,7 +133,11 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         approval_policy: Some(AskForApproval::Never),
         sandbox_mode,
         cwd: cwd.map(|p| p.canonicalize().unwrap_or(p)),
-        model_provider: None,
+        model_provider: if ollama {
+            Some("ollama".to_string())
+        } else {
+            None
+        },
         codex_linux_sandbox_exe,
         base_instructions: None,
         include_plan_tool: None,
diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml
index a571b32c8d..79c8af07ba 100644
--- a/codex-rs/tui/Cargo.toml
+++ b/codex-rs/tui/Cargo.toml
@@ -45,6 +45,7 @@ regex-lite = "0.1"
 reqwest = { version = "0.12", features = ["json"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = { version = "1", features = ["preserve_order"] }
+toml = "0.8"
 shlex = "1.3.0"
 strum = "0.27.2"
 strum_macros = "0.27.2"
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index 1142bd87fc..1642bbbec8 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -43,6 +43,7 @@ enum AppState<'a> {
     },
     /// The start-up warning that recommends running codex inside a Git repo.
     GitWarning { screen: GitWarningScreen },
+    // (no additional states)
 }
 
 pub(crate) struct App<'a> {
diff --git a/codex-rs/tui/src/cli.rs b/codex-rs/tui/src/cli.rs
index cb1b725a64..a6c0fe7d58 100644
--- a/codex-rs/tui/src/cli.rs
+++ b/codex-rs/tui/src/cli.rs
@@ -17,6 +17,12 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
+    /// Convenience flag to select the local Ollama provider.
+    /// Equivalent to -c model_provider=ollama; verifies a local Ollama server is running and
+    /// creates a model_providers.ollama entry in config.toml if missing.
+    #[arg(long = "ollama", default_value_t = false)]
+    pub ollama: bool,
+
     /// Configuration profile from config.toml to specify default options.
#[arg(long = "profile", short = 'p')] pub config_profile: Option, diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 0ec9be6153..edb02400ce 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -9,10 +9,13 @@ use codex_core::config_types::SandboxMode; use codex_core::protocol::AskForApproval; use codex_core::util::is_inside_git_repo; use codex_login::load_auth; +use crossterm::event::{self, Event as CEvent, KeyCode, KeyEvent}; +use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; use log_layer::TuiLogLayer; use std::fs::OpenOptions; -use std::io::Write; +use std::io::{self, Write}; use std::path::PathBuf; +use toml as _; use tracing::error; use tracing_appender::non_blocking; use tracing_subscriber::EnvFilter; @@ -48,6 +51,327 @@ use color_eyre::owo_colors::OwoColorize; pub use cli::Cli; +fn read_ollama_models_list(config_path: &std::path::Path) -> Vec { + match std::fs::read_to_string(config_path) + .ok() + .and_then(|s| toml::from_str::(&s).ok()) + { + Some(toml::Value::Table(root)) => root + .get("model_providers") + .and_then(|v| v.as_table()) + .and_then(|t| t.get("ollama")) + .and_then(|v| v.as_table()) + .and_then(|t| t.get("models")) + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect::>() + }) + .unwrap_or_default(), + _ => Vec::new(), + } +} + +fn read_ollama_config_state(config_path: &std::path::Path) -> (bool, usize) { + match std::fs::read_to_string(config_path) + .ok() + .and_then(|s| toml::from_str::(&s).ok()) + { + Some(toml::Value::Table(root)) => { + let provider_present = root + .get("model_providers") + .and_then(|v| v.as_table()) + .and_then(|t| t.get("ollama")) + .map(|_| true) + .unwrap_or(false); + let models_count = root + .get("model_providers") + .and_then(|v| v.as_table()) + .and_then(|t| t.get("ollama")) + .and_then(|v| v.as_table()) + .and_then(|t| t.get("models")) + .and_then(|v| v.as_array()) + .map(|arr| arr.len()) + .unwrap_or(0); + (provider_present, models_count) + } + _ => (false, 0), + } +} + +fn save_ollama_models(config_path: &std::path::Path, models: &[String]) -> std::io::Result<()> { + use toml::value::Table as TomlTable; + let mut root_value = if let Ok(contents) = std::fs::read_to_string(config_path) { + toml::from_str::(&contents).unwrap_or(toml::Value::Table(TomlTable::new())) + } else { + toml::Value::Table(TomlTable::new()) + }; + + if !matches!(root_value, toml::Value::Table(_)) { + root_value = toml::Value::Table(TomlTable::new()); + } + let root_tbl = match root_value.as_table_mut() { + Some(t) => t, + None => return Err(std::io::Error::other("invalid TOML root value")), + }; + + let mp_val = root_tbl + .entry("model_providers".to_string()) + .or_insert_with(|| toml::Value::Table(TomlTable::new())); + if !mp_val.is_table() { + *mp_val = toml::Value::Table(TomlTable::new()); + } + let mp_tbl = match mp_val.as_table_mut() { + Some(t) => t, + None => return Err(std::io::Error::other("invalid model_providers table")), + }; + + let ollama_val = mp_tbl + .entry("ollama".to_string()) + .or_insert_with(|| toml::Value::Table(TomlTable::new())); + if !ollama_val.is_table() { + *ollama_val = toml::Value::Table(TomlTable::new()); + } + let ollama_tbl = match ollama_val.as_table_mut() { + Some(t) => t, + None => return Err(std::io::Error::other("invalid ollama table")), + }; + let arr = toml::Value::Array( + models + .iter() + .map(|m| toml::Value::String(m.clone())) + .collect(), + ); + ollama_tbl.insert("models".to_string(), arr); + + let 
updated = toml::to_string_pretty(&root_value) + .map_err(|e| std::io::Error::other(e.to_string()))?; + if let Some(parent) = config_path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(config_path, updated) +} + +fn print_inline_message_no_models( + host_root: &str, + config_path: &std::path::Path, + provider_was_present_before: bool, +) -> io::Result<()> { + let mut out = std::io::stdout(); + let path = config_path.display().to_string(); + // green bold helper + let b = |s: &str| format!("\x1b[1m{s}\x1b[0m"); + out.write_all( + format!( + "{}\n\n", + b("we've discovered no models on your local Ollama instance.") + ) + .as_bytes(), + )?; + out.write_all(format!("endpoint: {host_root}\n").as_bytes())?; + if provider_was_present_before { + out.write_all(format!("config: ollama provider already present in {path}\n").as_bytes())?; + } else { + out.write_all(format!("config: added ollama as a model provider in {path}\n").as_bytes())?; + } + out.write_all( + b"models: none recorded in config (pull models with `ollama pull `).\n\n", + )?; + out.flush() +} + +fn run_inline_models_picker( + host_root: &str, + available: &[String], + preselected: &[String], + config_path: &std::path::Path, + provider_was_present_before: bool, + models_count_before: usize, +) -> io::Result<()> { + let mut out = std::io::stdout(); + let mut selected: Vec = available + .iter() + .map(|m| preselected.iter().any(|x| x == m)) + .collect(); + let mut cursor: usize = 0; + + let mut first = true; + let mut lines_printed: usize = 0; + + enable_raw_mode()?; + + loop { + // Render block + render_inline_picker( + &mut out, + host_root, + available, + &selected, + cursor, + &mut first, + &mut lines_printed, + )?; + + // Wait for key + match event::read()? { + CEvent::Key(KeyEvent { + code: KeyCode::Up, .. + }) + | CEvent::Key(KeyEvent { + code: KeyCode::Char('k'), + .. + }) => { + cursor = cursor.saturating_sub(1); + } + CEvent::Key(KeyEvent { + code: KeyCode::Down, + .. + }) + | CEvent::Key(KeyEvent { + code: KeyCode::Char('j'), + .. + }) => { + if cursor + 1 < available.len() { + cursor += 1; + } + } + CEvent::Key(KeyEvent { + code: KeyCode::Char(' '), + .. + }) => { + if let Some(s) = selected.get_mut(cursor) { + *s = !*s; + } + } + CEvent::Key(KeyEvent { + code: KeyCode::Char('a'), + .. + }) => { + let all_sel = selected.iter().all(|s| *s); + selected.fill(!all_sel); + } + CEvent::Key(KeyEvent { + code: KeyCode::Enter, + .. + }) => { + break; + } + CEvent::Key(KeyEvent { + code: KeyCode::Char('q'), + .. + }) + | CEvent::Key(KeyEvent { + code: KeyCode::Esc, .. + }) => { + // Skip saving – print summary and continue. + disable_raw_mode()?; + print_config_summary_after_save( + config_path, + provider_was_present_before, + models_count_before, + None, + )?; + return Ok(()); + } + _ => {} + } + } + + disable_raw_mode()?; + + // Compute chosen + let chosen: Vec = available + .iter() + .cloned() + .zip(selected.iter()) + .filter_map(|(name, sel)| if *sel { Some(name) } else { None }) + .collect(); + + let _ = save_ollama_models(config_path, &chosen); + print_config_summary_after_save( + config_path, + provider_was_present_before, + models_count_before, + Some(chosen.len()), + ) +} + +fn render_inline_picker( + out: &mut std::io::Stdout, + host_root: &str, + items: &[String], + selected: &[bool], + cursor: usize, + first: &mut bool, + lines_printed: &mut usize, +) -> io::Result<()> { + // If not first render, move to the start of the block. We will clear each line as we redraw. 
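+    // Concretely, the redraw relies on two ANSI escape sequences: `ESC[nA`
+    // moves the cursor up n lines and `ESC[2K` clears a line before it is
+    // rewritten in place.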
+ if !*first { + out.write_all(format!("\x1b[{}A", *lines_printed).as_bytes())?; // up N + } + + let mut lines = Vec::new(); + let bold = |s: &str| format!("\x1b[1m{s}\x1b[0m"); + lines.push(bold("we've discovered some models on ollama:").to_string()); + lines.push(format!("endpoint: {host_root}")); + lines.push( + "controls: ↑/↓ move, space toggle, 'a' select/unselect all, enter confirm, 'q' skip" + .to_string(), + ); + lines.push(String::new()); + for (i, name) in items.iter().enumerate() { + let mark = if selected.get(i).copied().unwrap_or(false) { + "\x1b[32m[x]\x1b[0m" // green + } else { + "[ ]" + }; + let mut line = format!("{mark} {name}"); + if i == cursor { + line = format!("\x1b[7m{line}\x1b[0m"); // reverse video for current row + } + lines.push(line); + } + + for l in &lines { + out.write_all(b"\x1b[2K")?; // clear current line + out.write_all(l.as_bytes())?; + out.write_all(b"\n")?; + } + out.flush()?; + *first = false; + *lines_printed = lines.len(); + Ok(()) +} + +fn print_config_summary_after_save( + config_path: &std::path::Path, + provider_was_present_before: bool, + models_count_before: usize, + models_count_after: Option, +) -> io::Result<()> { + let mut out = std::io::stdout(); + let path = config_path.display().to_string(); + if provider_was_present_before { + out.write_all(format!("config: ollama provider already present in {path}\n").as_bytes())?; + } else { + out.write_all(format!("config: added ollama as a model provider in {path}\n").as_bytes())?; + } + if let Some(after) = models_count_after { + let names = read_ollama_models_list(config_path); + if names.is_empty() { + out.write_all(format!("models: recorded {after}\n\n").as_bytes())?; + } else { + out.write_all( + format!("models: recorded {} ({})\n\n", after, names.join(", ")).as_bytes(), + )?; + } + } else { + out.write_all(b"models: no changes recorded\n\n")?; + } + out.flush() +} + pub async fn run_main( cli: Cli, codex_linux_sandbox_exe: Option, @@ -69,14 +393,43 @@ pub async fn run_main( ) }; + // Track config.toml state for messaging before launching TUI. + let (provider_was_present_before, models_count_before) = if cli.ollama { + let codex_home = codex_core::config::find_codex_home()?; + let config_path = codex_home.join("config.toml"); + let (p, m) = read_ollama_config_state(&config_path); + (p, m) + } else { + (false, 0) + }; + let config = { + // If the user selected the Ollama provider via `--ollama`, verify a + // local server is reachable and ensure a provider entry exists in + // config.toml. Exit early with a helpful message otherwise. + if cli.ollama { + if let Err(e) = + codex_core::config::ensure_ollama_provider_configured_and_running().await + { + #[allow(clippy::print_stderr)] + { + eprintln!("{e}"); + } + std::process::exit(1); + } + } + // Load configuration and support CLI overrides. let overrides = ConfigOverrides { model: cli.model.clone(), approval_policy, sandbox_mode, cwd: cli.cwd.clone().map(|p| p.canonicalize().unwrap_or(p)), - model_provider: None, + model_provider: if cli.ollama { + Some("ollama".to_string()) + } else { + None + }, config_profile: cli.config_profile.clone(), codex_linux_sandbox_exe, base_instructions: None, @@ -101,6 +454,71 @@ pub async fn run_main( } } }; + // If the user passed --ollama, fetch available models from the local + // Ollama instance and, if they differ from what is listed in + // config.toml, display a minimal inline selection UI before launching the TUI. + if cli.ollama { + // Determine host root for the Ollama native API (e.g. 
http://localhost:11434).
+        let base_url = config
+            .model_provider
+            .base_url
+            .clone()
+            .unwrap_or_else(|| "http://localhost:11434/v1".to_string());
+        let host_root = base_url
+            .trim_end_matches('/')
+            .trim_end_matches("/v1")
+            .to_string();
+
+        // Query the list of local models via GET /api/tags.
+        let tags_url = format!("{host_root}/api/tags");
+        let available_models: Vec<String> = match reqwest::Client::new().get(&tags_url).send().await
+        {
+            Ok(resp) if resp.status().is_success() => {
+                match resp.json::<serde_json::Value>().await {
+                    Ok(val) => val
+                        .get("models")
+                        .and_then(|m| m.as_array())
+                        .map(|arr| {
+                            arr.iter()
+                                .filter_map(|v| v.get("name").and_then(|n| n.as_str()))
+                                .map(|s| s.to_string())
+                                .collect::<Vec<String>>()
+                        })
+                        .unwrap_or_default(),
+                    Err(_) => Vec::new(),
+                }
+            }
+            _ => Vec::new(),
+        };
+
+        let config_path = config.codex_home.join("config.toml");
+        // Read existing models in config.
+        let existing_models: Vec<String> = read_ollama_models_list(&config_path);
+
+        if available_models.is_empty() {
+            // Inform the user and continue launching the TUI.
+            print_inline_message_no_models(&host_root, &config_path, provider_was_present_before)?;
+        } else {
+            // Compare sets to decide whether to show the prompt.
+            let set_eq = {
+                use std::collections::HashSet;
+                let a: HashSet<_> = available_models.iter().collect();
+                let b: HashSet<_> = existing_models.iter().collect();
+                a == b
+            };
+
+            if !set_eq {
+                run_inline_models_picker(
+                    &host_root,
+                    &available_models,
+                    &existing_models,
+                    &config_path,
+                    provider_was_present_before,
+                    models_count_before,
+                )?;
+            }
+        }
+    }
 
     let log_dir = codex_core::config::log_dir(&config)?;
     std::fs::create_dir_all(&log_dir)?;