codex/codex-rs/lmstudio/src/lib.rs
rugvedS07 837bc98a1d LM Studio OSS Support (#2312)
## Overview

Adds LM Studio OSS support. Closes #1883


### Changes
This PR enhances the `--oss` flag to support LM Studio as a
provider. Additionally, it introduces a new flag, `--local-provider`,
which accepts `lmstudio` or `ollama` as a value when the user wants to
explicitly choose which one to use.

If no provider is specified, `codex --oss` auto-selects the provider
based on whichever is running.
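
For example:

```
# Explicitly pick a local provider:
codex --oss --local-provider lmstudio
codex --oss --local-provider ollama

# Auto-select whichever local server is running:
codex --oss
```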

#### Additional enhancements
The default provider can be set using `oss_provider` in the config:

```
oss_provider = "lmstudio"
```

Non-interactive users will need to either pass the provider as a CLI
argument or set it in their `config.toml`.
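
A hedged sketch of what that might look like (`codex exec` is assumed
here as the non-interactive entry point; it is not named in this PR):

```
# Provider passed explicitly for a non-interactive run:
codex exec --oss --local-provider lmstudio "summarize the diff"
```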

### Notes
For best performance, [set the default context
length](https://lmstudio.ai/docs/app/advanced/per-model) for gpt-oss to
the maximum your machine can support.

---------

Co-authored-by: Matt Clayton <matt@lmstudio.ai>
Co-authored-by: Eric Traut <etraut@openai.com>
2025-11-17 11:49:09 -08:00


```
mod client;

pub use client::LMStudioClient;
use codex_core::config::Config;

/// Default OSS model to use when `--oss` is passed without an explicit `-m`.
pub const DEFAULT_OSS_MODEL: &str = "openai/gpt-oss-20b";

/// Prepare the local OSS environment when `--oss` is selected.
///
/// - Ensures a local LM Studio server is reachable.
/// - Checks if the model exists locally and downloads it if missing.
pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
    let model: &str = config.model.as_ref();

    // Verify local LM Studio is reachable.
    let lmstudio_client = LMStudioClient::try_from_provider(config).await?;

    match lmstudio_client.fetch_models().await {
        Ok(models) => {
            if !models.iter().any(|m| m == model) {
                lmstudio_client.download_model(model).await?;
            }
        }
        Err(err) => {
            // Not fatal; higher layers may still proceed and surface errors later.
            tracing::warn!("Failed to query local models from LM Studio: {}.", err);
        }
    }

    // Load the model in the background.
    tokio::spawn({
        let client = lmstudio_client.clone();
        let model = model.to_string();
        async move {
            if let Err(e) = client.load_model(&model).await {
                tracing::warn!("Failed to load model {}: {}", model, e);
            }
        }
    });

    Ok(())
}
```
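
For context, a minimal sketch of how a caller might use this. The crate
name `codex_lmstudio` and the surrounding `start_oss_session` function
are assumptions for illustration, not part of this file:

```
use codex_core::config::Config;

// Hypothetical call site: prepare the local OSS environment before
// starting a session when `--oss` is active.
async fn start_oss_session(config: &Config) -> std::io::Result<()> {
    // Verifies LM Studio is reachable, downloads the model if missing,
    // and kicks off a background load of the model.
    codex_lmstudio::ensure_oss_ready(config).await?;
    // ... launch the interactive session here ...
    Ok(())
}
```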