Request compression.

Add a new model_provider flag, `request_compression`, to enable request
compression. We support zstd and gzip; the server also supports brotli.

You can test this against the Sign in with ChatGPT flow by adding the
following profile:

```
[profiles.compressed]
name = "compressed"
model_provider = "openai-zstd"

[model_providers.openai-zstd]
name = "OpenAI (ChatGPT, zstd)"
wire_api = "responses"
request_compression = "zstd"
requires_openai_auth = true
```

This will zstd compress your request before sending it to the server.
This commit is contained in:
Channing Conger
2025-11-25 11:53:38 -08:00
parent 07f077dfb3
commit 55acaaffac
37 changed files with 491 additions and 79 deletions

View File

@@ -351,6 +351,7 @@ fn push_tool_call_message(messages: &mut Vec<Value>, tool_call: Value, reasoning
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::RequestCompression;
use crate::provider::RetryConfig;
use crate::provider::WireApi;
use codex_protocol::models::FunctionCallOutputPayload;
@@ -374,6 +375,7 @@ mod tests {
retry_5xx: true,
retry_transport: true,
},
request_compression: RequestCompression::None,
stream_idle_timeout: Duration::from_secs(1),
}
}

View File

@@ -172,6 +172,7 @@ fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) {
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::RequestCompression;
use crate::provider::RetryConfig;
use crate::provider::WireApi;
use codex_protocol::protocol::SubAgentSource;
@@ -193,6 +194,7 @@ mod tests {
retry_5xx: true,
retry_transport: true,
},
request_compression: RequestCompression::None,
stream_idle_timeout: Duration::from_secs(5),
}
}