mirror of
https://github.com/openai/codex.git
synced 2026-05-04 13:21:54 +03:00
Surface reasoning tokens in exec JSON usage (#19308)
## Summary

Fixes #19022. `codex exec --json` currently emits `turn.completed.usage` with input, cached input, and output token counts, but drops the reasoning-token split that Codex already receives through thread token usage updates. Programmatic consumers that rely on the JSON stream, especially ephemeral runs that do not write rollout files, need this field to accurately display reasoning-model usage.

This PR adds `reasoning_output_tokens` to the public exec JSON `Usage` payload and maps it from the existing `ThreadTokenUsageUpdated` total token usage data.

## Verification

- Added coverage to `event_processor_with_json_output::token_usage_update_is_emitted_on_turn_completion` so `turn.completed.usage.reasoning_output_tokens` is asserted.
- Updated SDK expectations for `run()` and `runStreamed()` so TypeScript consumers see the new usage field.
- Ran `cargo test -p codex-exec`.
- Ran `pnpm --filter ./sdk/typescript run build`.
- Ran `pnpm --filter ./sdk/typescript run lint`.
- Ran `pnpm --filter ./sdk/typescript exec jest --runInBand --testTimeout=30000`.
This commit is contained in:
@@ -122,6 +122,7 @@ impl EventProcessorWithJsonOutput {
             input_tokens: usage.total.input_tokens,
             cached_input_tokens: usage.total.cached_input_tokens,
             output_tokens: usage.total.output_tokens,
+            reasoning_output_tokens: usage.total.reasoning_output_tokens,
         }
     }
@@ -65,6 +65,8 @@ pub struct Usage {
     pub cached_input_tokens: i64,
     /// The number of output tokens used during the turn.
     pub output_tokens: i64,
+    /// The number of reasoning output tokens used during the turn.
+    pub reasoning_output_tokens: i64,
 }

 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)]
||||
Reference in New Issue
Block a user