Revert "fix: read max_output_tokens param from config" (#7088)

Reverts openai/codex#4139
This commit is contained in:
jif-oai
2025-11-21 11:40:02 +01:00
committed by GitHub
parent f4af6e389e
commit bce030ddb5
3 changed files with 5 additions and 16 deletions

View File

@@ -282,8 +282,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
-"prompt_cache_key": prompt_cache_key,
-"max_output_tokens": 128000,
+"prompt_cache_key": prompt_cache_key
});
let compact_1 = json!(
{
@@ -352,8 +351,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
-"prompt_cache_key": prompt_cache_key,
-"max_output_tokens": 128000,
+"prompt_cache_key": prompt_cache_key
});
let user_turn_2_after_compact = json!(
{
@@ -413,8 +411,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
-"prompt_cache_key": prompt_cache_key,
-"max_output_tokens": 128000,
+"prompt_cache_key": prompt_cache_key
});
let usert_turn_3_after_resume = json!(
{
@@ -494,8 +491,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
-"prompt_cache_key": prompt_cache_key,
-"max_output_tokens": 128000,
+"prompt_cache_key": prompt_cache_key
});
let user_turn_3_after_fork = json!(
{
@@ -575,8 +571,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"include": [
"reasoning.encrypted_content"
],
-"prompt_cache_key": fork_prompt_cache_key,
-"max_output_tokens": 128000,
+"prompt_cache_key": fork_prompt_cache_key
});
let mut expected = json!([
user_turn_1,