Use prompt-based co-author attribution with config override (#11617)

This commit is contained in:
gabec-openai
2026-02-17 12:15:54 -08:00
committed by GitHub
parent 4c4255fcfc
commit 5341ad08f8
6 changed files with 122 additions and 0 deletions

View File

@@ -206,6 +206,13 @@ pub struct Config {
/// Compact prompt override.
pub compact_prompt: Option<String>,
/// Optional commit attribution text for commit message co-author trailers.
///
/// - `None`: use default attribution (`Codex <noreply@openai.com>`)
/// - `Some("")` or whitespace-only: disable commit attribution
/// - `Some("...")`: use the provided attribution text verbatim
pub commit_attribution: Option<String>,
/// Optional external notifier command. When set, Codex will spawn this
/// program after each completed *turn* (i.e. when the agent finishes
/// processing a user submission). The value must be the full command
@@ -917,6 +924,11 @@ pub struct ConfigToml {
/// Compact prompt used for history compaction.
pub compact_prompt: Option<String>,
/// Optional commit attribution text for commit message co-author trailers.
///
/// Set to an empty or whitespace-only string to disable automatic commit attribution.
pub commit_attribution: Option<String>,
/// When set, restricts ChatGPT login to a specific workspace identifier.
#[serde(default)]
pub forced_chatgpt_workspace_id: Option<String>,
@@ -1696,6 +1708,8 @@ impl Config {
}
});
let commit_attribution = cfg.commit_attribution;
// Load base instructions override from a file if specified. If the
// path is relative, resolve it against the effective cwd so the
// behaviour matches other path-like config values.
@@ -1816,6 +1830,7 @@ impl Config {
personality,
developer_instructions,
compact_prompt,
commit_attribution,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
cli_auth_credentials_store_mode: cfg.cli_auth_credentials_store.unwrap_or_default(),
@@ -4192,6 +4207,7 @@ model_verbosity = "high"
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
include_apply_patch_tool: false,
@@ -4304,6 +4320,7 @@ model_verbosity = "high"
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
include_apply_patch_tool: false,
@@ -4414,6 +4431,7 @@ model_verbosity = "high"
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
include_apply_patch_tool: false,
@@ -4510,6 +4528,7 @@ model_verbosity = "high"
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
commit_attribution: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
include_apply_patch_tool: false,