【教学】压箱底Codex gpt-5.4 Fast模式完全指南config.toml 直接用(26.03.06 最新版v0.111.0)
- 内容介绍
- 文章标签
- 相关推荐
直接分享了(这个是Mac版本和linux版本哦,windows小伙伴按需自己加上windows优化参数 )
大家复制粘贴后还需要改改,我用的是中转站,是支持远程压缩的,其余的参数项直接把我的拿去让codex对着源码和我这份toml,再结合你自己的api去改一下就好了
建议多阅读如下资料来更新自己的知识和配置
- openai Codex changelog Codex changelog
- Codex官方配置指南(一般有滞后性不建议看) 注意里面还有advanced的标签页: Config basics
- 官方的最新Codex配置schema, 实测也可能有滞后性,但是一般更新的比较勤, 新发Codex软件包这个一般都会立刻跟上, 大家可以看看schema就懂config.toml可以开启什么实验性功能, 比如memory这种好东西,以及提取memory用的模型都是可以设置的:
https://developers.openai.com/codex/config-schema.json 可以看到下面的截图这个schema,加在toml上面并且用vscode插件就可以静态检查字段是否正确,类似于pylance。但是注意schema不一定全,源码能读的配置项例如agents.name可能静态检查插件会标红,不要担心,实在不行就让codex自己分析源码是否能让你配的参数生效
image2610×1062 325 KB
- Codex软件官方源码。 非常推荐。即便不是在官方schema json或者config指南中更新的, 源码也都会告诉你。直接让codex分析自己的源码,就知道什么参数默认值是什么,该如何开启让一份config.toml自洽了。 注意官方schema有很多fallback的字段, 其实你根本不用写,一切以源码为主是最好的。 楼主个人习惯是Codex发新版了立马分析源码, 看有啥好东西可以开启 GitHub - openai/codex: Lightweight coding agent that runs in your terminal · GitHub
#:schema https://developers.openai.com/codex/config-schema.json
# 网友解答:
# Instruction Overrides
developer_instructions = "请使用中文回答,务必使用清晰详细准确的风格。"
# ⭐️⭐️⭐️ 根据需求加上下面的话:
# 在用户没有显式指定使用skills的前提下,也就是普通任务时,用户明确说'可以改代码'之前不允许私自改代码,而是告诉用户bug或者位置,等到用户明确说了可以改才可以执行更改代码。用户显式指定skills时,如果skills中有'绕过developer_instruction'的意图,则可以自主修改代码、文件,以skills的权限为主。如果要使用spawn agent,需要给agent足够的时间去运行。 防御性编程边界 不要写冗余代码和过度防御性编程代码,技术品味包括但不限于: 1. 不要对已知类型使用 getattr 2. 过度isinstance 3. 过度try except 4. 不要为简单方法写冗余 docstring 5. ...等等
# ⭐️⭐️⭐️ Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""
# Override built-in base instructions with a file path. Default: unset.
# experimental_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
# Approval & Sandbox
# 默认是on-request, - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-failure: auto-run in sandbox; prompt only on failure for escalation
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
project_doc_fallback_filenames = ["CLAUDE.md"] # agents.md找不到,则找claude.md
approval_policy = "on-request" # 默认
sandbox_mode = "danger-full-access" # 开大点好,文件系统/网络沙箱策略: read-only | workspace-write | danger-full-access (无沙箱,极其危险)
# Core Model Selection
model_provider = "packycode" #请自己设置
model = "gpt-5.4" # Codex使用的主要模型。默认: "gpt-5.2-codex"
# models = ["gpt-5.3-codex" , "gpt-5.2", "gpt-5.4"] --- 当前不支持
# Model used by the /review feature (code reviews). Default: "gpt-5.2-codex".
review_model = "gpt-5.4"
# 模型压缩 ⭐️⭐️⭐️ ⭐️⭐️⭐️重要#######
# 借鉴https://www.reddit.com/r/codex/comments/1per8fj/something_is_wrong_with_auto_compaction/ 还有https://steipete.me/posts/2025/shipping-at-inference-speed
### ⭐️⭐️⭐️gpt 5.4使用下面两个:
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4
model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
### ⭐️⭐️⭐️gpt 5.3 5.2则不需要配置model_context_window, 只需要设置model_auto_compact_token_limit
# model_auto_compact_token_limit = 260000 # for gpt-5.3⭐️
tool_output_token_limit = 40000 # 工具输出最大token; default: 10000 for gpt-5.2-codex
# Reasoning & Verbosity (Responses API capable models)
model_reasoning_effort = "xhigh" # 推理努力程度: minimal | low | medium | high | xhigh (默认: medium; gpt-5.2-codex和gpt-5.2上默认xhigh)
model_reasoning_summary = "detailed" # 模型输出思维链的summary风格,可以是auto | concise | detailed | none (default: auto)
# Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium)
model_verbosity = "high" # 如有需要可以开大哦!!!low则 Shorten responses
model_supports_reasoning_summaries = true # Force reasoning
service_tier = "fast" # 开启后会变快哦,用量2倍
# Model Providers (extend/override built-ins)
[model_providers.packycode]
name = "OpenAI" # ⭐️⭐️⭐️ 如果你的中转支持, 这里就用大写的OpenAI,可以启用远程压缩效果更好哦, 不支持的话就换个别的字符串就行
base_url = "https://codex-api-slb.packycode.com/v1"
wire_api = "responses"
requires_openai_auth = true
# Centralized Feature Flags (preferred)
[features]
shell_tool = true # 启用 shell 工具。默认: true
apply_patch_freeform = true # 通过自由格式编辑路径包含apply_patch(影响默认工具集)。默认: false
shell_snapshot = true # 启用shell快照功能。默认: false
undo = true # 启用undo功能。默认: true
unified_exec = true # 使用统一 PTY 执行工具
# exec_policy = true # Enforce rules checks for shell/unified_exec
multi_agent = true
steer = true
prevent_idle_sleep = true
# voice_transcription = true
child_agents_md = true
memories = true # 开启记忆 ⭐️⭐️⭐️
sqlite = true # 其实可以不设置吧,开了也行
fast_mode = true # 必开 -- 当然会让gpt-5.4用量2倍
[memories] # ⭐️⭐️⭐️,强烈建议用新模型来总结memories
consolidation_model = "gpt-5.4"
extract_model = "gpt-5.4"
# generate_memories = true # 默认true
# use_memories = true # 默认true,表示把 memory_summary.md 注入 developer instructions
max_raw_memories_for_consolidation = 512
max_unused_days = 30 # 默认 30
max_rollout_age_days = 45 # 默认 30
# max_rollouts_per_startup = 16 # 默认 16
# min_rollout_idle_hours = 6 # 默认 6
[agents]
max_threads = 12
max_depth = 2
[agents.default]
description = "General-purpose helper."
model = "gpt-5.3-codex"
model_reasoning_effort = "xhigh"
[agents.worker]
description = "Do not use this agent, it's silly named and not optimized for any particular role."
model = "gpt-5.2"
model_reasoning_effort = "high"
[agents.explorer]
description = "Fast codebase explorer for read-heavy tasks."
model = "gpt-5.3-codex"
model_reasoning_effort = "xhigh"
sandbox_mode = "read-only"
[agents.awaiter]
description = "Long-running command and task monitoring role."
model = "gpt-5.4"
model_reasoning_effort = "high"
[agents.reviewer]
description = "Reviewer agent for code reviews and feedback."
model = "gpt-5.4"
model_reasoning_effort = "high"
# Shell Environment Policy for spawned processes (table)
[shell_environment_policy] # Shell环境配置
# 环境变量继承策略inherit: all (default) | core | none
inherit = "all" # 可以全给他看
# 是否忽略默认的 KEY/SECRET/TOKEN Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true
ignore_default_excludes = true # 意思是可以给他看
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
# exclude = []
# Explicit key/value overrides (always win). Default: {}
# set = {}
# Whitelist; if non-empty, keep only matching vars. Default: []
# include_only = []
# Experimental: run via user shell profile. Default: false
# experimental_use_profile = false
# Sandbox settings (tables)
[sandbox_workspace_write]
# Additional writable roots beyond the workspace (cwd). Default: [] 例如在/root/paddlejob/RLHF 启动,但需要让他访问 /root/paddlejob 下的其他文件夹,可以在这里加上 /root/paddlejob
# writable_roots = []
network_access = true # Allow outbound network access inside the sandbox. Default: false
# Notifications
# External notifier program (argv array). When unset: disabled.
# Example: notify = ["notify-send", "Codex"]
[tui]
# Send desktop notifications when approvals are required or a turn completes. # Defaults to false.
notifications = true
# 使用包装脚本以集中配置通知偏好(声音/图标/分组等)
# notify = [ ] # linux机器不开 wrapper.sh
# Project Controls
# Max bytes from AGENTS.md to embed into first-turn instructions. Default: 32768
# project_doc_max_bytes = 32768
status_line = ["model-with-reasoning", "current-dir", "git-branch", "context-remaining", "five-hour-limit", "codex-version", "context-window-size", "used-tokens", "session-id"]
[notice]
hide_rate_limit_model_nudge = true
[notice.model_migrations]
"gpt-5.2" = "gpt-5.3-codex"
"gpt-5.2-codex" = "gpt-5.3-codex"
--【壹】--: # Reasoning & Verbosity (Responses API capable models)
--【贰】--:
感谢佬友!中转站有推荐吗
--【叁】--:
感谢分享大佬厉害
--【肆】--: # Approval & Sandbox
--【伍】--: # Core Model Selection
--【陆】--:
这是真干货,学习了。
--【柒】--: [model_providers.packycode] name = "OpenAI" # ⭐️⭐️⭐️ 如果你的中转支持, 这里就用大写的OpenAI,可以启用远程压缩效果更好哦, 不支持的话就换个别的字符串就行 base_url = "https://codex-api-slb.packycode.com/v1" wire_api = "responses" requires_openai_auth = true
--【捌】--: # Sandbox settings (tables)
--【玖】--: 模型压缩 ⭐️⭐️⭐️ ⭐️⭐️⭐️重要####### # 借鉴https://www.reddit.com/r/codex/comments/1per8fj/something_is_wrong_with_auto_compaction/ 还有https://steipete.me/posts/2025/shipping-at-inference-speed ### ⭐️⭐️⭐️gpt 5.4使用下面两个: model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4 model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高 ### ⭐️⭐️⭐️gpt 5.3 5.2则不需要配置model_context_window, 只需要设置model_auto_compact_token_limit # model_auto_compact_token_limit = 260000 # for gpt-5.3⭐️ tool_output_token_limit = 40000 # 工具输出最大token; default: 10000 for gpt-5.2-codex
--【拾】--:
感谢佬友!
--【拾壹】--:
对,没错的
--【拾贰】--: # Notifications
--【拾叁】--: model_provider = "packycode" #请自己设置 model = "gpt-5.4" # Codex使用的主要模型。默认: "gpt-5.2-codex" # models = ["gpt-5.3-codex" , "gpt-5.2", "gpt-5.4"] --- 当前不支持 # Model used by the /review feature (code reviews). Default: "gpt-5.2-codex". review_model = "gpt-5.4"
--【拾肆】--:
感谢佬友分享。
--【拾伍】--: # Max bytes from AGENTS.md to embed into first-turn instructions. Default: 32768 # project_doc_max_bytes = 32768 status_line = ["model-with-reasoning", "current-dir", "git-branch", "context-remaining", "five-hour-limit", "codex-version", "context-window-size", "used-tokens", "session-id"] [notice] hide_rate_limit_model_nudge = true [notice.model_migrations] "gpt-5.2" = "gpt-5.3-codex" "gpt-5.2-codex" = "gpt-5.3-codex"
--【拾陆】--: developer_instructions = "请使用中文回答,务必使用清晰详细准确的风格。" # ⭐️⭐️⭐️ 根据需求加上下面的话: # 在用户没有显示指定使用skills的前提下,也就是普通任务时,用户明确说'可以改代码'之前不允许私自改代码,而是告诉用户bug或者位置,等到用户明确说了可以改才可以执行更改代码。用户显示指定skills时,中如果有'绕过developer_instruction'的意图,则可以自主修改代码、文件,以skills的权限为主。如果要使用spawn agent,需要给agent足够的时间去运行。 防御性编程边界 不要写冗余代码和过度防御性编程代码,技术品味包括但不限于: 1. 不要对已知类型使用 getattr 2. 过度isinstance 3. 过度try except 4. 不要为简单方法写冗余 docstring 5. ...等等 # ⭐️⭐️⭐️ Inline override for the history compaction prompt. Default: unset. # compact_prompt = "" # Override built-in base instructions with a file path. Default: unset. # experimental_instructions_file = "/absolute/or/relative/path/to/instructions.txt" # Load the compact prompt override from a file. Default: unset. # experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
--【拾柒】--: # 默认是on-request, - untrusted: only known-safe read-only commands auto-run; others prompt # - on-failure: auto-run in sandbox; prompt only on failure for escalation # - on-request: model decides when to ask (default) # - never: never prompt (risky) project_doc_fallback_filenames = ["CLAUDE.md"] # agents.md找不到,则找claude.md approval_policy = "on-request" # 默认 sandbox_mode = "danger-full-access" # 开大点好,文件系统/网络沙箱策略: read-only | workspace-write | danger-full-access (无沙箱,极其危险)
--【拾捌】--:
感谢分享
--【拾玖】--: # Shell Environment Policy for spawned processes (table)
--【贰拾】--: [shell_environment_policy] # Shell环境配置 # 环境变量继承策略inherit: all (default) | core | none inherit = "all" # 可以全给他看 # 是否忽略默认的 KEY/SECRET/TOKEN Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true ignore_default_excludes = true # 意思是可以给他看 # Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: [] # exclude = [] # Explicit key/value overrides (always win). Default: {} # set = {} # Whitelist; if non-empty, keep only matching vars. Default: [] # include_only = [] # Experimental: run via user shell profile. Default: false # experimental_use_profile = false
【21】 # Instruction Overrides
【22】 # Project Controls
【23】 [sandbox_workspace_write] # Additional writable roots beyond the workspace (cwd). Default: [] 例如在/root/paddlejob/RLHF 启动,但需要让他访问 /root/paddlejob 下的其他文件夹,可以在这里加上 /root/paddlejob # writable_roots = [] network_access = true # # Allow outbound network access inside the sandbox. Default: false
【24】 model_reasoning_effort = "xhigh" # 推理努力程度: minimal | low | medium | high | xhigh (默认: medium; gpt-5.2-codex和gpt-5.2上默认xhigh) model_reasoning_summary = "detailed" # 模型输出思维链的summary风格,可以是auto | concise | detailed | none (default: auto) # Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium) model_verbosity = "high" # 如有需要可以开大哦!!!low则 Shorten responses model_supports_reasoning_summaries = true # Force reasoning service_tier = "fast" # 开启后会变快哦,用量2倍
【25】 # External notifier program (argv array). When unset: disabled. # Example: notify = ["notify-send", "Codex"] [tui] # Send desktop notifications when approvals are required or a turn completes. # Defaults to false. notifications = true # 使用包装脚本以集中配置通知偏好(声音/图标/分组等) # notify = [ ] # linux机器不开 wrapper.sh
【26】
感谢佬友分享
【27】
感谢大佬分享,赶紧抄起来
【28】
感谢分享
【29】
感谢佬友分享,好详细~
【30】
话说有没有可以不让它跑1m上下文的设置呢?
【31】
感谢大佬!
【32】 justin666:
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4 model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4
model_auto_compact_token_limit = 350000 # for gpt-5.4虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
这两段是Windows也可以通用吧
【33】
感谢佬友分享
【34】
感谢佬友分享
【35】
感谢分享
【36】 # Model Providers (extend/override built-ins)
【37】 [features] shell_tool = true # 启用 shell 工具。默认: true apply_patch_freeform = true # 通过自由格式编辑路径包含apply_patch(影响默认工具集)。默认: false shell_snapshot = true # 启用shell快照功能。默认: false undo = true # 启用undo功能。默认: true unified_exec = true # 使用统一 PTY 执行工具 # exec_policy = true # Enforce rules checks for shell/unified_exec multi_agent = true steer = true prevent_idle_sleep = true # voice_transcription = true child_agents_md = true memories = true # 开启记忆 ⭐️⭐️⭐️ sqlite = true # 其实可以不设置吧,开了也行 fast_mode = true # 必开 -- 当然会让gpt-5.4用量2倍 [memories] # ⭐️⭐️⭐️,强烈建议用新模型来总结memories consolidation_model = "gpt-5.4" extract_model = "gpt-5.4" # generate_memories = true # 默认true # use_memories = true # 默认true,表示把 memory_summary.md 注入 developer instructions max_raw_memories_for_consolidation = 512 max_unused_days = 30 # 默认 30 max_rollout_age_days = 45 # 默认 30 # max_rollouts_per_startup = 16 # 默认 16 # min_rollout_idle_hours = 6 # 默认 6 [agents] max_threads = 12 max_depth = 2 [agents.default] description = "General-purpose helper." model = "gpt-5.3-codex" model_reasoning_effort = "xhigh" [agents.worker] description = "Do not use this agent, it's silly named and not optimized for any particular role." model = "gpt-5.2" model_reasoning_effort = "high" [agents.explorer] description = "Fast codebase explorer for read-heavy tasks." model = "gpt-5.3-codex" model_reasoning_effort = "xhigh" sandbox_mode = "read-only" [agents.awaiter] description = "Long-running command and task monitoring role." model = "gpt-5.4" model_reasoning_effort = "high" [agents.reviewer] description = "Reviewer agent for code reviews and feedback." model = "gpt-5.4" model_reasoning_effort = "high"
【38】
感谢佬友
【39】
压缩那个你拉到顶咯
【40】 # Centralized Feature Flags (preferred)
直接分享了(这个是Mac版本和linux版本哦,windows小伙伴按需自己加上windows优化参数 )
大家复制粘贴后还需要改改,我用的是中转站,是支持远程压缩的,其余的参数项直接把我的拿去让codex对着源码和我这份toml,再结合你自己的api去改一下就好了
建议多阅读如下资料来更新自己的知识和配置
- openai Codex changelog Codex changelog
- Codex官方配置指南(一般有滞后性不建议看) 注意里面还有advanced的标签页: Config basics
- 官方的最新Codex配置schema, 实测也可能有滞后性,但是一般更新的比较勤, 新发Codex软件包这个一般都会立刻跟上, 大家可以看看schema就懂config.toml可以开启什么实验性功能, 比如memory这种好东西,以及提取memory用的模型都是可以设置的:
https://developers.openai.com/codex/config-schema.json 可以看到下面的截图这个schema,加在toml上面并且用vscode插件就可以静态检查字段是否正确,类似于pylance。但是注意schema不一定全,源码能读的配置项例如agents.name可能静态检查插件会标红,不要担心,实在不行就让codex自己分析源码是否能让你配的参数生效
image2610×1062 325 KB
- Codex软件官方源码。 非常推荐。即便不是在官方schema json或者config指南中更新的, 源码也都会告诉你。直接让codex分析自己的源码,就知道什么参数默认值是什么,该如何开启让一份config.toml自洽了。 注意官方schema有很多fallback的字段, 其实你根本不用写,一切以源码为主是最好的。 楼主个人习惯是Codex发新版了立马分析源码, 看有啥好东西可以开启 GitHub - openai/codex: Lightweight coding agent that runs in your terminal · GitHub
#:schema https://developers.openai.com/codex/config-schema.json
# 网友解答:
# Instruction Overrides
developer_instructions = "请使用中文回答,务必使用清晰详细准确的风格。"
# ⭐️⭐️⭐️ 根据需求加上下面的话:
# 在用户没有显式指定使用skills的前提下,也就是普通任务时,用户明确说'可以改代码'之前不允许私自改代码,而是告诉用户bug或者位置,等到用户明确说了可以改才可以执行更改代码。用户显式指定skills时,如果skills中有'绕过developer_instruction'的意图,则可以自主修改代码、文件,以skills的权限为主。如果要使用spawn agent,需要给agent足够的时间去运行。 防御性编程边界 不要写冗余代码和过度防御性编程代码,技术品味包括但不限于: 1. 不要对已知类型使用 getattr 2. 过度isinstance 3. 过度try except 4. 不要为简单方法写冗余 docstring 5. ...等等
# ⭐️⭐️⭐️ Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""
# Override built-in base instructions with a file path. Default: unset.
# experimental_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
# Approval & Sandbox
# 默认是on-request, - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-failure: auto-run in sandbox; prompt only on failure for escalation
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
project_doc_fallback_filenames = ["CLAUDE.md"] # agents.md找不到,则找claude.md
approval_policy = "on-request" # 默认
sandbox_mode = "danger-full-access" # 开大点好,文件系统/网络沙箱策略: read-only | workspace-write | danger-full-access (无沙箱,极其危险)
# Core Model Selection
model_provider = "packycode" #请自己设置
model = "gpt-5.4" # Codex使用的主要模型。默认: "gpt-5.2-codex"
# models = ["gpt-5.3-codex" , "gpt-5.2", "gpt-5.4"] --- 当前不支持
# Model used by the /review feature (code reviews). Default: "gpt-5.2-codex".
review_model = "gpt-5.4"
# 模型压缩 ⭐️⭐️⭐️ ⭐️⭐️⭐️重要#######
# 借鉴https://www.reddit.com/r/codex/comments/1per8fj/something_is_wrong_with_auto_compaction/ 还有https://steipete.me/posts/2025/shipping-at-inference-speed
### ⭐️⭐️⭐️gpt 5.4使用下面两个:
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4
model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
### ⭐️⭐️⭐️gpt 5.3 5.2则不需要配置model_context_window, 只需要设置model_auto_compact_token_limit
# model_auto_compact_token_limit = 260000 # for gpt-5.3⭐️
tool_output_token_limit = 40000 # 工具输出最大token; default: 10000 for gpt-5.2-codex
# Reasoning & Verbosity (Responses API capable models)
model_reasoning_effort = "xhigh" # 推理努力程度: minimal | low | medium | high | xhigh (默认: medium; gpt-5.2-codex和gpt-5.2上默认xhigh)
model_reasoning_summary = "detailed" # 模型输出思维链的summary风格,可以是auto | concise | detailed | none (default: auto)
# Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium)
model_verbosity = "high" # 如有需要可以开大哦!!!low则 Shorten responses
model_supports_reasoning_summaries = true # Force reasoning
service_tier = "fast" # 开启后会变快哦,用量2倍
# Model Providers (extend/override built-ins)
[model_providers.packycode]
name = "OpenAI" # ⭐️⭐️⭐️ 如果你的中转支持, 这里就用大写的OpenAI,可以启用远程压缩效果更好哦, 不支持的话就换个别的字符串就行
base_url = "https://codex-api-slb.packycode.com/v1"
wire_api = "responses"
requires_openai_auth = true
# Centralized Feature Flags (preferred)
[features]
shell_tool = true # 启用 shell 工具。默认: true
apply_patch_freeform = true # 通过自由格式编辑路径包含apply_patch(影响默认工具集)。默认: false
shell_snapshot = true # 启用shell快照功能。默认: false
undo = true # 启用undo功能。默认: true
unified_exec = true # 使用统一 PTY 执行工具
# exec_policy = true # Enforce rules checks for shell/unified_exec
multi_agent = true
steer = true
prevent_idle_sleep = true
# voice_transcription = true
child_agents_md = true
memories = true # 开启记忆 ⭐️⭐️⭐️
sqlite = true # 其实可以不设置吧,开了也行
fast_mode = true # 必开 -- 当然会让gpt-5.4用量2倍
[memories] # ⭐️⭐️⭐️,强烈建议用新模型来总结memories
consolidation_model = "gpt-5.4"
extract_model = "gpt-5.4"
# generate_memories = true # 默认true
# use_memories = true # 默认true,表示把 memory_summary.md 注入 developer instructions
max_raw_memories_for_consolidation = 512
max_unused_days = 30 # 默认 30
max_rollout_age_days = 45 # 默认 30
# max_rollouts_per_startup = 16 # 默认 16
# min_rollout_idle_hours = 6 # 默认 6
[agents]
max_threads = 12
max_depth = 2
[agents.default]
description = "General-purpose helper."
model = "gpt-5.3-codex"
model_reasoning_effort = "xhigh"
[agents.worker]
description = "Do not use this agent, it's silly named and not optimized for any particular role."
model = "gpt-5.2"
model_reasoning_effort = "high"
[agents.explorer]
description = "Fast codebase explorer for read-heavy tasks."
model = "gpt-5.3-codex"
model_reasoning_effort = "xhigh"
sandbox_mode = "read-only"
[agents.awaiter]
description = "Long-running command and task monitoring role."
model = "gpt-5.4"
model_reasoning_effort = "high"
[agents.reviewer]
description = "Reviewer agent for code reviews and feedback."
model = "gpt-5.4"
model_reasoning_effort = "high"
# Shell Environment Policy for spawned processes (table)
[shell_environment_policy] # Shell环境配置
# 环境变量继承策略inherit: all (default) | core | none
inherit = "all" # 可以全给他看
# 是否忽略默认的 KEY/SECRET/TOKEN Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true
ignore_default_excludes = true # 意思是可以给他看
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
# exclude = []
# Explicit key/value overrides (always win). Default: {}
# set = {}
# Whitelist; if non-empty, keep only matching vars. Default: []
# include_only = []
# Experimental: run via user shell profile. Default: false
# experimental_use_profile = false
# Sandbox settings (tables)
[sandbox_workspace_write]
# Additional writable roots beyond the workspace (cwd). Default: [] 例如在/root/paddlejob/RLHF 启动,但需要让他访问 /root/paddlejob 下的其他文件夹,可以在这里加上 /root/paddlejob
# writable_roots = []
network_access = true # Allow outbound network access inside the sandbox. Default: false
# Notifications
# External notifier program (argv array). When unset: disabled.
# Example: notify = ["notify-send", "Codex"]
[tui]
# Send desktop notifications when approvals are required or a turn completes. # Defaults to false.
notifications = true
# 使用包装脚本以集中配置通知偏好(声音/图标/分组等)
# notify = [ ] # linux机器不开 wrapper.sh
# Project Controls
# Max bytes from AGENTS.md to embed into first-turn instructions. Default: 32768
# project_doc_max_bytes = 32768
status_line = ["model-with-reasoning", "current-dir", "git-branch", "context-remaining", "five-hour-limit", "codex-version", "context-window-size", "used-tokens", "session-id"]
[notice]
hide_rate_limit_model_nudge = true
[notice.model_migrations]
"gpt-5.2" = "gpt-5.3-codex"
"gpt-5.2-codex" = "gpt-5.3-codex"
--【壹】--: # Reasoning & Verbosity (Responses API capable models)
--【贰】--:
感谢佬友!中转站有推荐吗
--【叁】--:
感谢分享大佬厉害
--【肆】--: # Approval & Sandbox
--【伍】--: # Core Model Selection
--【陆】--:
这是真干货,学习了。
--【柒】--: [model_providers.packycode] name = "OpenAI" # ⭐️⭐️⭐️ 如果你的中转支持, 这里就用大写的OpenAI,可以启用远程压缩效果更好哦, 不支持的话就换个别的字符串就行 base_url = "https://codex-api-slb.packycode.com/v1" wire_api = "responses" requires_openai_auth = true
--【捌】--: # Sandbox settings (tables)
--【玖】--: 模型压缩 ⭐️⭐️⭐️ ⭐️⭐️⭐️重要####### # 借鉴https://www.reddit.com/r/codex/comments/1per8fj/something_is_wrong_with_auto_compaction/ 还有https://steipete.me/posts/2025/shipping-at-inference-speed ### ⭐️⭐️⭐️gpt 5.4使用下面两个: model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4 model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高 ### ⭐️⭐️⭐️gpt 5.3 5.2则不需要配置model_context_window, 只需要设置model_auto_compact_token_limit # model_auto_compact_token_limit = 260000 # for gpt-5.3⭐️ tool_output_token_limit = 40000 # 工具输出最大token; default: 10000 for gpt-5.2-codex
--【拾】--:
感谢佬友!
--【拾壹】--:
对,没错的
--【拾贰】--: # Notifications
--【拾叁】--: model_provider = "packycode" #请自己设置 model = "gpt-5.4" # Codex使用的主要模型。默认: "gpt-5.2-codex" # models = ["gpt-5.3-codex" , "gpt-5.2", "gpt-5.4"] --- 当前不支持 # Model used by the /review feature (code reviews). Default: "gpt-5.2-codex". review_model = "gpt-5.4"
--【拾肆】--:
感谢佬友分享。
--【拾伍】--: # Max bytes from AGENTS.md to embed into first-turn instructions. Default: 32768 # project_doc_max_bytes = 32768 status_line = ["model-with-reasoning", "current-dir", "git-branch", "context-remaining", "five-hour-limit", "codex-version", "context-window-size", "used-tokens", "session-id"] [notice] hide_rate_limit_model_nudge = true [notice.model_migrations] "gpt-5.2" = "gpt-5.3-codex" "gpt-5.2-codex" = "gpt-5.3-codex"
--【拾陆】--: developer_instructions = "请使用中文回答,务必使用清晰详细准确的风格。" # ⭐️⭐️⭐️ 根据需求加上下面的话: # 在用户没有显示指定使用skills的前提下,也就是普通任务时,用户明确说'可以改代码'之前不允许私自改代码,而是告诉用户bug或者位置,等到用户明确说了可以改才可以执行更改代码。用户显示指定skills时,中如果有'绕过developer_instruction'的意图,则可以自主修改代码、文件,以skills的权限为主。如果要使用spawn agent,需要给agent足够的时间去运行。 防御性编程边界 不要写冗余代码和过度防御性编程代码,技术品味包括但不限于: 1. 不要对已知类型使用 getattr 2. 过度isinstance 3. 过度try except 4. 不要为简单方法写冗余 docstring 5. ...等等 # ⭐️⭐️⭐️ Inline override for the history compaction prompt. Default: unset. # compact_prompt = "" # Override built-in base instructions with a file path. Default: unset. # experimental_instructions_file = "/absolute/or/relative/path/to/instructions.txt" # Load the compact prompt override from a file. Default: unset. # experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
--【拾柒】--: # 默认是on-request, - untrusted: only known-safe read-only commands auto-run; others prompt # - on-failure: auto-run in sandbox; prompt only on failure for escalation # - on-request: model decides when to ask (default) # - never: never prompt (risky) project_doc_fallback_filenames = ["CLAUDE.md"] # agents.md找不到,则找claude.md approval_policy = "on-request" # 默认 sandbox_mode = "danger-full-access" # 开大点好,文件系统/网络沙箱策略: read-only | workspace-write | danger-full-access (无沙箱,极其危险)
--【拾捌】--:
感谢分享
--【拾玖】--: # Shell Environment Policy for spawned processes (table)
--【贰拾】--: [shell_environment_policy] # Shell环境配置 # 环境变量继承策略inherit: all (default) | core | none inherit = "all" # 可以全给他看 # 是否忽略默认的 KEY/SECRET/TOKEN Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true ignore_default_excludes = true # 意思是可以给他看 # Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: [] # exclude = [] # Explicit key/value overrides (always win). Default: {} # set = {} # Whitelist; if non-empty, keep only matching vars. Default: [] # include_only = [] # Experimental: run via user shell profile. Default: false # experimental_use_profile = false
【21】 # Instruction Overrides
【22】 # Project Controls
【23】 [sandbox_workspace_write] # Additional writable roots beyond the workspace (cwd). Default: [] 例如在/root/paddlejob/RLHF 启动,但需要让他访问 /root/paddlejob 下的其他文件夹,可以在这里加上 /root/paddlejob # writable_roots = [] network_access = true # # Allow outbound network access inside the sandbox. Default: false
【24】 model_reasoning_effort = "xhigh" # 推理努力程度: minimal | low | medium | high | xhigh (默认: medium; gpt-5.2-codex和gpt-5.2上默认xhigh) model_reasoning_summary = "detailed" # 模型输出思维链的summary风格,可以是auto | concise | detailed | none (default: auto) # Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium) model_verbosity = "high" # 如有需要可以开大哦!!!low则 Shorten responses model_supports_reasoning_summaries = true # Force reasoning service_tier = "fast" # 开启后会变快哦,用量2倍
【25】 # External notifier program (argv array). When unset: disabled. # Example: notify = ["notify-send", "Codex"] [tui] # Send desktop notifications when approvals are required or a turn completes. # Defaults to false. notifications = true # 使用包装脚本以集中配置通知偏好(声音/图标/分组等) # notify = [ ] # linux机器不开 wrapper.sh
【26】
感谢佬友分享
【27】
感谢大佬分享,赶紧抄起来
【28】
感谢分享
【29】
感谢佬友分享,好详细~
【30】
话说有没有可以不让它跑1m上下文的设置呢?
【31】
感谢大佬!
【32】 justin666:
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4 model_auto_compact_token_limit = 350000 # for gpt-5.4⭐️虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
model_context_window = 1000000 # 模型上下文窗口大小,默认1000000(1M); gpt-5.4
model_auto_compact_token_limit = 350000 # for gpt-5.4虽然是1M ,但是有效注意力不够,可以自己网上查,不建议开的太高
这两段是Windows也可以通用吧
【33】
感谢佬友分享
【34】
感谢佬友分享
【35】
感谢分享
【36】 # Model Providers (extend/override built-ins)
【37】 [features] shell_tool = true # 启用 shell 工具。默认: true apply_patch_freeform = true # 通过自由格式编辑路径包含apply_patch(影响默认工具集)。默认: false shell_snapshot = true # 启用shell快照功能。默认: false undo = true # 启用undo功能。默认: true unified_exec = true # 使用统一 PTY 执行工具 # exec_policy = true # Enforce rules checks for shell/unified_exec multi_agent = true steer = true prevent_idle_sleep = true # voice_transcription = true child_agents_md = true memories = true # 开启记忆 ⭐️⭐️⭐️ sqlite = true # 其实可以不设置吧,开了也行 fast_mode = true # 必开 -- 当然会让gpt-5.4用量2倍 [memories] # ⭐️⭐️⭐️,强烈建议用新模型来总结memories consolidation_model = "gpt-5.4" extract_model = "gpt-5.4" # generate_memories = true # 默认true # use_memories = true # 默认true,表示把 memory_summary.md 注入 developer instructions max_raw_memories_for_consolidation = 512 max_unused_days = 30 # 默认 30 max_rollout_age_days = 45 # 默认 30 # max_rollouts_per_startup = 16 # 默认 16 # min_rollout_idle_hours = 6 # 默认 6 [agents] max_threads = 12 max_depth = 2 [agents.default] description = "General-purpose helper." model = "gpt-5.3-codex" model_reasoning_effort = "xhigh" [agents.worker] description = "Do not use this agent, it's silly named and not optimized for any particular role." model = "gpt-5.2" model_reasoning_effort = "high" [agents.explorer] description = "Fast codebase explorer for read-heavy tasks." model = "gpt-5.3-codex" model_reasoning_effort = "xhigh" sandbox_mode = "read-only" [agents.awaiter] description = "Long-running command and task monitoring role." model = "gpt-5.4" model_reasoning_effort = "high" [agents.reviewer] description = "Reviewer agent for code reviews and feedback." model = "gpt-5.4" model_reasoning_effort = "high"
【38】
感谢佬友
【39】
压缩那个你拉到顶咯
【40】 # Centralized Feature Flags (preferred)

