Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
陈曦
sub2api
Commits
4de4823a
Commit
4de4823a
authored
Apr 09, 2026
by
IanShaw027
Browse files
feat(openai): 支持messages模型映射与instructions模板注入
parent
23c4d592
Changes
11
Hide whitespace changes
Inline
Side-by-side
backend/internal/config/config.go
View file @
4de4823a
...
@@ -318,6 +318,12 @@ type GatewayConfig struct {
...
@@ -318,6 +318,12 @@ type GatewayConfig struct {
// ForceCodexCLI: 强制将 OpenAI `/v1/responses` 请求按 Codex CLI 处理。
// ForceCodexCLI: 强制将 OpenAI `/v1/responses` 请求按 Codex CLI 处理。
// 用于网关未透传/改写 User-Agent 时的兼容兜底(默认关闭,避免影响其他客户端)。
// 用于网关未透传/改写 User-Agent 时的兼容兜底(默认关闭,避免影响其他客户端)。
ForceCodexCLI
bool
`mapstructure:"force_codex_cli"`
ForceCodexCLI
bool
`mapstructure:"force_codex_cli"`
// ForcedCodexInstructionsTemplateFile: 服务端强制附加到 Codex 顶层 instructions 的模板文件路径。
// 模板渲染后会直接覆盖最终 instructions;若需要保留客户端 system 转换结果,请在模板中显式引用 {{ .ExistingInstructions }}。
ForcedCodexInstructionsTemplateFile
string
`mapstructure:"forced_codex_instructions_template_file"`
// ForcedCodexInstructionsTemplate: 启动时从模板文件读取并缓存的模板内容。
// 该字段不直接参与配置反序列化,仅用于请求热路径避免重复读盘。
ForcedCodexInstructionsTemplate
string
`mapstructure:"-"`
// OpenAIPassthroughAllowTimeoutHeaders: OpenAI 透传模式是否放行客户端超时头
// OpenAIPassthroughAllowTimeoutHeaders: OpenAI 透传模式是否放行客户端超时头
// 关闭(默认)可避免 x-stainless-timeout 等头导致上游提前断流。
// 关闭(默认)可避免 x-stainless-timeout 等头导致上游提前断流。
OpenAIPassthroughAllowTimeoutHeaders
bool
`mapstructure:"openai_passthrough_allow_timeout_headers"`
OpenAIPassthroughAllowTimeoutHeaders
bool
`mapstructure:"openai_passthrough_allow_timeout_headers"`
...
@@ -983,6 +989,14 @@ func load(allowMissingJWTSecret bool) (*Config, error) {
...
@@ -983,6 +989,14 @@ func load(allowMissingJWTSecret bool) (*Config, error) {
cfg
.
Log
.
Environment
=
strings
.
TrimSpace
(
cfg
.
Log
.
Environment
)
cfg
.
Log
.
Environment
=
strings
.
TrimSpace
(
cfg
.
Log
.
Environment
)
cfg
.
Log
.
StacktraceLevel
=
strings
.
ToLower
(
strings
.
TrimSpace
(
cfg
.
Log
.
StacktraceLevel
))
cfg
.
Log
.
StacktraceLevel
=
strings
.
ToLower
(
strings
.
TrimSpace
(
cfg
.
Log
.
StacktraceLevel
))
cfg
.
Log
.
Output
.
FilePath
=
strings
.
TrimSpace
(
cfg
.
Log
.
Output
.
FilePath
)
cfg
.
Log
.
Output
.
FilePath
=
strings
.
TrimSpace
(
cfg
.
Log
.
Output
.
FilePath
)
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
=
strings
.
TrimSpace
(
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
)
if
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
!=
""
{
content
,
err
:=
os
.
ReadFile
(
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
)
if
err
!=
nil
{
return
nil
,
fmt
.
Errorf
(
"read forced codex instructions template %q: %w"
,
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
,
err
)
}
cfg
.
Gateway
.
ForcedCodexInstructionsTemplate
=
string
(
content
)
}
// 兼容旧键 gateway.openai_ws.sticky_previous_response_ttl_seconds。
// 兼容旧键 gateway.openai_ws.sticky_previous_response_ttl_seconds。
// 新键未配置(<=0)时回退旧键;新键优先。
// 新键未配置(<=0)时回退旧键;新键优先。
...
...
backend/internal/config/config_test.go
View file @
4de4823a
package
config
package
config
import
(
import
(
"os"
"path/filepath"
"strings"
"strings"
"testing"
"testing"
"time"
"time"
...
@@ -223,6 +225,23 @@ func TestLoadSchedulingConfigFromEnv(t *testing.T) {
...
@@ -223,6 +225,23 @@ func TestLoadSchedulingConfigFromEnv(t *testing.T) {
}
}
}
}
func
TestLoadForcedCodexInstructionsTemplate
(
t
*
testing
.
T
)
{
resetViperWithJWTSecret
(
t
)
tempDir
:=
t
.
TempDir
()
templatePath
:=
filepath
.
Join
(
tempDir
,
"codex-instructions.md.tmpl"
)
configPath
:=
filepath
.
Join
(
tempDir
,
"config.yaml"
)
require
.
NoError
(
t
,
os
.
WriteFile
(
templatePath
,
[]
byte
(
"server-prefix
\n\n
{{ .ExistingInstructions }}"
),
0
o644
))
require
.
NoError
(
t
,
os
.
WriteFile
(
configPath
,
[]
byte
(
"gateway:
\n
forced_codex_instructions_template_file:
\"
"
+
templatePath
+
"
\"\n
"
),
0
o644
))
t
.
Setenv
(
"DATA_DIR"
,
tempDir
)
cfg
,
err
:=
Load
()
require
.
NoError
(
t
,
err
)
require
.
Equal
(
t
,
templatePath
,
cfg
.
Gateway
.
ForcedCodexInstructionsTemplateFile
)
require
.
Equal
(
t
,
"server-prefix
\n\n
{{ .ExistingInstructions }}"
,
cfg
.
Gateway
.
ForcedCodexInstructionsTemplate
)
}
func
TestLoadDefaultSecurityToggles
(
t
*
testing
.
T
)
{
func
TestLoadDefaultSecurityToggles
(
t
*
testing
.
T
)
{
resetViperWithJWTSecret
(
t
)
resetViperWithJWTSecret
(
t
)
...
...
backend/internal/handler/openai_gateway_handler.go
View file @
4de4823a
...
@@ -47,6 +47,13 @@ func resolveOpenAIForwardDefaultMappedModel(apiKey *service.APIKey, fallbackMode
...
@@ -47,6 +47,13 @@ func resolveOpenAIForwardDefaultMappedModel(apiKey *service.APIKey, fallbackMode
return
strings
.
TrimSpace
(
apiKey
.
Group
.
DefaultMappedModel
)
return
strings
.
TrimSpace
(
apiKey
.
Group
.
DefaultMappedModel
)
}
}
func
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
*
service
.
APIKey
,
requestedModel
string
)
string
{
if
apiKey
==
nil
||
apiKey
.
Group
==
nil
{
return
""
}
return
strings
.
TrimSpace
(
apiKey
.
Group
.
ResolveMessagesDispatchModel
(
requestedModel
))
}
// NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler
// NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler
func
NewOpenAIGatewayHandler
(
func
NewOpenAIGatewayHandler
(
gatewayService
*
service
.
OpenAIGatewayService
,
gatewayService
*
service
.
OpenAIGatewayService
,
...
@@ -551,6 +558,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -551,6 +558,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
}
}
reqModel
:=
modelResult
.
String
()
reqModel
:=
modelResult
.
String
()
routingModel
:=
service
.
NormalizeOpenAICompatRequestedModel
(
reqModel
)
routingModel
:=
service
.
NormalizeOpenAICompatRequestedModel
(
reqModel
)
preferredMappedModel
:=
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
reqModel
)
reqStream
:=
gjson
.
GetBytes
(
body
,
"stream"
)
.
Bool
()
reqStream
:=
gjson
.
GetBytes
(
body
,
"stream"
)
.
Bool
()
reqLog
=
reqLog
.
With
(
zap
.
String
(
"model"
,
reqModel
),
zap
.
Bool
(
"stream"
,
reqStream
))
reqLog
=
reqLog
.
With
(
zap
.
String
(
"model"
,
reqModel
),
zap
.
Bool
(
"stream"
,
reqStream
))
...
@@ -609,17 +617,20 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -609,17 +617,20 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
failedAccountIDs
:=
make
(
map
[
int64
]
struct
{})
failedAccountIDs
:=
make
(
map
[
int64
]
struct
{})
sameAccountRetryCount
:=
make
(
map
[
int64
]
int
)
sameAccountRetryCount
:=
make
(
map
[
int64
]
int
)
var
lastFailoverErr
*
service
.
UpstreamFailoverError
var
lastFailoverErr
*
service
.
UpstreamFailoverError
effectiveMappedModel
:=
preferredMappedModel
for
{
for
{
// 清除上一次迭代的降级模型标记,避免残留影响本次迭代
currentRoutingModel
:=
routingModel
c
.
Set
(
"openai_messages_fallback_model"
,
""
)
if
effectiveMappedModel
!=
""
{
currentRoutingModel
=
effectiveMappedModel
}
reqLog
.
Debug
(
"openai_messages.account_selecting"
,
zap
.
Int
(
"excluded_account_count"
,
len
(
failedAccountIDs
)))
reqLog
.
Debug
(
"openai_messages.account_selecting"
,
zap
.
Int
(
"excluded_account_count"
,
len
(
failedAccountIDs
)))
selection
,
scheduleDecision
,
err
:=
h
.
gatewayService
.
SelectAccountWithScheduler
(
selection
,
scheduleDecision
,
err
:=
h
.
gatewayService
.
SelectAccountWithScheduler
(
c
.
Request
.
Context
(),
c
.
Request
.
Context
(),
apiKey
.
GroupID
,
apiKey
.
GroupID
,
""
,
// no previous_response_id
""
,
// no previous_response_id
sessionHash
,
sessionHash
,
r
outingModel
,
currentR
outingModel
,
failedAccountIDs
,
failedAccountIDs
,
service
.
OpenAIUpstreamTransportAny
,
service
.
OpenAIUpstreamTransportAny
,
)
)
...
@@ -628,29 +639,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -628,29 +639,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
zap
.
Error
(
err
),
zap
.
Error
(
err
),
zap
.
Int
(
"excluded_account_count"
,
len
(
failedAccountIDs
)),
zap
.
Int
(
"excluded_account_count"
,
len
(
failedAccountIDs
)),
)
)
// 首次调度失败 + 有默认映射模型 → 用默认模型重试
if
len
(
failedAccountIDs
)
==
0
{
if
len
(
failedAccountIDs
)
==
0
{
defaultModel
:=
""
if
apiKey
.
Group
!=
nil
{
defaultModel
=
apiKey
.
Group
.
DefaultMappedModel
}
if
defaultModel
!=
""
&&
defaultModel
!=
routingModel
{
reqLog
.
Info
(
"openai_messages.fallback_to_default_model"
,
zap
.
String
(
"default_mapped_model"
,
defaultModel
),
)
selection
,
scheduleDecision
,
err
=
h
.
gatewayService
.
SelectAccountWithScheduler
(
c
.
Request
.
Context
(),
apiKey
.
GroupID
,
""
,
sessionHash
,
defaultModel
,
failedAccountIDs
,
service
.
OpenAIUpstreamTransportAny
,
)
if
err
==
nil
&&
selection
!=
nil
{
c
.
Set
(
"openai_messages_fallback_model"
,
defaultModel
)
}
}
if
err
!=
nil
{
if
err
!=
nil
{
h
.
anthropicStreamingAwareError
(
c
,
http
.
StatusServiceUnavailable
,
"api_error"
,
"Service temporarily unavailable"
,
streamStarted
)
h
.
anthropicStreamingAwareError
(
c
,
http
.
StatusServiceUnavailable
,
"api_error"
,
"Service temporarily unavailable"
,
streamStarted
)
return
return
...
@@ -682,9 +671,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -682,9 +671,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
service
.
SetOpsLatencyMs
(
c
,
service
.
OpsRoutingLatencyMsKey
,
time
.
Since
(
routingStart
)
.
Milliseconds
())
service
.
SetOpsLatencyMs
(
c
,
service
.
OpsRoutingLatencyMsKey
,
time
.
Since
(
routingStart
)
.
Milliseconds
())
forwardStart
:=
time
.
Now
()
forwardStart
:=
time
.
Now
()
// Forward 层需要始终拿到 group 默认映射模型,这样未命中账号级映射的
defaultMappedModel
:=
strings
.
TrimSpace
(
effectiveMappedModel
)
// Claude 兼容模型才不会在后续 Codex 规范化中意外退化到 gpt-5.1。
defaultMappedModel
:=
resolveOpenAIForwardDefaultMappedModel
(
apiKey
,
c
.
GetString
(
"openai_messages_fallback_model"
))
// 应用渠道模型映射到请求体
// 应用渠道模型映射到请求体
forwardBody
:=
body
forwardBody
:=
body
if
channelMappingMsg
.
Mapped
{
if
channelMappingMsg
.
Mapped
{
...
...
backend/internal/handler/openai_gateway_handler_test.go
View file @
4de4823a
...
@@ -360,7 +360,7 @@ func TestResolveOpenAIForwardDefaultMappedModel(t *testing.T) {
...
@@ -360,7 +360,7 @@ func TestResolveOpenAIForwardDefaultMappedModel(t *testing.T) {
require
.
Equal
(
t
,
"gpt-5.2"
,
resolveOpenAIForwardDefaultMappedModel
(
apiKey
,
" gpt-5.2 "
))
require
.
Equal
(
t
,
"gpt-5.2"
,
resolveOpenAIForwardDefaultMappedModel
(
apiKey
,
" gpt-5.2 "
))
})
})
t
.
Run
(
"uses_group_default_
on_normal_path
"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"uses_group_default_
when_explicit_fallback_absent
"
,
func
(
t
*
testing
.
T
)
{
apiKey
:=
&
service
.
APIKey
{
apiKey
:=
&
service
.
APIKey
{
Group
:
&
service
.
Group
{
DefaultMappedModel
:
"gpt-5.4"
},
Group
:
&
service
.
Group
{
DefaultMappedModel
:
"gpt-5.4"
},
}
}
...
@@ -376,6 +376,45 @@ func TestResolveOpenAIForwardDefaultMappedModel(t *testing.T) {
...
@@ -376,6 +376,45 @@ func TestResolveOpenAIForwardDefaultMappedModel(t *testing.T) {
})
})
}
}
func
TestResolveOpenAIMessagesDispatchMappedModel
(
t
*
testing
.
T
)
{
t
.
Run
(
"exact_claude_model_override_wins"
,
func
(
t
*
testing
.
T
)
{
apiKey
:=
&
service
.
APIKey
{
Group
:
&
service
.
Group
{
MessagesDispatchModelConfig
:
service
.
OpenAIMessagesDispatchModelConfig
{
SonnetMappedModel
:
"gpt-5.2"
,
ExactModelMappings
:
map
[
string
]
string
{
"claude-sonnet-4-5-20250929"
:
"gpt-5.4-mini-high"
,
},
},
},
}
require
.
Equal
(
t
,
"gpt-5.4-mini"
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"claude-sonnet-4-5-20250929"
))
})
t
.
Run
(
"uses_family_default_when_no_override"
,
func
(
t
*
testing
.
T
)
{
apiKey
:=
&
service
.
APIKey
{
Group
:
&
service
.
Group
{}}
require
.
Equal
(
t
,
"gpt-5.4"
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"claude-opus-4-6"
))
require
.
Equal
(
t
,
"gpt-5.3-codex"
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"claude-sonnet-4-5-20250929"
))
require
.
Equal
(
t
,
"gpt-5.4-mini"
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"claude-haiku-4-5-20251001"
))
})
t
.
Run
(
"returns_empty_for_non_claude_or_missing_group"
,
func
(
t
*
testing
.
T
)
{
require
.
Empty
(
t
,
resolveOpenAIMessagesDispatchMappedModel
(
nil
,
"claude-sonnet-4-5-20250929"
))
require
.
Empty
(
t
,
resolveOpenAIMessagesDispatchMappedModel
(
&
service
.
APIKey
{},
"claude-sonnet-4-5-20250929"
))
require
.
Empty
(
t
,
resolveOpenAIMessagesDispatchMappedModel
(
&
service
.
APIKey
{
Group
:
&
service
.
Group
{}},
"gpt-5.4"
))
})
t
.
Run
(
"does_not_fall_back_to_group_default_mapped_model"
,
func
(
t
*
testing
.
T
)
{
apiKey
:=
&
service
.
APIKey
{
Group
:
&
service
.
Group
{
DefaultMappedModel
:
"gpt-5.4"
,
},
}
require
.
Empty
(
t
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"gpt-5.4"
))
require
.
Equal
(
t
,
"gpt-5.3-codex"
,
resolveOpenAIMessagesDispatchMappedModel
(
apiKey
,
"claude-sonnet-4-5-20250929"
))
})
}
func
TestOpenAIResponses_MissingDependencies_ReturnsServiceUnavailable
(
t
*
testing
.
T
)
{
func
TestOpenAIResponses_MissingDependencies_ReturnsServiceUnavailable
(
t
*
testing
.
T
)
{
gin
.
SetMode
(
gin
.
TestMode
)
gin
.
SetMode
(
gin
.
TestMode
)
...
...
backend/internal/pkg/apicompat/types.go
View file @
4de4823a
...
@@ -28,7 +28,7 @@ type AnthropicRequest struct {
...
@@ -28,7 +28,7 @@ type AnthropicRequest struct {
// AnthropicOutputConfig controls output generation parameters.
// AnthropicOutputConfig controls output generation parameters.
type
AnthropicOutputConfig
struct
{
type
AnthropicOutputConfig
struct
{
Effort
string
`json:"effort,omitempty"`
// "low" | "medium" | "high"
Effort
string
`json:"effort,omitempty"`
// "low" | "medium" | "high"
| "max"
}
}
// AnthropicThinking configures extended thinking in the Anthropic API.
// AnthropicThinking configures extended thinking in the Anthropic API.
...
@@ -167,7 +167,7 @@ type ResponsesRequest struct {
...
@@ -167,7 +167,7 @@ type ResponsesRequest struct {
// ResponsesReasoning configures reasoning effort in the Responses API.
// ResponsesReasoning configures reasoning effort in the Responses API.
type
ResponsesReasoning
struct
{
type
ResponsesReasoning
struct
{
Effort
string
`json:"effort"`
// "low" | "medium" | "high"
Effort
string
`json:"effort"`
// "low" | "medium" | "high"
| "xhigh"
Summary
string
`json:"summary,omitempty"`
// "auto" | "concise" | "detailed"
Summary
string
`json:"summary,omitempty"`
// "auto" | "concise" | "detailed"
}
}
...
@@ -345,7 +345,7 @@ type ChatCompletionsRequest struct {
...
@@ -345,7 +345,7 @@ type ChatCompletionsRequest struct {
StreamOptions
*
ChatStreamOptions
`json:"stream_options,omitempty"`
StreamOptions
*
ChatStreamOptions
`json:"stream_options,omitempty"`
Tools
[]
ChatTool
`json:"tools,omitempty"`
Tools
[]
ChatTool
`json:"tools,omitempty"`
ToolChoice
json
.
RawMessage
`json:"tool_choice,omitempty"`
ToolChoice
json
.
RawMessage
`json:"tool_choice,omitempty"`
ReasoningEffort
string
`json:"reasoning_effort,omitempty"`
// "low" | "medium" | "high"
ReasoningEffort
string
`json:"reasoning_effort,omitempty"`
// "low" | "medium" | "high"
| "xhigh"
ServiceTier
string
`json:"service_tier,omitempty"`
ServiceTier
string
`json:"service_tier,omitempty"`
Stop
json
.
RawMessage
`json:"stop,omitempty"`
// string or []string
Stop
json
.
RawMessage
`json:"stop,omitempty"`
// string or []string
...
...
backend/internal/service/openai_codex_instructions_template.go
0 → 100644
View file @
4de4823a
package
service
import
(
"bytes"
"fmt"
"strings"
"text/template"
)
// forcedCodexInstructionsTemplateData carries the values exposed to the forced
// Codex instructions template. Field names are part of the template contract
// ({{ .ExistingInstructions }}, etc.) and must not be renamed.
type forcedCodexInstructionsTemplateData struct {
	ExistingInstructions string // client instructions/system already converted for Codex
	OriginalModel        string // model as originally requested by the client
	NormalizedModel      string // normalized routing model
	BillingModel         string // model used for billing
	UpstreamModel        string // final model sent upstream
}

// applyForcedCodexInstructionsTemplate renders templateText with data and, when
// the trimmed result is non-empty and differs from the current (trimmed)
// reqBody["instructions"], overwrites that key with the rendered text. It
// reports whether reqBody was modified; render/parse failures are returned
// unchanged from the renderer.
func applyForcedCodexInstructionsTemplate(
	reqBody map[string]any,
	templateText string,
	data forcedCodexInstructionsTemplateData,
) (bool, error) {
	out, err := renderForcedCodexInstructionsTemplate(templateText, data)
	if err != nil {
		return false, err
	}
	if out == "" {
		// Empty output means "nothing to force"; leave the request body alone.
		return false, nil
	}
	current, _ := reqBody["instructions"].(string)
	if strings.TrimSpace(current) == out {
		// Already identical after trimming — skip the redundant write.
		return false, nil
	}
	reqBody["instructions"] = out
	return true, nil
}

// renderForcedCodexInstructionsTemplate parses templateText as a text/template
// (missing map keys render as zero values), executes it with data, and returns
// the output trimmed of surrounding whitespace. Errors are wrapped with the
// failing phase (parse vs render).
func renderForcedCodexInstructionsTemplate(
	templateText string,
	data forcedCodexInstructionsTemplateData,
) (string, error) {
	parsed, parseErr := template.New("forced_codex_instructions").
		Option("missingkey=zero").
		Parse(templateText)
	if parseErr != nil {
		return "", fmt.Errorf("parse forced codex instructions template: %w", parseErr)
	}
	var out bytes.Buffer
	if execErr := parsed.Execute(&out, data); execErr != nil {
		return "", fmt.Errorf("render forced codex instructions template: %w", execErr)
	}
	return strings.TrimSpace(out.String()), nil
}
backend/internal/service/openai_compat_model_test.go
View file @
4de4823a
...
@@ -6,9 +6,12 @@ import (
...
@@ -6,9 +6,12 @@ import (
"io"
"io"
"net/http"
"net/http"
"net/http/httptest"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"strings"
"testing"
"testing"
"github.com/Wei-Shaw/sub2api/internal/config"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/require"
...
@@ -127,3 +130,101 @@ func TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh(t *testing.T
...
@@ -127,3 +130,101 @@ func TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh(t *testing.T
t
.
Logf
(
"upstream body: %s"
,
string
(
upstream
.
lastBody
))
t
.
Logf
(
"upstream body: %s"
,
string
(
upstream
.
lastBody
))
t
.
Logf
(
"response body: %s"
,
rec
.
Body
.
String
())
t
.
Logf
(
"response body: %s"
,
rec
.
Body
.
String
())
}
}
func
TestForwardAsAnthropic_ForcedCodexInstructionsTemplatePrependsRenderedInstructions
(
t
*
testing
.
T
)
{
t
.
Parallel
()
gin
.
SetMode
(
gin
.
TestMode
)
templateDir
:=
t
.
TempDir
()
templatePath
:=
filepath
.
Join
(
templateDir
,
"codex-instructions.md.tmpl"
)
require
.
NoError
(
t
,
os
.
WriteFile
(
templatePath
,
[]
byte
(
"server-prefix
\n\n
{{ .ExistingInstructions }}"
),
0
o644
))
rec
:=
httptest
.
NewRecorder
()
c
,
_
:=
gin
.
CreateTestContext
(
rec
)
body
:=
[]
byte
(
`{"model":"gpt-5.4","max_tokens":16,"system":"client-system","messages":[{"role":"user","content":"hello"}],"stream":false}`
)
c
.
Request
=
httptest
.
NewRequest
(
http
.
MethodPost
,
"/v1/messages"
,
bytes
.
NewReader
(
body
))
c
.
Request
.
Header
.
Set
(
"Content-Type"
,
"application/json"
)
upstreamBody
:=
strings
.
Join
([]
string
{
`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`
,
""
,
"data: [DONE]"
,
""
,
},
"
\n
"
)
upstream
:=
&
httpUpstreamRecorder
{
resp
:
&
http
.
Response
{
StatusCode
:
http
.
StatusOK
,
Header
:
http
.
Header
{
"Content-Type"
:
[]
string
{
"text/event-stream"
},
"x-request-id"
:
[]
string
{
"rid_forced"
}},
Body
:
io
.
NopCloser
(
strings
.
NewReader
(
upstreamBody
)),
}}
svc
:=
&
OpenAIGatewayService
{
cfg
:
&
config
.
Config
{
Gateway
:
config
.
GatewayConfig
{
ForcedCodexInstructionsTemplateFile
:
templatePath
,
ForcedCodexInstructionsTemplate
:
"server-prefix
\n\n
{{ .ExistingInstructions }}"
,
}},
httpUpstream
:
upstream
,
}
account
:=
&
Account
{
ID
:
1
,
Name
:
"openai-oauth"
,
Platform
:
PlatformOpenAI
,
Type
:
AccountTypeOAuth
,
Concurrency
:
1
,
Credentials
:
map
[
string
]
any
{
"access_token"
:
"oauth-token"
,
"chatgpt_account_id"
:
"chatgpt-acc"
,
},
}
result
,
err
:=
svc
.
ForwardAsAnthropic
(
context
.
Background
(),
c
,
account
,
body
,
""
,
"gpt-5.1"
)
require
.
NoError
(
t
,
err
)
require
.
NotNil
(
t
,
result
)
require
.
Equal
(
t
,
"server-prefix
\n\n
client-system"
,
gjson
.
GetBytes
(
upstream
.
lastBody
,
"instructions"
)
.
String
())
}
func
TestForwardAsAnthropic_ForcedCodexInstructionsTemplateUsesCachedTemplateContent
(
t
*
testing
.
T
)
{
t
.
Parallel
()
gin
.
SetMode
(
gin
.
TestMode
)
rec
:=
httptest
.
NewRecorder
()
c
,
_
:=
gin
.
CreateTestContext
(
rec
)
body
:=
[]
byte
(
`{"model":"gpt-5.4","max_tokens":16,"system":"client-system","messages":[{"role":"user","content":"hello"}],"stream":false}`
)
c
.
Request
=
httptest
.
NewRequest
(
http
.
MethodPost
,
"/v1/messages"
,
bytes
.
NewReader
(
body
))
c
.
Request
.
Header
.
Set
(
"Content-Type"
,
"application/json"
)
upstreamBody
:=
strings
.
Join
([]
string
{
`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`
,
""
,
"data: [DONE]"
,
""
,
},
"
\n
"
)
upstream
:=
&
httpUpstreamRecorder
{
resp
:
&
http
.
Response
{
StatusCode
:
http
.
StatusOK
,
Header
:
http
.
Header
{
"Content-Type"
:
[]
string
{
"text/event-stream"
},
"x-request-id"
:
[]
string
{
"rid_forced_cached"
}},
Body
:
io
.
NopCloser
(
strings
.
NewReader
(
upstreamBody
)),
}}
svc
:=
&
OpenAIGatewayService
{
cfg
:
&
config
.
Config
{
Gateway
:
config
.
GatewayConfig
{
ForcedCodexInstructionsTemplateFile
:
"/path/that/should/not/be/read.tmpl"
,
ForcedCodexInstructionsTemplate
:
"cached-prefix
\n\n
{{ .ExistingInstructions }}"
,
}},
httpUpstream
:
upstream
,
}
account
:=
&
Account
{
ID
:
1
,
Name
:
"openai-oauth"
,
Platform
:
PlatformOpenAI
,
Type
:
AccountTypeOAuth
,
Concurrency
:
1
,
Credentials
:
map
[
string
]
any
{
"access_token"
:
"oauth-token"
,
"chatgpt_account_id"
:
"chatgpt-acc"
,
},
}
result
,
err
:=
svc
.
ForwardAsAnthropic
(
context
.
Background
(),
c
,
account
,
body
,
""
,
"gpt-5.1"
)
require
.
NoError
(
t
,
err
)
require
.
NotNil
(
t
,
result
)
require
.
Equal
(
t
,
"cached-prefix
\n\n
client-system"
,
gjson
.
GetBytes
(
upstream
.
lastBody
,
"instructions"
)
.
String
())
}
backend/internal/service/openai_gateway_messages.go
View file @
4de4823a
...
@@ -86,6 +86,24 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
...
@@ -86,6 +86,24 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
return
nil
,
fmt
.
Errorf
(
"unmarshal for codex transform: %w"
,
err
)
return
nil
,
fmt
.
Errorf
(
"unmarshal for codex transform: %w"
,
err
)
}
}
codexResult
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
codexResult
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
forcedTemplateText
:=
""
if
s
.
cfg
!=
nil
{
forcedTemplateText
=
s
.
cfg
.
Gateway
.
ForcedCodexInstructionsTemplate
}
templateUpstreamModel
:=
upstreamModel
if
codexResult
.
NormalizedModel
!=
""
{
templateUpstreamModel
=
codexResult
.
NormalizedModel
}
existingInstructions
,
_
:=
reqBody
[
"instructions"
]
.
(
string
)
if
_
,
err
:=
applyForcedCodexInstructionsTemplate
(
reqBody
,
forcedTemplateText
,
forcedCodexInstructionsTemplateData
{
ExistingInstructions
:
strings
.
TrimSpace
(
existingInstructions
),
OriginalModel
:
originalModel
,
NormalizedModel
:
normalizedModel
,
BillingModel
:
billingModel
,
UpstreamModel
:
templateUpstreamModel
,
});
err
!=
nil
{
return
nil
,
err
}
if
codexResult
.
NormalizedModel
!=
""
{
if
codexResult
.
NormalizedModel
!=
""
{
upstreamModel
=
codexResult
.
NormalizedModel
upstreamModel
=
codexResult
.
NormalizedModel
}
}
...
...
deploy/codex-instructions.md.tmpl
0 → 100644
View file @
4de4823a
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
{{ if .ExistingInstructions }}
{{ .ExistingInstructions }}
{{ end }}
deploy/config.example.yaml
View file @
4de4823a
...
@@ -202,6 +202,32 @@ gateway:
...
@@ -202,6 +202,32 @@ gateway:
#
#
# 注意:开启后会影响所有客户端的行为(不仅限于 VS Code / Codex CLI),请谨慎开启。
# 注意:开启后会影响所有客户端的行为(不仅限于 VS Code / Codex CLI),请谨慎开启。
force_codex_cli
:
false
force_codex_cli
:
false
# Optional: template file used to build the final top-level Codex `instructions`.
# 可选:用于构建最终 Codex 顶层 `instructions` 的模板文件路径。
#
# This is applied on the `/v1/messages -> Responses/Codex` conversion path,
# after Claude `system` has already been normalized into Codex `instructions`.
# 该模板作用于 `/v1/messages -> Responses/Codex` 转换链路,且发生在 Claude `system`
# 已经被归一化为 Codex `instructions` 之后。
#
# The template can reference:
# 模板可引用:
# - {{ .ExistingInstructions }} : converted client instructions/system
# - {{ .OriginalModel }} : original requested model
# - {{ .NormalizedModel }} : normalized routing model
# - {{ .BillingModel }} : billing model
# - {{ .UpstreamModel }} : final upstream model
#
# If you want to preserve client system prompts, keep {{ .ExistingInstructions }}
# somewhere in the template. If omitted, the template output fully replaces it.
# 如需保留客户端 system 提示词,请在模板中显式包含 {{ .ExistingInstructions }}。
# 若省略,则模板输出会完全覆盖它。
#
# Docker users can mount a host file to /app/data/codex-instructions.md.tmpl
# and point this field there.
# Docker 用户可将宿主机文件挂载到 /app/data/codex-instructions.md.tmpl,
# 然后把本字段指向该路径。
forced_codex_instructions_template_file
:
"
"
# OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout)
# OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout)
# 默认 false:过滤超时头,降低上游提前断流风险。
# 默认 false:过滤超时头,降低上游提前断流风险。
openai_passthrough_allow_timeout_headers
:
false
openai_passthrough_allow_timeout_headers
:
false
...
@@ -347,12 +373,6 @@ gateway:
...
@@ -347,12 +373,6 @@ gateway:
# Enable batch load calculation for scheduling
# Enable batch load calculation for scheduling
# 启用调度批量负载计算
# 启用调度批量负载计算
load_batch_enabled
:
true
load_batch_enabled
:
true
# Snapshot bucket MGET chunk size
# 调度快照分桶读取时的 MGET 分块大小
snapshot_mget_chunk_size
:
128
# Snapshot bucket write chunk size
# 调度快照重建写入时的分块大小
snapshot_write_chunk_size
:
256
# Slot cleanup interval (duration)
# Slot cleanup interval (duration)
# 并发槽位清理周期(时间段)
# 并发槽位清理周期(时间段)
slot_cleanup_interval
:
30s
slot_cleanup_interval
:
30s
...
...
deploy/docker-compose.yml
View file @
4de4823a
...
@@ -31,6 +31,10 @@ services:
...
@@ -31,6 +31,10 @@ services:
# Optional: Mount custom config.yaml (uncomment and create the file first)
# Optional: Mount custom config.yaml (uncomment and create the file first)
# Copy config.example.yaml to config.yaml, modify it, then uncomment:
# Copy config.example.yaml to config.yaml, modify it, then uncomment:
# - ./config.yaml:/app/data/config.yaml
# - ./config.yaml:/app/data/config.yaml
# Optional: Mount a custom Codex instructions template file, then point
# gateway.forced_codex_instructions_template_file at /app/data/codex-instructions.md.tmpl
# in config.yaml.
# - ./codex-instructions.md.tmpl:/app/data/codex-instructions.md.tmpl:ro
environment
:
environment
:
# =======================================================================
# =======================================================================
# Auto Setup (REQUIRED for Docker deployment)
# Auto Setup (REQUIRED for Docker deployment)
...
@@ -146,7 +150,17 @@ services:
...
@@ -146,7 +150,17 @@ services:
networks
:
networks
:
-
sub2api-network
-
sub2api-network
healthcheck
:
healthcheck
:
test
:
[
"
CMD"
,
"
wget"
,
"
-q"
,
"
-T"
,
"
5"
,
"
-O"
,
"
/dev/null"
,
"
http://localhost:8080/health"
]
test
:
[
"
CMD"
,
"
wget"
,
"
-q"
,
"
-T"
,
"
5"
,
"
-O"
,
"
/dev/null"
,
"
http://localhost:8080/health"
,
]
interval
:
30s
interval
:
30s
timeout
:
10s
timeout
:
10s
retries
:
3
retries
:
3
...
@@ -177,11 +191,17 @@ services:
...
@@ -177,11 +191,17 @@ services:
networks
:
networks
:
-
sub2api-network
-
sub2api-network
healthcheck
:
healthcheck
:
test
:
[
"
CMD-SHELL"
,
"
pg_isready
-U
${POSTGRES_USER:-sub2api}
-d
${POSTGRES_DB:-sub2api}"
]
test
:
[
"
CMD-SHELL"
,
"
pg_isready
-U
${POSTGRES_USER:-sub2api}
-d
${POSTGRES_DB:-sub2api}"
,
]
interval
:
10s
interval
:
10s
timeout
:
5s
timeout
:
5s
retries
:
5
retries
:
5
start_period
:
10s
start_period
:
10s
ports
:
-
5432:5432
# 注意:不暴露端口到宿主机,应用通过内部网络连接
# 注意:不暴露端口到宿主机,应用通过内部网络连接
# 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"]
# 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"]
...
@@ -199,12 +219,12 @@ services:
...
@@ -199,12 +219,12 @@ services:
volumes
:
volumes
:
-
redis_data:/data
-
redis_data:/data
command
:
>
command
:
>
sh -c '
sh -c '
redis-server
redis-server
--save 60 1
--save 60 1
--appendonly yes
--appendonly yes
--appendfsync everysec
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
environment
:
environment
:
-
TZ=${TZ:-Asia/Shanghai}
-
TZ=${TZ:-Asia/Shanghai}
# REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
# REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
...
@@ -217,7 +237,8 @@ services:
...
@@ -217,7 +237,8 @@ services:
timeout
:
5s
timeout
:
5s
retries
:
5
retries
:
5
start_period
:
5s
start_period
:
5s
ports
:
-
6379:6379
# =============================================================================
# =============================================================================
# Volumes
# Volumes
# =============================================================================
# =============================================================================
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment