Unverified Commit 81b96ae1 authored by Wesley Liddick's avatar Wesley Liddick Committed by GitHub
Browse files

Merge pull request #1498 from aiexz/main

do not normalize model for openai API token based accounts
parents 7c60ee3c 3a07e92b
...@@ -275,6 +275,13 @@ func normalizeCodexModel(model string) string { ...@@ -275,6 +275,13 @@ func normalizeCodexModel(model string) string {
return "gpt-5.1" return "gpt-5.1"
} }
// normalizeOpenAIModelForUpstream decides which model name is sent upstream
// for an OpenAI account. OAuth accounts (and a nil account, treated the same
// way for safety) go through Codex model-name normalization so the ChatGPT
// internal endpoint recognizes the model. API-key accounts keep the
// caller-provided model name — only surrounding whitespace is trimmed — so
// custom OpenAI-compatible upstreams receive it unchanged.
func normalizeOpenAIModelForUpstream(account *Account, model string) string {
	if account != nil && account.Type != AccountTypeOAuth {
		// Token/API-key based account: preserve the original (mapped) model.
		return strings.TrimSpace(model)
	}
	return normalizeCodexModel(model)
}
func SupportsVerbosity(model string) bool { func SupportsVerbosity(model string) bool {
if !strings.HasPrefix(model, "gpt-") { if !strings.HasPrefix(model, "gpt-") {
return true return true
......
...@@ -46,7 +46,7 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions( ...@@ -46,7 +46,7 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
// 2. Resolve model mapping early so compat prompt_cache_key injection can // 2. Resolve model mapping early so compat prompt_cache_key injection can
// derive a stable seed from the final upstream model family. // derive a stable seed from the final upstream model family.
billingModel := resolveOpenAIForwardModel(account, originalModel, defaultMappedModel) billingModel := resolveOpenAIForwardModel(account, originalModel, defaultMappedModel)
upstreamModel := normalizeCodexModel(billingModel) upstreamModel := normalizeOpenAIModelForUpstream(account, billingModel)
promptCacheKey = strings.TrimSpace(promptCacheKey) promptCacheKey = strings.TrimSpace(promptCacheKey)
compatPromptCacheInjected := false compatPromptCacheInjected := false
......
...@@ -62,7 +62,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic( ...@@ -62,7 +62,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
// 3. Model mapping // 3. Model mapping
billingModel := resolveOpenAIForwardModel(account, normalizedModel, defaultMappedModel) billingModel := resolveOpenAIForwardModel(account, normalizedModel, defaultMappedModel)
upstreamModel := normalizeCodexModel(billingModel) upstreamModel := normalizeOpenAIModelForUpstream(account, billingModel)
responsesReq.Model = upstreamModel responsesReq.Model = upstreamModel
logger.L().Debug("openai messages: model mapping applied", logger.L().Debug("openai messages: model mapping applied",
......
...@@ -1938,9 +1938,11 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco ...@@ -1938,9 +1938,11 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
} }
upstreamModel := billingModel upstreamModel := billingModel
// 针对所有 OpenAI 账号执行 Codex 模型名规范化,确保上游识别一致。 // OpenAI OAuth 账号走 ChatGPT internal Codex endpoint,需要将模型名规范化为
// 上游可识别的 Codex/GPT 系列。API Key 账号则应保留原始/映射后的模型名,
// 以兼容自定义 base_url 的 OpenAI-compatible 上游。
if model, ok := reqBody["model"].(string); ok { if model, ok := reqBody["model"].(string); ok {
upstreamModel = normalizeCodexModel(model) upstreamModel = normalizeOpenAIModelForUpstream(account, model)
if upstreamModel != "" && upstreamModel != model { if upstreamModel != "" && upstreamModel != model {
logger.LegacyPrintf("service.openai_gateway", "[OpenAI] Upstream model resolved: %s -> %s (account: %s, type: %s, isCodexCLI: %v)", logger.LegacyPrintf("service.openai_gateway", "[OpenAI] Upstream model resolved: %s -> %s (account: %s, type: %s, isCodexCLI: %v)",
model, upstreamModel, account.Name, account.Type, isCodexCLI) model, upstreamModel, account.Name, account.Type, isCodexCLI)
......
...@@ -99,3 +99,39 @@ func TestNormalizeCodexModel(t *testing.T) { ...@@ -99,3 +99,39 @@ func TestNormalizeCodexModel(t *testing.T) {
} }
} }
} }
// TestNormalizeOpenAIModelForUpstream verifies that OAuth (and nil) accounts
// go through Codex normalization while API-key accounts keep their model name
// untouched apart from whitespace trimming.
func TestNormalizeOpenAIModelForUpstream(t *testing.T) {
	tests := []struct {
		name    string
		account *Account
		model   string
		want    string
	}{
		{
			name:    "oauth keeps codex normalization behavior",
			account: &Account{Type: AccountTypeOAuth},
			model:   "gemini-3-flash-preview",
			want:    "gpt-5.1",
		},
		{
			// A nil account must fall back to Codex normalization, matching
			// the explicit nil check in normalizeOpenAIModelForUpstream.
			name:    "nil account falls back to codex normalization",
			account: nil,
			model:   "gemini-3-flash-preview",
			want:    "gpt-5.1",
		},
		{
			name:    "apikey preserves custom compatible model",
			account: &Account{Type: AccountTypeAPIKey},
			model:   "gemini-3-flash-preview",
			want:    "gemini-3-flash-preview",
		},
		{
			name:    "apikey preserves official non codex model",
			account: &Account{Type: AccountTypeAPIKey},
			model:   "gpt-4.1",
			want:    "gpt-4.1",
		},
		{
			// API-key path trims surrounding whitespace but nothing else.
			name:    "apikey trims surrounding whitespace",
			account: &Account{Type: AccountTypeAPIKey},
			model:   "  gpt-4.1  ",
			want:    "gpt-4.1",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := normalizeOpenAIModelForUpstream(tt.account, tt.model); got != tt.want {
				t.Fatalf("normalizeOpenAIModelForUpstream(...) = %q, want %q", got, tt.want)
			}
		})
	}
}
...@@ -2515,7 +2515,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ...@@ -2515,7 +2515,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
} }
normalized = next normalized = next
} }
upstreamModel := normalizeCodexModel(account.GetMappedModel(originalModel)) upstreamModel := normalizeOpenAIModelForUpstream(account, account.GetMappedModel(originalModel))
if upstreamModel != originalModel { if upstreamModel != originalModel {
next, setErr := applyPayloadMutation(normalized, "model", upstreamModel) next, setErr := applyPayloadMutation(normalized, "model", upstreamModel)
if setErr != nil { if setErr != nil {
...@@ -2773,7 +2773,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ...@@ -2773,7 +2773,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
mappedModel := "" mappedModel := ""
var mappedModelBytes []byte var mappedModelBytes []byte
if originalModel != "" { if originalModel != "" {
mappedModel = normalizeCodexModel(account.GetMappedModel(originalModel)) mappedModel = normalizeOpenAIModelForUpstream(account, account.GetMappedModel(originalModel))
needModelReplace = mappedModel != "" && mappedModel != originalModel needModelReplace = mappedModel != "" && mappedModel != originalModel
if needModelReplace { if needModelReplace {
mappedModelBytes = []byte(mappedModel) mappedModelBytes = []byte(mappedModel)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment