Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
陈曦
sub2api
Commits
055c48ab
Unverified
Commit
055c48ab
authored
Apr 01, 2026
by
Wesley Liddick
Committed by
GitHub
Apr 01, 2026
Browse files
Merge pull request #1262 from InCerryGit/main
fix(openai): preserve bare gpt-5.3-codex-spark across forwarding paths
parents
6663e1ed
0b3feb9d
Changes
10
Hide whitespace changes
Inline
Side-by-side
backend/internal/service/openai_codex_transform.go
View file @
055c48ab
...
...
@@ -85,7 +85,7 @@ func applyCodexOAuthTransform(reqBody map[string]any, isCodexCLI bool, isCompact
if
v
,
ok
:=
reqBody
[
"model"
]
.
(
string
);
ok
{
model
=
v
}
normalizedModel
:=
normalizeCodexModel
(
model
)
normalizedModel
:=
strings
.
TrimSpace
(
model
)
if
normalizedModel
!=
""
{
if
model
!=
normalizedModel
{
reqBody
[
"model"
]
=
normalizedModel
...
...
backend/internal/service/openai_codex_transform_test.go
View file @
055c48ab
...
...
@@ -246,6 +246,7 @@ func TestNormalizeCodexModel_Gpt53(t *testing.T) {
"gpt-5.3-codex"
:
"gpt-5.3-codex"
,
"gpt-5.3-codex-xhigh"
:
"gpt-5.3-codex"
,
"gpt-5.3-codex-spark"
:
"gpt-5.3-codex"
,
"gpt 5.3 codex spark"
:
"gpt-5.3-codex"
,
"gpt-5.3-codex-spark-high"
:
"gpt-5.3-codex"
,
"gpt-5.3-codex-spark-xhigh"
:
"gpt-5.3-codex"
,
"gpt 5.3 codex"
:
"gpt-5.3-codex"
,
...
...
@@ -256,6 +257,34 @@ func TestNormalizeCodexModel_Gpt53(t *testing.T) {
}
}
func
TestApplyCodexOAuthTransform_PreservesBareSparkModel
(
t
*
testing
.
T
)
{
reqBody
:=
map
[
string
]
any
{
"model"
:
"gpt-5.3-codex-spark"
,
"input"
:
[]
any
{},
}
result
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
require
.
Equal
(
t
,
"gpt-5.3-codex-spark"
,
reqBody
[
"model"
])
require
.
Equal
(
t
,
"gpt-5.3-codex-spark"
,
result
.
NormalizedModel
)
store
,
ok
:=
reqBody
[
"store"
]
.
(
bool
)
require
.
True
(
t
,
ok
)
require
.
False
(
t
,
store
)
}
func
TestApplyCodexOAuthTransform_TrimmedModelWithoutPolicyRewrite
(
t
*
testing
.
T
)
{
reqBody
:=
map
[
string
]
any
{
"model"
:
" gpt-5.3-codex-spark "
,
"input"
:
[]
any
{},
}
result
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
require
.
Equal
(
t
,
"gpt-5.3-codex-spark"
,
reqBody
[
"model"
])
require
.
Equal
(
t
,
"gpt-5.3-codex-spark"
,
result
.
NormalizedModel
)
require
.
True
(
t
,
result
.
Modified
)
}
func
TestApplyCodexOAuthTransform_CodexCLI_PreservesExistingInstructions
(
t
*
testing
.
T
)
{
// Codex CLI 场景:已有 instructions 时不修改
...
...
backend/internal/service/openai_compat_prompt_cache_key.go
View file @
055c48ab
...
...
@@ -10,8 +10,8 @@ import (
const
compatPromptCacheKeyPrefix
=
"compat_cc_"
func
shouldAutoInjectPromptCacheKeyForCompat
(
model
string
)
bool
{
switch
normalizeCodex
Model
(
strings
.
TrimSpace
(
model
))
{
case
"gpt-5.4"
,
"gpt-5.3-codex"
:
switch
resolveOpenAIUpstream
Model
(
strings
.
TrimSpace
(
model
))
{
case
"gpt-5.4"
,
"gpt-5.3-codex"
,
"gpt-5.3-codex-spark"
:
return
true
default
:
return
false
...
...
@@ -23,9 +23,9 @@ func deriveCompatPromptCacheKey(req *apicompat.ChatCompletionsRequest, mappedMod
return
""
}
normalizedModel
:=
normalizeCodex
Model
(
strings
.
TrimSpace
(
mappedModel
))
normalizedModel
:=
resolveOpenAIUpstream
Model
(
strings
.
TrimSpace
(
mappedModel
))
if
normalizedModel
==
""
{
normalizedModel
=
normalizeCodex
Model
(
strings
.
TrimSpace
(
req
.
Model
))
normalizedModel
=
resolveOpenAIUpstream
Model
(
strings
.
TrimSpace
(
req
.
Model
))
}
if
normalizedModel
==
""
{
normalizedModel
=
strings
.
TrimSpace
(
req
.
Model
)
...
...
backend/internal/service/openai_compat_prompt_cache_key_test.go
View file @
055c48ab
...
...
@@ -17,6 +17,7 @@ func TestShouldAutoInjectPromptCacheKeyForCompat(t *testing.T) {
require
.
True
(
t
,
shouldAutoInjectPromptCacheKeyForCompat
(
"gpt-5.4"
))
require
.
True
(
t
,
shouldAutoInjectPromptCacheKeyForCompat
(
"gpt-5.3"
))
require
.
True
(
t
,
shouldAutoInjectPromptCacheKeyForCompat
(
"gpt-5.3-codex"
))
require
.
True
(
t
,
shouldAutoInjectPromptCacheKeyForCompat
(
"gpt-5.3-codex-spark"
))
require
.
False
(
t
,
shouldAutoInjectPromptCacheKeyForCompat
(
"gpt-4o"
))
}
...
...
@@ -62,3 +63,17 @@ func TestDeriveCompatPromptCacheKey_DiffersAcrossSessions(t *testing.T) {
k2
:=
deriveCompatPromptCacheKey
(
req2
,
"gpt-5.4"
)
require
.
NotEqual
(
t
,
k1
,
k2
,
"different first user messages should yield different keys"
)
}
func
TestDeriveCompatPromptCacheKey_UsesResolvedSparkFamily
(
t
*
testing
.
T
)
{
req
:=
&
apicompat
.
ChatCompletionsRequest
{
Model
:
"gpt-5.3-codex-spark"
,
Messages
:
[]
apicompat
.
ChatMessage
{
{
Role
:
"user"
,
Content
:
mustRawJSON
(
t
,
`"Question A"`
)},
},
}
k1
:=
deriveCompatPromptCacheKey
(
req
,
"gpt-5.3-codex-spark"
)
k2
:=
deriveCompatPromptCacheKey
(
req
,
" openai/gpt-5.3-codex-spark "
)
require
.
NotEmpty
(
t
,
k1
)
require
.
Equal
(
t
,
k1
,
k2
,
"resolved spark family should derive a stable compat cache key"
)
}
backend/internal/service/openai_gateway_chat_completions.go
View file @
055c48ab
...
...
@@ -45,12 +45,13 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
// 2. Resolve model mapping early so compat prompt_cache_key injection can
// derive a stable seed from the final upstream model family.
mappedModel
:=
resolveOpenAIForwardModel
(
account
,
originalModel
,
defaultMappedModel
)
billingModel
:=
resolveOpenAIForwardModel
(
account
,
originalModel
,
defaultMappedModel
)
upstreamModel
:=
resolveOpenAIUpstreamModel
(
billingModel
)
promptCacheKey
=
strings
.
TrimSpace
(
promptCacheKey
)
compatPromptCacheInjected
:=
false
if
promptCacheKey
==
""
&&
account
.
Type
==
AccountTypeOAuth
&&
shouldAutoInjectPromptCacheKeyForCompat
(
mapped
Model
)
{
promptCacheKey
=
deriveCompatPromptCacheKey
(
&
chatReq
,
mapped
Model
)
if
promptCacheKey
==
""
&&
account
.
Type
==
AccountTypeOAuth
&&
shouldAutoInjectPromptCacheKeyForCompat
(
upstream
Model
)
{
promptCacheKey
=
deriveCompatPromptCacheKey
(
&
chatReq
,
upstream
Model
)
compatPromptCacheInjected
=
promptCacheKey
!=
""
}
...
...
@@ -60,12 +61,13 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
if
err
!=
nil
{
return
nil
,
fmt
.
Errorf
(
"convert chat completions to responses: %w"
,
err
)
}
responsesReq
.
Model
=
mapped
Model
responsesReq
.
Model
=
upstream
Model
logFields
:=
[]
zap
.
Field
{
zap
.
Int64
(
"account_id"
,
account
.
ID
),
zap
.
String
(
"original_model"
,
originalModel
),
zap
.
String
(
"mapped_model"
,
mappedModel
),
zap
.
String
(
"billing_model"
,
billingModel
),
zap
.
String
(
"upstream_model"
,
upstreamModel
),
zap
.
Bool
(
"stream"
,
clientStream
),
}
if
compatPromptCacheInjected
{
...
...
@@ -88,6 +90,9 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
return
nil
,
fmt
.
Errorf
(
"unmarshal for codex transform: %w"
,
err
)
}
codexResult
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
if
codexResult
.
NormalizedModel
!=
""
{
upstreamModel
=
codexResult
.
NormalizedModel
}
if
codexResult
.
PromptCacheKey
!=
""
{
promptCacheKey
=
codexResult
.
PromptCacheKey
}
else
if
promptCacheKey
!=
""
{
...
...
@@ -180,9 +185,9 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
var
result
*
OpenAIForwardResult
var
handleErr
error
if
clientStream
{
result
,
handleErr
=
s
.
handleChatStreamingResponse
(
resp
,
c
,
originalModel
,
mapped
Model
,
includeUsage
,
startTime
)
result
,
handleErr
=
s
.
handleChatStreamingResponse
(
resp
,
c
,
originalModel
,
billingModel
,
upstream
Model
,
includeUsage
,
startTime
)
}
else
{
result
,
handleErr
=
s
.
handleChatBufferedStreamingResponse
(
resp
,
c
,
originalModel
,
mapped
Model
,
startTime
)
result
,
handleErr
=
s
.
handleChatBufferedStreamingResponse
(
resp
,
c
,
originalModel
,
billingModel
,
upstream
Model
,
startTime
)
}
// Propagate ServiceTier and ReasoningEffort to result for billing
...
...
@@ -224,7 +229,8 @@ func (s *OpenAIGatewayService) handleChatBufferedStreamingResponse(
resp
*
http
.
Response
,
c
*
gin
.
Context
,
originalModel
string
,
mappedModel
string
,
billingModel
string
,
upstreamModel
string
,
startTime
time
.
Time
,
)
(
*
OpenAIForwardResult
,
error
)
{
requestID
:=
resp
.
Header
.
Get
(
"x-request-id"
)
...
...
@@ -295,8 +301,8 @@ func (s *OpenAIGatewayService) handleChatBufferedStreamingResponse(
RequestID
:
requestID
,
Usage
:
usage
,
Model
:
originalModel
,
BillingModel
:
mapped
Model
,
UpstreamModel
:
mapped
Model
,
BillingModel
:
billing
Model
,
UpstreamModel
:
upstream
Model
,
Stream
:
false
,
Duration
:
time
.
Since
(
startTime
),
},
nil
...
...
@@ -308,7 +314,8 @@ func (s *OpenAIGatewayService) handleChatStreamingResponse(
resp
*
http
.
Response
,
c
*
gin
.
Context
,
originalModel
string
,
mappedModel
string
,
billingModel
string
,
upstreamModel
string
,
includeUsage
bool
,
startTime
time
.
Time
,
)
(
*
OpenAIForwardResult
,
error
)
{
...
...
@@ -343,8 +350,8 @@ func (s *OpenAIGatewayService) handleChatStreamingResponse(
RequestID
:
requestID
,
Usage
:
usage
,
Model
:
originalModel
,
BillingModel
:
mapped
Model
,
UpstreamModel
:
mapped
Model
,
BillingModel
:
billing
Model
,
UpstreamModel
:
upstream
Model
,
Stream
:
true
,
Duration
:
time
.
Since
(
startTime
),
FirstTokenMs
:
firstTokenMs
,
...
...
backend/internal/service/openai_gateway_messages.go
View file @
055c48ab
...
...
@@ -41,6 +41,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
}
originalModel
:=
anthropicReq
.
Model
applyOpenAICompatModelNormalization
(
&
anthropicReq
)
normalizedModel
:=
anthropicReq
.
Model
clientStream
:=
anthropicReq
.
Stream
// client's original stream preference
// 2. Convert Anthropic → Responses
...
...
@@ -60,13 +61,16 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
}
// 3. Model mapping
mappedModel
:=
resolveOpenAIForwardModel
(
account
,
anthropicReq
.
Model
,
defaultMappedModel
)
responsesReq
.
Model
=
mappedModel
billingModel
:=
resolveOpenAIForwardModel
(
account
,
normalizedModel
,
defaultMappedModel
)
upstreamModel
:=
resolveOpenAIUpstreamModel
(
billingModel
)
responsesReq
.
Model
=
upstreamModel
logger
.
L
()
.
Debug
(
"openai messages: model mapping applied"
,
zap
.
Int64
(
"account_id"
,
account
.
ID
),
zap
.
String
(
"original_model"
,
originalModel
),
zap
.
String
(
"mapped_model"
,
mappedModel
),
zap
.
String
(
"normalized_model"
,
normalizedModel
),
zap
.
String
(
"billing_model"
,
billingModel
),
zap
.
String
(
"upstream_model"
,
upstreamModel
),
zap
.
Bool
(
"stream"
,
isStream
),
)
...
...
@@ -82,6 +86,9 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
return
nil
,
fmt
.
Errorf
(
"unmarshal for codex transform: %w"
,
err
)
}
codexResult
:=
applyCodexOAuthTransform
(
reqBody
,
false
,
false
)
if
codexResult
.
NormalizedModel
!=
""
{
upstreamModel
=
codexResult
.
NormalizedModel
}
if
codexResult
.
PromptCacheKey
!=
""
{
promptCacheKey
=
codexResult
.
PromptCacheKey
}
else
if
promptCacheKey
!=
""
{
...
...
@@ -182,10 +189,10 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
var
result
*
OpenAIForwardResult
var
handleErr
error
if
clientStream
{
result
,
handleErr
=
s
.
handleAnthropicStreamingResponse
(
resp
,
c
,
originalModel
,
mapped
Model
,
startTime
)
result
,
handleErr
=
s
.
handleAnthropicStreamingResponse
(
resp
,
c
,
originalModel
,
billingModel
,
upstream
Model
,
startTime
)
}
else
{
// Client wants JSON: buffer the streaming response and assemble a JSON reply.
result
,
handleErr
=
s
.
handleAnthropicBufferedStreamingResponse
(
resp
,
c
,
originalModel
,
mapped
Model
,
startTime
)
result
,
handleErr
=
s
.
handleAnthropicBufferedStreamingResponse
(
resp
,
c
,
originalModel
,
billingModel
,
upstream
Model
,
startTime
)
}
// Propagate ServiceTier and ReasoningEffort to result for billing
...
...
@@ -230,7 +237,8 @@ func (s *OpenAIGatewayService) handleAnthropicBufferedStreamingResponse(
resp
*
http
.
Response
,
c
*
gin
.
Context
,
originalModel
string
,
mappedModel
string
,
billingModel
string
,
upstreamModel
string
,
startTime
time
.
Time
,
)
(
*
OpenAIForwardResult
,
error
)
{
requestID
:=
resp
.
Header
.
Get
(
"x-request-id"
)
...
...
@@ -303,8 +311,8 @@ func (s *OpenAIGatewayService) handleAnthropicBufferedStreamingResponse(
RequestID
:
requestID
,
Usage
:
usage
,
Model
:
originalModel
,
BillingModel
:
mapped
Model
,
UpstreamModel
:
mapped
Model
,
BillingModel
:
billing
Model
,
UpstreamModel
:
upstream
Model
,
Stream
:
false
,
Duration
:
time
.
Since
(
startTime
),
},
nil
...
...
@@ -319,7 +327,8 @@ func (s *OpenAIGatewayService) handleAnthropicStreamingResponse(
resp
*
http
.
Response
,
c
*
gin
.
Context
,
originalModel
string
,
mappedModel
string
,
billingModel
string
,
upstreamModel
string
,
startTime
time
.
Time
,
)
(
*
OpenAIForwardResult
,
error
)
{
requestID
:=
resp
.
Header
.
Get
(
"x-request-id"
)
...
...
@@ -352,8 +361,8 @@ func (s *OpenAIGatewayService) handleAnthropicStreamingResponse(
RequestID
:
requestID
,
Usage
:
usage
,
Model
:
originalModel
,
BillingModel
:
mapped
Model
,
UpstreamModel
:
mapped
Model
,
BillingModel
:
billing
Model
,
UpstreamModel
:
upstream
Model
,
Stream
:
true
,
Duration
:
time
.
Since
(
startTime
),
FirstTokenMs
:
firstTokenMs
,
...
...
backend/internal/service/openai_gateway_service.go
View file @
055c48ab
...
...
@@ -1814,29 +1814,29 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
}
// 对所有请求执行模型映射(包含 Codex CLI)。
mapped
Model
:=
account
.
GetMappedModel
(
reqModel
)
if
mapped
Model
!=
reqModel
{
logger
.
LegacyPrintf
(
"service.openai_gateway"
,
"[OpenAI] Model mapping applied: %s -> %s (account: %s, isCodexCLI: %v)"
,
reqModel
,
mapped
Model
,
account
.
Name
,
isCodexCLI
)
reqBody
[
"model"
]
=
mapped
Model
billing
Model
:=
account
.
GetMappedModel
(
reqModel
)
if
billing
Model
!=
reqModel
{
logger
.
LegacyPrintf
(
"service.openai_gateway"
,
"[OpenAI] Model mapping applied: %s -> %s (account: %s, isCodexCLI: %v)"
,
reqModel
,
billing
Model
,
account
.
Name
,
isCodexCLI
)
reqBody
[
"model"
]
=
billing
Model
bodyModified
=
true
markPatchSet
(
"model"
,
mapped
Model
)
markPatchSet
(
"model"
,
billing
Model
)
}
upstreamModel
:=
billingModel
// 针对所有 OpenAI 账号执行 Codex 模型名规范化,确保上游识别一致。
if
model
,
ok
:=
reqBody
[
"model"
]
.
(
string
);
ok
{
normalizedModel
:=
normalizeCodexModel
(
model
)
if
normalizedModel
!=
""
&&
normalizedModel
!=
model
{
logger
.
LegacyPrintf
(
"service.openai_gateway"
,
"[OpenAI] Codex model normalization: %s -> %s (account: %s, type: %s, isCodexCLI: %v)"
,
model
,
normalizedModel
,
account
.
Name
,
account
.
Type
,
isCodexCLI
)
reqBody
[
"model"
]
=
normalizedModel
mappedModel
=
normalizedModel
upstreamModel
=
resolveOpenAIUpstreamModel
(
model
)
if
upstreamModel
!=
""
&&
upstreamModel
!=
model
{
logger
.
LegacyPrintf
(
"service.openai_gateway"
,
"[OpenAI] Upstream model resolved: %s -> %s (account: %s, type: %s, isCodexCLI: %v)"
,
model
,
upstreamModel
,
account
.
Name
,
account
.
Type
,
isCodexCLI
)
reqBody
[
"model"
]
=
upstreamModel
bodyModified
=
true
markPatchSet
(
"model"
,
normalized
Model
)
markPatchSet
(
"model"
,
upstream
Model
)
}
// 移除 gpt-5.2-codex 以下的版本 verbosity 参数
// 确保高版本模型向低版本模型映射不报错
if
!
SupportsVerbosity
(
normalized
Model
)
{
if
!
SupportsVerbosity
(
upstream
Model
)
{
if
text
,
ok
:=
reqBody
[
"text"
]
.
(
map
[
string
]
any
);
ok
{
delete
(
text
,
"verbosity"
)
}
...
...
@@ -1860,7 +1860,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
disablePatch
()
}
if
codexResult
.
NormalizedModel
!=
""
{
mapped
Model
=
codexResult
.
NormalizedModel
upstream
Model
=
codexResult
.
NormalizedModel
}
if
codexResult
.
PromptCacheKey
!=
""
{
promptCacheKey
=
codexResult
.
PromptCacheKey
...
...
@@ -1977,7 +1977,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
"forward_start account_id=%d account_type=%s model=%s stream=%v has_previous_response_id=%v"
,
account
.
ID
,
account
.
Type
,
mapped
Model
,
upstream
Model
,
reqStream
,
hasPreviousResponseID
,
)
...
...
@@ -2066,7 +2066,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
isCodexCLI
,
reqStream
,
originalModel
,
mapped
Model
,
upstream
Model
,
startTime
,
attempt
,
wsLastFailureReason
,
...
...
@@ -2167,7 +2167,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
firstTokenMs
,
wsAttempts
,
)
wsResult
.
UpstreamModel
=
mapped
Model
wsResult
.
UpstreamModel
=
upstream
Model
return
wsResult
,
nil
}
s
.
writeOpenAIWSFallbackErrorResponse
(
c
,
account
,
wsErr
)
...
...
@@ -2272,14 +2272,14 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
var
usage
*
OpenAIUsage
var
firstTokenMs
*
int
if
reqStream
{
streamResult
,
err
:=
s
.
handleStreamingResponse
(
ctx
,
resp
,
c
,
account
,
startTime
,
originalModel
,
mapped
Model
)
streamResult
,
err
:=
s
.
handleStreamingResponse
(
ctx
,
resp
,
c
,
account
,
startTime
,
originalModel
,
upstream
Model
)
if
err
!=
nil
{
return
nil
,
err
}
usage
=
streamResult
.
usage
firstTokenMs
=
streamResult
.
firstTokenMs
}
else
{
usage
,
err
=
s
.
handleNonStreamingResponse
(
ctx
,
resp
,
c
,
account
,
originalModel
,
mapped
Model
)
usage
,
err
=
s
.
handleNonStreamingResponse
(
ctx
,
resp
,
c
,
account
,
originalModel
,
upstream
Model
)
if
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -2303,7 +2303,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
RequestID
:
resp
.
Header
.
Get
(
"x-request-id"
),
Usage
:
*
usage
,
Model
:
originalModel
,
UpstreamModel
:
mapped
Model
,
UpstreamModel
:
upstream
Model
,
ServiceTier
:
serviceTier
,
ReasoningEffort
:
reasoningEffort
,
Stream
:
reqStream
,
...
...
backend/internal/service/openai_model_mapping.go
View file @
055c48ab
package
service
// resolveOpenAIForwardModel determines the upstream model for OpenAI-compatible
// forwarding. Group-level default mapping only applies when the account itself
// did not match any explicit model_mapping rule.
import
"strings"
// resolveOpenAIForwardModel resolves the account/group mapping result for
// OpenAI-compatible forwarding. Group-level default mapping only applies when
// the account itself did not match any explicit model_mapping rule.
func
resolveOpenAIForwardModel
(
account
*
Account
,
requestedModel
,
defaultMappedModel
string
)
string
{
if
account
==
nil
{
if
defaultMappedModel
!=
""
{
...
...
@@ -17,3 +19,23 @@ func resolveOpenAIForwardModel(account *Account, requestedModel, defaultMappedMo
}
return
mappedModel
}
func
resolveOpenAIUpstreamModel
(
model
string
)
string
{
if
isBareGPT53CodexSparkModel
(
model
)
{
return
"gpt-5.3-codex-spark"
}
return
normalizeCodexModel
(
strings
.
TrimSpace
(
model
))
}
// isBareGPT53CodexSparkModel reports whether model names the spark variant
// exactly — optionally behind a provider prefix such as "openai/" — with
// surrounding whitespace and letter case ignored. Suffixed variants like
// "gpt-5.3-codex-spark-high" do not match.
func isBareGPT53CodexSparkModel(model string) bool {
	id := strings.TrimSpace(model)
	if id == "" {
		return false
	}
	// Keep only the segment after the final provider separator, if any.
	if slash := strings.LastIndex(id, "/"); slash >= 0 {
		id = id[slash+1:]
	}
	switch strings.ToLower(strings.TrimSpace(id)) {
	case "gpt-5.3-codex-spark", "gpt 5.3 codex spark":
		return true
	}
	return false
}
backend/internal/service/openai_model_mapping_test.go
View file @
055c48ab
...
...
@@ -74,13 +74,30 @@ func TestResolveOpenAIForwardModel_PreventsClaudeModelFromFallingBackToGpt51(t *
Credentials
:
map
[
string
]
any
{},
}
withoutDefault
:=
resolveOpenAIForwardModel
(
account
,
"claude-opus-4-6"
,
""
)
if
got
:=
normalizeCodexModel
(
withoutDefault
);
got
!=
"gpt-5.1"
{
t
.
Fatalf
(
"
normalizeCodex
Model(
%q
) = %q, want %q"
,
withoutDefault
,
got
,
"gpt-5.1"
)
withoutDefault
:=
resolveOpenAIUpstreamModel
(
resolveOpenAIForwardModel
(
account
,
"claude-opus-4-6"
,
""
)
)
if
withoutDefault
!=
"gpt-5.1"
{
t
.
Fatalf
(
"
resolveOpenAIUpstream
Model(
...
) = %q, want %q"
,
withoutDefault
,
"gpt-5.1"
)
}
withDefault
:=
resolveOpenAIForwardModel
(
account
,
"claude-opus-4-6"
,
"gpt-5.4"
)
if
got
:=
normalizeCodexModel
(
withDefault
);
got
!=
"gpt-5.4"
{
t
.
Fatalf
(
"normalizeCodexModel(%q) = %q, want %q"
,
withDefault
,
got
,
"gpt-5.4"
)
withDefault
:=
resolveOpenAIUpstreamModel
(
resolveOpenAIForwardModel
(
account
,
"claude-opus-4-6"
,
"gpt-5.4"
))
if
withDefault
!=
"gpt-5.4"
{
t
.
Fatalf
(
"resolveOpenAIUpstreamModel(...) = %q, want %q"
,
withDefault
,
"gpt-5.4"
)
}
}
func
TestResolveOpenAIUpstreamModel
(
t
*
testing
.
T
)
{
cases
:=
map
[
string
]
string
{
"gpt-5.3-codex-spark"
:
"gpt-5.3-codex-spark"
,
"gpt 5.3 codex spark"
:
"gpt-5.3-codex-spark"
,
" openai/gpt-5.3-codex-spark "
:
"gpt-5.3-codex-spark"
,
"gpt-5.3-codex-spark-high"
:
"gpt-5.3-codex"
,
"gpt-5.3-codex-spark-xhigh"
:
"gpt-5.3-codex"
,
"gpt-5.3"
:
"gpt-5.3-codex"
,
}
for
input
,
expected
:=
range
cases
{
if
got
:=
resolveOpenAIUpstreamModel
(
input
);
got
!=
expected
{
t
.
Fatalf
(
"resolveOpenAIUpstreamModel(%q) = %q, want %q"
,
input
,
got
,
expected
)
}
}
}
backend/internal/service/openai_ws_forwarder.go
View file @
055c48ab
...
...
@@ -2515,12 +2515,9 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
}
normalized
=
next
}
mappedModel
:=
account
.
GetMappedModel
(
originalModel
)
if
normalizedModel
:=
normalizeCodexModel
(
mappedModel
);
normalizedModel
!=
""
{
mappedModel
=
normalizedModel
}
if
mappedModel
!=
originalModel
{
next
,
setErr
:=
applyPayloadMutation
(
normalized
,
"model"
,
mappedModel
)
upstreamModel
:=
resolveOpenAIUpstreamModel
(
account
.
GetMappedModel
(
originalModel
))
if
upstreamModel
!=
originalModel
{
next
,
setErr
:=
applyPayloadMutation
(
normalized
,
"model"
,
upstreamModel
)
if
setErr
!=
nil
{
return
openAIWSClientPayload
{},
NewOpenAIWSClientCloseError
(
coderws
.
StatusPolicyViolation
,
"invalid websocket request payload"
,
setErr
)
}
...
...
@@ -2776,10 +2773,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
mappedModel
:=
""
var
mappedModelBytes
[]
byte
if
originalModel
!=
""
{
mappedModel
=
account
.
GetMappedModel
(
originalModel
)
if
normalizedModel
:=
normalizeCodexModel
(
mappedModel
);
normalizedModel
!=
""
{
mappedModel
=
normalizedModel
}
mappedModel
=
resolveOpenAIUpstreamModel
(
account
.
GetMappedModel
(
originalModel
))
needModelReplace
=
mappedModel
!=
""
&&
mappedModel
!=
originalModel
if
needModelReplace
{
mappedModelBytes
=
[]
byte
(
mappedModel
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment