Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
陈曦
sub2api
Commits
9a92fa4a
Unverified
Commit
9a92fa4a
authored
Mar 30, 2026
by
Wesley Liddick
Committed by
GitHub
Mar 30, 2026
Browse files
Merge pull request #1370 from YanzheL/fix/1320-openai-messages-gpt54-xhigh
fix(gateway): normalize gpt-5.4-xhigh for /v1/messages
parents
576af710
f2c2abe6
Changes
4
Hide whitespace changes
Inline
Side-by-side
backend/internal/handler/openai_gateway_handler.go
View file @
9a92fa4a
...
@@ -541,6 +541,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -541,6 +541,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
return
return
}
}
reqModel
:=
modelResult
.
String
()
reqModel
:=
modelResult
.
String
()
routingModel
:=
service
.
NormalizeOpenAICompatRequestedModel
(
reqModel
)
reqStream
:=
gjson
.
GetBytes
(
body
,
"stream"
)
.
Bool
()
reqStream
:=
gjson
.
GetBytes
(
body
,
"stream"
)
.
Bool
()
reqLog
=
reqLog
.
With
(
zap
.
String
(
"model"
,
reqModel
),
zap
.
Bool
(
"stream"
,
reqStream
))
reqLog
=
reqLog
.
With
(
zap
.
String
(
"model"
,
reqModel
),
zap
.
Bool
(
"stream"
,
reqStream
))
...
@@ -606,7 +607,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -606,7 +607,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
apiKey
.
GroupID
,
apiKey
.
GroupID
,
""
,
// no previous_response_id
""
,
// no previous_response_id
sessionHash
,
sessionHash
,
r
eq
Model
,
r
outing
Model
,
failedAccountIDs
,
failedAccountIDs
,
service
.
OpenAIUpstreamTransportAny
,
service
.
OpenAIUpstreamTransportAny
,
)
)
...
@@ -621,7 +622,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
...
@@ -621,7 +622,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
if
apiKey
.
Group
!=
nil
{
if
apiKey
.
Group
!=
nil
{
defaultModel
=
apiKey
.
Group
.
DefaultMappedModel
defaultModel
=
apiKey
.
Group
.
DefaultMappedModel
}
}
if
defaultModel
!=
""
&&
defaultModel
!=
r
eq
Model
{
if
defaultModel
!=
""
&&
defaultModel
!=
r
outing
Model
{
reqLog
.
Info
(
"openai_messages.fallback_to_default_model"
,
reqLog
.
Info
(
"openai_messages.fallback_to_default_model"
,
zap
.
String
(
"default_mapped_model"
,
defaultModel
),
zap
.
String
(
"default_mapped_model"
,
defaultModel
),
)
)
...
...
backend/internal/service/openai_compat_model.go
0 → 100644
View file @
9a92fa4a
package
service
import
(
"strings"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
)
func
NormalizeOpenAICompatRequestedModel
(
model
string
)
string
{
trimmed
:=
strings
.
TrimSpace
(
model
)
if
trimmed
==
""
{
return
""
}
normalized
,
_
,
ok
:=
splitOpenAICompatReasoningModel
(
trimmed
)
if
!
ok
||
normalized
==
""
{
return
trimmed
}
return
normalized
}
func
applyOpenAICompatModelNormalization
(
req
*
apicompat
.
AnthropicRequest
)
{
if
req
==
nil
{
return
}
originalModel
:=
strings
.
TrimSpace
(
req
.
Model
)
if
originalModel
==
""
{
return
}
normalizedModel
,
derivedEffort
,
hasReasoningSuffix
:=
splitOpenAICompatReasoningModel
(
originalModel
)
if
hasReasoningSuffix
&&
normalizedModel
!=
""
{
req
.
Model
=
normalizedModel
}
if
req
.
OutputConfig
!=
nil
&&
strings
.
TrimSpace
(
req
.
OutputConfig
.
Effort
)
!=
""
{
return
}
claudeEffort
:=
openAIReasoningEffortToClaudeOutputEffort
(
derivedEffort
)
if
claudeEffort
==
""
{
return
}
if
req
.
OutputConfig
==
nil
{
req
.
OutputConfig
=
&
apicompat
.
AnthropicOutputConfig
{}
}
req
.
OutputConfig
.
Effort
=
claudeEffort
}
func
splitOpenAICompatReasoningModel
(
model
string
)
(
normalizedModel
string
,
reasoningEffort
string
,
ok
bool
)
{
trimmed
:=
strings
.
TrimSpace
(
model
)
if
trimmed
==
""
{
return
""
,
""
,
false
}
modelID
:=
trimmed
if
strings
.
Contains
(
modelID
,
"/"
)
{
parts
:=
strings
.
Split
(
modelID
,
"/"
)
modelID
=
parts
[
len
(
parts
)
-
1
]
}
modelID
=
strings
.
TrimSpace
(
modelID
)
if
!
strings
.
HasPrefix
(
strings
.
ToLower
(
modelID
),
"gpt-"
)
{
return
trimmed
,
""
,
false
}
parts
:=
strings
.
FieldsFunc
(
strings
.
ToLower
(
modelID
),
func
(
r
rune
)
bool
{
switch
r
{
case
'-'
,
'_'
,
' '
:
return
true
default
:
return
false
}
})
if
len
(
parts
)
==
0
{
return
trimmed
,
""
,
false
}
last
:=
strings
.
NewReplacer
(
"-"
,
""
,
"_"
,
""
,
" "
,
""
)
.
Replace
(
parts
[
len
(
parts
)
-
1
])
switch
last
{
case
"none"
,
"minimal"
:
case
"low"
,
"medium"
,
"high"
:
reasoningEffort
=
last
case
"xhigh"
,
"extrahigh"
:
reasoningEffort
=
"xhigh"
default
:
return
trimmed
,
""
,
false
}
return
normalizeCodexModel
(
modelID
),
reasoningEffort
,
true
}
// openAIReasoningEffortToClaudeOutputEffort translates an OpenAI reasoning
// effort level into the Claude output-config effort: "low"/"medium"/"high"
// pass through (the original, untrimmed value is returned), "xhigh" becomes
// "max", and anything else maps to "".
func openAIReasoningEffortToClaudeOutputEffort(effort string) string {
	trimmed := strings.TrimSpace(effort)
	if trimmed == "xhigh" {
		return "max"
	}
	if trimmed == "low" || trimmed == "medium" || trimmed == "high" {
		// NOTE: matches the existing contract — the caller's original string
		// (including any surrounding whitespace) is echoed back.
		return effort
	}
	return ""
}
backend/internal/service/openai_compat_model_test.go
0 → 100644
View file @
9a92fa4a
package
service
import
(
"bytes"
"context"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
)
func
TestNormalizeOpenAICompatRequestedModel
(
t
*
testing
.
T
)
{
t
.
Parallel
()
tests
:=
[]
struct
{
name
string
input
string
want
string
}{
{
name
:
"gpt reasoning alias strips xhigh"
,
input
:
"gpt-5.4-xhigh"
,
want
:
"gpt-5.4"
},
{
name
:
"gpt reasoning alias strips none"
,
input
:
"gpt-5.4-none"
,
want
:
"gpt-5.4"
},
{
name
:
"codex max model stays intact"
,
input
:
"gpt-5.1-codex-max"
,
want
:
"gpt-5.1-codex-max"
},
{
name
:
"non openai model unchanged"
,
input
:
"claude-opus-4-6"
,
want
:
"claude-opus-4-6"
},
}
for
_
,
tt
:=
range
tests
{
t
.
Run
(
tt
.
name
,
func
(
t
*
testing
.
T
)
{
require
.
Equal
(
t
,
tt
.
want
,
NormalizeOpenAICompatRequestedModel
(
tt
.
input
))
})
}
}
func
TestApplyOpenAICompatModelNormalization
(
t
*
testing
.
T
)
{
t
.
Parallel
()
t
.
Run
(
"derives xhigh from model suffix when output config missing"
,
func
(
t
*
testing
.
T
)
{
req
:=
&
apicompat
.
AnthropicRequest
{
Model
:
"gpt-5.4-xhigh"
}
applyOpenAICompatModelNormalization
(
req
)
require
.
Equal
(
t
,
"gpt-5.4"
,
req
.
Model
)
require
.
NotNil
(
t
,
req
.
OutputConfig
)
require
.
Equal
(
t
,
"max"
,
req
.
OutputConfig
.
Effort
)
})
t
.
Run
(
"explicit output config wins over model suffix"
,
func
(
t
*
testing
.
T
)
{
req
:=
&
apicompat
.
AnthropicRequest
{
Model
:
"gpt-5.4-xhigh"
,
OutputConfig
:
&
apicompat
.
AnthropicOutputConfig
{
Effort
:
"low"
},
}
applyOpenAICompatModelNormalization
(
req
)
require
.
Equal
(
t
,
"gpt-5.4"
,
req
.
Model
)
require
.
NotNil
(
t
,
req
.
OutputConfig
)
require
.
Equal
(
t
,
"low"
,
req
.
OutputConfig
.
Effort
)
})
t
.
Run
(
"non openai model is untouched"
,
func
(
t
*
testing
.
T
)
{
req
:=
&
apicompat
.
AnthropicRequest
{
Model
:
"claude-opus-4-6"
}
applyOpenAICompatModelNormalization
(
req
)
require
.
Equal
(
t
,
"claude-opus-4-6"
,
req
.
Model
)
require
.
Nil
(
t
,
req
.
OutputConfig
)
})
}
// TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh exercises the
// full /v1/messages forwarding path for a "gpt-5.4-xhigh" request: the upstream
// call must use the normalized model ("gpt-5.4") with reasoning.effort "xhigh",
// while the client-facing response echoes the originally requested model.
func TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh(t *testing.T) {
	t.Parallel()
	gin.SetMode(gin.TestMode)

	// Build a gin test context carrying an Anthropic-style /v1/messages
	// request whose model uses the "-xhigh" reasoning suffix.
	rec := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(rec)
	body := []byte(`{"model":"gpt-5.4-xhigh","max_tokens":16,"messages":[{"role":"user","content":"hello"}],"stream":false}`)
	c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body))
	c.Request.Header.Set("Content-Type", "application/json")

	// Canned upstream SSE stream: a single completed Responses-API event
	// followed by the [DONE] sentinel.
	upstreamBody := strings.Join([]string{
		`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`,
		"",
		"data: [DONE]",
		"",
	}, "\n")
	// httpUpstreamRecorder is presumably a package-local test double that
	// records the last request body sent upstream — confirm against its
	// definition elsewhere in this package.
	upstream := &httpUpstreamRecorder{resp: &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": []string{"text/event-stream"}, "x-request-id": []string{"rid_compat"}},
		Body:       io.NopCloser(strings.NewReader(upstreamBody)),
	}}
	svc := &OpenAIGatewayService{httpUpstream: upstream}
	// OAuth account whose model_mapping contains the normalized model name,
	// so routing on "gpt-5.4" (not "gpt-5.4-xhigh") must succeed.
	account := &Account{
		ID:          1,
		Name:        "openai-oauth",
		Platform:    PlatformOpenAI,
		Type:        AccountTypeOAuth,
		Concurrency: 1,
		Credentials: map[string]any{
			"access_token":       "oauth-token",
			"chatgpt_account_id": "chatgpt-acc",
			"model_mapping": map[string]any{
				"gpt-5.4": "gpt-5.4",
			},
		},
	}

	result, err := svc.ForwardAsAnthropic(context.Background(), c, account, body, "", "gpt-5.1")
	require.NoError(t, err)
	require.NotNil(t, result)
	// The result keeps the client's requested model, while upstream/billing
	// use the normalized name and the derived effort.
	require.Equal(t, "gpt-5.4-xhigh", result.Model)
	require.Equal(t, "gpt-5.4", result.UpstreamModel)
	require.Equal(t, "gpt-5.4", result.BillingModel)
	require.NotNil(t, result.ReasoningEffort)
	require.Equal(t, "xhigh", *result.ReasoningEffort)
	// Upstream request body must carry the normalized model and the xhigh
	// reasoning effort.
	require.Equal(t, "gpt-5.4", gjson.GetBytes(upstream.lastBody, "model").String())
	require.Equal(t, "xhigh", gjson.GetBytes(upstream.lastBody, "reasoning.effort").String())
	// Client response echoes the original model and the upstream's text.
	require.Equal(t, http.StatusOK, rec.Code)
	require.Equal(t, "gpt-5.4-xhigh", gjson.GetBytes(rec.Body.Bytes(), "model").String())
	require.Equal(t, "ok", gjson.GetBytes(rec.Body.Bytes(), "content.0.text").String())
	t.Logf("upstream body: %s", string(upstream.lastBody))
	t.Logf("response body: %s", rec.Body.String())
}
backend/internal/service/openai_gateway_messages.go
View file @
9a92fa4a
...
@@ -40,6 +40,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
...
@@ -40,6 +40,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
return
nil
,
fmt
.
Errorf
(
"parse anthropic request: %w"
,
err
)
return
nil
,
fmt
.
Errorf
(
"parse anthropic request: %w"
,
err
)
}
}
originalModel
:=
anthropicReq
.
Model
originalModel
:=
anthropicReq
.
Model
applyOpenAICompatModelNormalization
(
&
anthropicReq
)
clientStream
:=
anthropicReq
.
Stream
// client's original stream preference
clientStream
:=
anthropicReq
.
Stream
// client's original stream preference
// 2. Convert Anthropic → Responses
// 2. Convert Anthropic → Responses
...
@@ -59,7 +60,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
...
@@ -59,7 +60,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
}
}
// 3. Model mapping
// 3. Model mapping
mappedModel
:=
resolveOpenAIForwardModel
(
account
,
original
Model
,
defaultMappedModel
)
mappedModel
:=
resolveOpenAIForwardModel
(
account
,
anthropicReq
.
Model
,
defaultMappedModel
)
responsesReq
.
Model
=
mappedModel
responsesReq
.
Model
=
mappedModel
logger
.
L
()
.
Debug
(
"openai messages: model mapping applied"
,
logger
.
L
()
.
Debug
(
"openai messages: model mapping applied"
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment