Unverified Commit a225a241 authored by Wesley Liddick's avatar Wesley Liddick Committed by GitHub
Browse files

Merge pull request #1162 from remxcode/main

feat(openai): 增加 gpt-5.4-mini/nano 模型支持与定价配置
parents 553a486d 578608d3
...@@ -16,6 +16,8 @@ type Model struct { ...@@ -16,6 +16,8 @@ type Model struct {
// DefaultModels OpenAI models list // DefaultModels OpenAI models list
var DefaultModels = []Model{ var DefaultModels = []Model{
{ID: "gpt-5.4", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4"}, {ID: "gpt-5.4", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4"},
{ID: "gpt-5.4-mini", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4 Mini"},
{ID: "gpt-5.4-nano", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4 Nano"},
{ID: "gpt-5.3-codex", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex"}, {ID: "gpt-5.3-codex", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex"},
{ID: "gpt-5.3-codex-spark", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex Spark"}, {ID: "gpt-5.3-codex-spark", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex Spark"},
{ID: "gpt-5.2", Object: "model", Created: 1733875200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2"}, {ID: "gpt-5.2", Object: "model", Created: 1733875200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2"},
......
...@@ -221,6 +221,18 @@ func (s *BillingService) initFallbackPricing() { ...@@ -221,6 +221,18 @@ func (s *BillingService) initFallbackPricing() {
LongContextInputMultiplier: openAIGPT54LongContextInputMultiplier, LongContextInputMultiplier: openAIGPT54LongContextInputMultiplier,
LongContextOutputMultiplier: openAIGPT54LongContextOutputMultiplier, LongContextOutputMultiplier: openAIGPT54LongContextOutputMultiplier,
} }
s.fallbackPrices["gpt-5.4-mini"] = &ModelPricing{
InputPricePerToken: 7.5e-7,
OutputPricePerToken: 4.5e-6,
CacheReadPricePerToken: 7.5e-8,
SupportsCacheBreakdown: false,
}
s.fallbackPrices["gpt-5.4-nano"] = &ModelPricing{
InputPricePerToken: 2e-7,
OutputPricePerToken: 1.25e-6,
CacheReadPricePerToken: 2e-8,
SupportsCacheBreakdown: false,
}
// OpenAI GPT-5.2(本地兜底) // OpenAI GPT-5.2(本地兜底)
s.fallbackPrices["gpt-5.2"] = &ModelPricing{ s.fallbackPrices["gpt-5.2"] = &ModelPricing{
InputPricePerToken: 1.75e-6, InputPricePerToken: 1.75e-6,
...@@ -294,6 +306,10 @@ func (s *BillingService) getFallbackPricing(model string) *ModelPricing { ...@@ -294,6 +306,10 @@ func (s *BillingService) getFallbackPricing(model string) *ModelPricing {
if strings.Contains(modelLower, "gpt-5") || strings.Contains(modelLower, "codex") { if strings.Contains(modelLower, "gpt-5") || strings.Contains(modelLower, "codex") {
normalized := normalizeCodexModel(modelLower) normalized := normalizeCodexModel(modelLower)
switch normalized { switch normalized {
case "gpt-5.4-mini":
return s.fallbackPrices["gpt-5.4-mini"]
case "gpt-5.4-nano":
return s.fallbackPrices["gpt-5.4-nano"]
case "gpt-5.4": case "gpt-5.4":
return s.fallbackPrices["gpt-5.4"] return s.fallbackPrices["gpt-5.4"]
case "gpt-5.2": case "gpt-5.2":
......
...@@ -174,6 +174,30 @@ func TestGetModelPricing_OpenAIGPT54Fallback(t *testing.T) { ...@@ -174,6 +174,30 @@ func TestGetModelPricing_OpenAIGPT54Fallback(t *testing.T) {
require.InDelta(t, 1.5, pricing.LongContextOutputMultiplier, 1e-12) require.InDelta(t, 1.5, pricing.LongContextOutputMultiplier, 1e-12)
} }
// TestGetModelPricing_OpenAIGPT54MiniFallback verifies that the billing
// service serves the dedicated local fallback pricing for gpt-5.4-mini.
func TestGetModelPricing_OpenAIGPT54MiniFallback(t *testing.T) {
	service := newTestBillingService()

	got, err := service.GetModelPricing("gpt-5.4-mini")
	require.NoError(t, err)
	require.NotNil(t, got)

	// Mini-tier rates; the fallback entry defines no long-context threshold.
	require.InDelta(t, 7.5e-7, got.InputPricePerToken, 1e-12)
	require.InDelta(t, 4.5e-6, got.OutputPricePerToken, 1e-12)
	require.InDelta(t, 7.5e-8, got.CacheReadPricePerToken, 1e-12)
	require.Zero(t, got.LongContextInputThreshold)
}
// TestGetModelPricing_OpenAIGPT54NanoFallback verifies that the billing
// service serves the dedicated local fallback pricing for gpt-5.4-nano.
func TestGetModelPricing_OpenAIGPT54NanoFallback(t *testing.T) {
	service := newTestBillingService()

	got, err := service.GetModelPricing("gpt-5.4-nano")
	require.NoError(t, err)
	require.NotNil(t, got)

	// Nano-tier rates; the fallback entry defines no long-context threshold.
	require.InDelta(t, 2e-7, got.InputPricePerToken, 1e-12)
	require.InDelta(t, 1.25e-6, got.OutputPricePerToken, 1e-12)
	require.InDelta(t, 2e-8, got.CacheReadPricePerToken, 1e-12)
	require.Zero(t, got.LongContextInputThreshold)
}
func TestCalculateCost_OpenAIGPT54LongContextAppliesWholeSessionMultipliers(t *testing.T) { func TestCalculateCost_OpenAIGPT54LongContextAppliesWholeSessionMultipliers(t *testing.T) {
svc := newTestBillingService() svc := newTestBillingService()
...@@ -210,6 +234,8 @@ func TestGetFallbackPricing_FamilyMatching(t *testing.T) { ...@@ -210,6 +234,8 @@ func TestGetFallbackPricing_FamilyMatching(t *testing.T) {
{name: "gemini unknown no fallback", model: "gemini-2.0-pro", expectNilPricing: true}, {name: "gemini unknown no fallback", model: "gemini-2.0-pro", expectNilPricing: true},
{name: "openai gpt5.1", model: "gpt-5.1", expectedInput: 1.25e-6}, {name: "openai gpt5.1", model: "gpt-5.1", expectedInput: 1.25e-6},
{name: "openai gpt5.4", model: "gpt-5.4", expectedInput: 2.5e-6}, {name: "openai gpt5.4", model: "gpt-5.4", expectedInput: 2.5e-6},
{name: "openai gpt5.4 mini", model: "gpt-5.4-mini", expectedInput: 7.5e-7},
{name: "openai gpt5.4 nano", model: "gpt-5.4-nano", expectedInput: 2e-7},
{name: "openai gpt5.3 codex", model: "gpt-5.3-codex", expectedInput: 1.5e-6}, {name: "openai gpt5.3 codex", model: "gpt-5.3-codex", expectedInput: 1.5e-6},
{name: "openai gpt5.1 codex max alias", model: "gpt-5.1-codex-max", expectedInput: 1.5e-6}, {name: "openai gpt5.1 codex max alias", model: "gpt-5.1-codex-max", expectedInput: 1.5e-6},
{name: "openai codex mini latest alias", model: "codex-mini-latest", expectedInput: 1.5e-6}, {name: "openai codex mini latest alias", model: "codex-mini-latest", expectedInput: 1.5e-6},
...@@ -564,6 +590,40 @@ func TestCalculateCostWithServiceTier_FlexAppliesHalfMultiplier(t *testing.T) { ...@@ -564,6 +590,40 @@ func TestCalculateCostWithServiceTier_FlexAppliesHalfMultiplier(t *testing.T) {
require.InDelta(t, baseCost.TotalCost*0.5, flexCost.TotalCost, 1e-10) require.InDelta(t, baseCost.TotalCost*0.5, flexCost.TotalCost, 1e-10)
} }
// TestCalculateCostWithServiceTier_Gpt54MiniPriorityFallsBackToTierMultiplier
// checks that, absent explicit priority prices for gpt-5.4-mini, the generic
// 2x "priority" tier multiplier is applied uniformly to every cost component.
func TestCalculateCostWithServiceTier_Gpt54MiniPriorityFallsBackToTierMultiplier(t *testing.T) {
	service := newTestBillingService()
	usage := UsageTokens{InputTokens: 120, OutputTokens: 30, CacheCreationTokens: 12, CacheReadTokens: 8}

	// Baseline cost with no service tier.
	base, err := service.CalculateCost("gpt-5.4-mini", usage, 1.0)
	require.NoError(t, err)

	withTier, err := service.CalculateCostWithServiceTier("gpt-5.4-mini", usage, 1.0, "priority")
	require.NoError(t, err)

	// Every component should be exactly doubled relative to the baseline.
	checks := []struct {
		label     string
		want, got float64
	}{
		{"input", base.InputCost * 2, withTier.InputCost},
		{"output", base.OutputCost * 2, withTier.OutputCost},
		{"cache creation", base.CacheCreationCost * 2, withTier.CacheCreationCost},
		{"cache read", base.CacheReadCost * 2, withTier.CacheReadCost},
		{"total", base.TotalCost * 2, withTier.TotalCost},
	}
	for _, c := range checks {
		require.InDelta(t, c.want, c.got, 1e-10, c.label)
	}
}
// TestCalculateCostWithServiceTier_Gpt54NanoFlexAppliesHalfMultiplier checks
// that the "flex" service tier halves every cost component for gpt-5.4-nano.
func TestCalculateCostWithServiceTier_Gpt54NanoFlexAppliesHalfMultiplier(t *testing.T) {
	service := newTestBillingService()
	usage := UsageTokens{InputTokens: 100, OutputTokens: 50, CacheCreationTokens: 40, CacheReadTokens: 20}

	// Baseline cost with no service tier.
	base, err := service.CalculateCost("gpt-5.4-nano", usage, 1.0)
	require.NoError(t, err)

	withTier, err := service.CalculateCostWithServiceTier("gpt-5.4-nano", usage, 1.0, "flex")
	require.NoError(t, err)

	// Every component should be exactly halved relative to the baseline.
	checks := []struct {
		label     string
		want, got float64
	}{
		{"input", base.InputCost * 0.5, withTier.InputCost},
		{"output", base.OutputCost * 0.5, withTier.OutputCost},
		{"cache creation", base.CacheCreationCost * 0.5, withTier.CacheCreationCost},
		{"cache read", base.CacheReadCost * 0.5, withTier.CacheReadCost},
		{"total", base.TotalCost * 0.5, withTier.TotalCost},
	}
	for _, c := range checks {
		require.InDelta(t, c.want, c.got, 1e-10, c.label)
	}
}
func TestCalculateCostWithServiceTier_PriorityFallsBackToTierMultiplierWithoutExplicitPriorityPrice(t *testing.T) { func TestCalculateCostWithServiceTier_PriorityFallsBackToTierMultiplierWithoutExplicitPriorityPrice(t *testing.T) {
svc := newTestBillingService() svc := newTestBillingService()
tokens := UsageTokens{InputTokens: 120, OutputTokens: 30, CacheCreationTokens: 12, CacheReadTokens: 8} tokens := UsageTokens{InputTokens: 120, OutputTokens: 30, CacheCreationTokens: 12, CacheReadTokens: 8}
......
...@@ -7,6 +7,8 @@ import ( ...@@ -7,6 +7,8 @@ import (
var codexModelMap = map[string]string{ var codexModelMap = map[string]string{
"gpt-5.4": "gpt-5.4", "gpt-5.4": "gpt-5.4",
"gpt-5.4-mini": "gpt-5.4-mini",
"gpt-5.4-nano": "gpt-5.4-nano",
"gpt-5.4-none": "gpt-5.4", "gpt-5.4-none": "gpt-5.4",
"gpt-5.4-low": "gpt-5.4", "gpt-5.4-low": "gpt-5.4",
"gpt-5.4-medium": "gpt-5.4", "gpt-5.4-medium": "gpt-5.4",
...@@ -225,6 +227,12 @@ func normalizeCodexModel(model string) string { ...@@ -225,6 +227,12 @@ func normalizeCodexModel(model string) string {
normalized := strings.ToLower(modelID) normalized := strings.ToLower(modelID)
if strings.Contains(normalized, "gpt-5.4-mini") || strings.Contains(normalized, "gpt 5.4 mini") {
return "gpt-5.4-mini"
}
if strings.Contains(normalized, "gpt-5.4-nano") || strings.Contains(normalized, "gpt 5.4 nano") {
return "gpt-5.4-nano"
}
if strings.Contains(normalized, "gpt-5.4") || strings.Contains(normalized, "gpt 5.4") { if strings.Contains(normalized, "gpt-5.4") || strings.Contains(normalized, "gpt 5.4") {
return "gpt-5.4" return "gpt-5.4"
} }
......
...@@ -238,6 +238,10 @@ func TestNormalizeCodexModel_Gpt53(t *testing.T) { ...@@ -238,6 +238,10 @@ func TestNormalizeCodexModel_Gpt53(t *testing.T) {
"gpt-5.4-high": "gpt-5.4", "gpt-5.4-high": "gpt-5.4",
"gpt-5.4-chat-latest": "gpt-5.4", "gpt-5.4-chat-latest": "gpt-5.4",
"gpt 5.4": "gpt-5.4", "gpt 5.4": "gpt-5.4",
"gpt-5.4-mini": "gpt-5.4-mini",
"gpt 5.4 mini": "gpt-5.4-mini",
"gpt-5.4-nano": "gpt-5.4-nano",
"gpt 5.4 nano": "gpt-5.4-nano",
"gpt-5.3": "gpt-5.3-codex", "gpt-5.3": "gpt-5.3-codex",
"gpt-5.3-codex": "gpt-5.3-codex", "gpt-5.3-codex": "gpt-5.3-codex",
"gpt-5.3-codex-xhigh": "gpt-5.3-codex", "gpt-5.3-codex-xhigh": "gpt-5.3-codex",
......
...@@ -34,6 +34,22 @@ var ( ...@@ -34,6 +34,22 @@ var (
Mode: "chat", Mode: "chat",
SupportsPromptCaching: true, SupportsPromptCaching: true,
} }
// Static fallback pricing for gpt-5.4-mini, used when the remote LiteLLM
// pricing feed has no entry for the model. Rates mirror the local JSON data
// ($0.75/M input, $4.50/M output, $0.075/M cached input).
openAIGPT54MiniFallbackPricing = &LiteLLMModelPricing{
InputCostPerToken: 7.5e-07,
OutputCostPerToken: 4.5e-06,
CacheReadInputTokenCost: 7.5e-08,
LiteLLMProvider: "openai",
Mode: "chat",
SupportsPromptCaching: true,
}
// Static fallback pricing for gpt-5.4-nano, used when the remote LiteLLM
// pricing feed has no entry for the model. Rates mirror the local JSON data
// ($0.20/M input, $1.25/M output, $0.02/M cached input).
openAIGPT54NanoFallbackPricing = &LiteLLMModelPricing{
InputCostPerToken: 2e-07,
OutputCostPerToken: 1.25e-06,
CacheReadInputTokenCost: 2e-08,
LiteLLMProvider: "openai",
Mode: "chat",
SupportsPromptCaching: true,
}
) )
// LiteLLMModelPricing LiteLLM价格数据结构 // LiteLLMModelPricing LiteLLM价格数据结构
...@@ -723,6 +739,18 @@ func (s *PricingService) matchOpenAIModel(model string) *LiteLLMModelPricing { ...@@ -723,6 +739,18 @@ func (s *PricingService) matchOpenAIModel(model string) *LiteLLMModelPricing {
} }
} }
if strings.HasPrefix(model, "gpt-5.4-mini") {
logger.With(zap.String("component", "service.pricing")).
Info(fmt.Sprintf("[Pricing] OpenAI fallback matched %s -> %s", model, "gpt-5.4-mini(static)"))
return openAIGPT54MiniFallbackPricing
}
if strings.HasPrefix(model, "gpt-5.4-nano") {
logger.With(zap.String("component", "service.pricing")).
Info(fmt.Sprintf("[Pricing] OpenAI fallback matched %s -> %s", model, "gpt-5.4-nano(static)"))
return openAIGPT54NanoFallbackPricing
}
if strings.HasPrefix(model, "gpt-5.4") { if strings.HasPrefix(model, "gpt-5.4") {
logger.With(zap.String("component", "service.pricing")). logger.With(zap.String("component", "service.pricing")).
Info(fmt.Sprintf("[Pricing] OpenAI fallback matched %s -> %s", model, "gpt-5.4(static)")) Info(fmt.Sprintf("[Pricing] OpenAI fallback matched %s -> %s", model, "gpt-5.4(static)"))
......
...@@ -98,6 +98,36 @@ func TestGetModelPricing_Gpt54UsesStaticFallbackWhenRemoteMissing(t *testing.T) ...@@ -98,6 +98,36 @@ func TestGetModelPricing_Gpt54UsesStaticFallbackWhenRemoteMissing(t *testing.T)
require.InDelta(t, 1.5, got.LongContextOutputCostMultiplier, 1e-12) require.InDelta(t, 1.5, got.LongContextOutputCostMultiplier, 1e-12)
} }
// TestGetModelPricing_Gpt54MiniUsesDedicatedStaticFallbackWhenRemoteMissing
// builds a pricing service whose remote data lacks gpt-5.4-mini and checks
// that the dedicated static fallback (not a sibling model's entry) is used.
func TestGetModelPricing_Gpt54MiniUsesDedicatedStaticFallbackWhenRemoteMissing(t *testing.T) {
	// Remote data intentionally contains only an unrelated model.
	service := &PricingService{
		pricingData: map[string]*LiteLLMModelPricing{
			"gpt-5.1-codex": {InputCostPerToken: 1.25e-6},
		},
	}

	pricing := service.GetModelPricing("gpt-5.4-mini")
	require.NotNil(t, pricing)
	require.InDelta(t, 7.5e-7, pricing.InputCostPerToken, 1e-12)
	require.InDelta(t, 4.5e-6, pricing.OutputCostPerToken, 1e-12)
	require.InDelta(t, 7.5e-8, pricing.CacheReadInputTokenCost, 1e-12)
	// The static fallback carries no long-context threshold.
	require.Zero(t, pricing.LongContextInputTokenThreshold)
}
// TestGetModelPricing_Gpt54NanoUsesDedicatedStaticFallbackWhenRemoteMissing
// builds a pricing service whose remote data lacks gpt-5.4-nano and checks
// that the dedicated static fallback (not a sibling model's entry) is used.
func TestGetModelPricing_Gpt54NanoUsesDedicatedStaticFallbackWhenRemoteMissing(t *testing.T) {
	// Remote data intentionally contains only an unrelated model.
	service := &PricingService{
		pricingData: map[string]*LiteLLMModelPricing{
			"gpt-5.1-codex": {InputCostPerToken: 1.25e-6},
		},
	}

	pricing := service.GetModelPricing("gpt-5.4-nano")
	require.NotNil(t, pricing)
	require.InDelta(t, 2e-7, pricing.InputCostPerToken, 1e-12)
	require.InDelta(t, 1.25e-6, pricing.OutputCostPerToken, 1e-12)
	require.InDelta(t, 2e-8, pricing.CacheReadInputTokenCost, 1e-12)
	// The static fallback carries no long-context threshold.
	require.Zero(t, pricing.LongContextInputTokenThreshold)
}
func TestParsePricingData_PreservesPriorityAndServiceTierFields(t *testing.T) { func TestParsePricingData_PreservesPriorityAndServiceTierFields(t *testing.T) {
raw := map[string]any{ raw := map[string]any{
"gpt-5.4": map[string]any{ "gpt-5.4": map[string]any{
......
...@@ -5173,6 +5173,71 @@ ...@@ -5173,6 +5173,71 @@
"supports_tool_choice": true, "supports_tool_choice": true,
"supports_vision": true "supports_vision": true
}, },
"gpt-5.4-mini": {
"cache_read_input_token_cost": 7.5e-08,
"input_cost_per_token": 7.5e-07,
"litellm_provider": "openai",
"max_input_tokens": 400000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 4.5e-06,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_service_tier": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"gpt-5.4-nano": {
"cache_read_input_token_cost": 2e-08,
"input_cost_per_token": 2e-07,
"litellm_provider": "openai",
"max_input_tokens": 400000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1.25e-06,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"gpt-5.3-codex": { "gpt-5.3-codex": {
"cache_read_input_token_cost": 1.75e-07, "cache_read_input_token_cost": 1.75e-07,
"cache_read_input_token_cost_priority": 3.5e-07, "cache_read_input_token_cost_priority": 3.5e-07,
......
...@@ -709,6 +709,38 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin ...@@ -709,6 +709,38 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin
xhigh: {} xhigh: {}
} }
}, },
'gpt-5.4-mini': {
name: 'GPT-5.4 Mini',
limit: {
context: 400000,
output: 128000
},
options: {
store: false
},
variants: {
low: {},
medium: {},
high: {},
xhigh: {}
}
},
'gpt-5.4-nano': {
name: 'GPT-5.4 Nano',
limit: {
context: 400000,
output: 128000
},
options: {
store: false
},
variants: {
low: {},
medium: {},
high: {},
xhigh: {}
}
},
'gpt-5.3-codex-spark': { 'gpt-5.3-codex-spark': {
name: 'GPT-5.3 Codex Spark', name: 'GPT-5.3 Codex Spark',
limit: { limit: {
......
// Vitest spec for UseKeyModal: verifies that the generated OpenCode config
// snippet includes the new GPT-5.4 Mini/Nano model display names.
import { describe, expect, it, vi } from 'vitest'
import { mount } from '@vue/test-utils'
import { nextTick } from 'vue'
// vi.mock calls are hoisted by Vitest above the imports, so these mocks are
// in place before UseKeyModal (below) resolves its own dependencies.
// Stub i18n: t() returns the translation key itself, letting assertions
// match on raw keys instead of localized strings.
vi.mock('vue-i18n', () => ({
useI18n: () => ({
t: (key: string) => key
})
}))
// Stub the clipboard composable so no real clipboard API is touched.
vi.mock('@/composables/useClipboard', () => ({
useClipboard: () => ({
copyToClipboard: vi.fn().mockResolvedValue(true)
})
}))
import UseKeyModal from '../UseKeyModal.vue'
describe('UseKeyModal', () => {
it('renders updated GPT-5.4 mini/nano names in OpenCode config', async () => {
const wrapper = mount(UseKeyModal, {
props: {
show: true,
apiKey: 'sk-test',
baseUrl: 'https://example.com/v1',
platform: 'openai'
},
global: {
stubs: {
// Render dialog content inline so the tab buttons are reachable.
BaseDialog: {
template: '<div><slot /><slot name="footer" /></div>'
},
Icon: {
template: '<span />'
}
}
}
})
// Switch to the OpenCode tab (button text is the raw i18n key per the
// useI18n stub above).
const opencodeTab = wrapper.findAll('button').find((button) =>
button.text().includes('keys.useKeyModal.cliTabs.opencode')
)
expect(opencodeTab).toBeDefined()
await opencodeTab!.trigger('click')
await nextTick()
// The rendered config snippet must list both new model display names.
const codeBlock = wrapper.find('pre code')
expect(codeBlock.exists()).toBe(true)
expect(codeBlock.text()).toContain('"name": "GPT-5.4 Mini"')
expect(codeBlock.text()).toContain('"name": "GPT-5.4 Nano"')
})
})
...@@ -11,6 +11,8 @@ describe('useModelWhitelist', () => { ...@@ -11,6 +11,8 @@ describe('useModelWhitelist', () => {
const models = getModelsByPlatform('openai') const models = getModelsByPlatform('openai')
expect(models).toContain('gpt-5.4') expect(models).toContain('gpt-5.4')
expect(models).toContain('gpt-5.4-mini')
expect(models).toContain('gpt-5.4-nano')
expect(models).toContain('gpt-5.4-2026-03-05') expect(models).toContain('gpt-5.4-2026-03-05')
}) })
...@@ -52,4 +54,13 @@ describe('useModelWhitelist', () => { ...@@ -52,4 +54,13 @@ describe('useModelWhitelist', () => {
'gpt-5.4-2026-03-05': 'gpt-5.4-2026-03-05' 'gpt-5.4-2026-03-05': 'gpt-5.4-2026-03-05'
}) })
}) })
it('whitelist keeps GPT-5.4 mini and nano exact mappings', () => {
const mapping = buildModelMappingObject('whitelist', ['gpt-5.4-mini', 'gpt-5.4-nano'], [])
expect(mapping).toEqual({
'gpt-5.4-mini': 'gpt-5.4-mini',
'gpt-5.4-nano': 'gpt-5.4-nano'
})
})
}) })
...@@ -25,7 +25,7 @@ const openaiModels = [ ...@@ -25,7 +25,7 @@ const openaiModels = [
'gpt-5.2', 'gpt-5.2-2025-12-11', 'gpt-5.2-chat-latest', 'gpt-5.2', 'gpt-5.2-2025-12-11', 'gpt-5.2-chat-latest',
'gpt-5.2-codex', 'gpt-5.2-pro', 'gpt-5.2-pro-2025-12-11', 'gpt-5.2-codex', 'gpt-5.2-pro', 'gpt-5.2-pro-2025-12-11',
// GPT-5.4 系列 // GPT-5.4 系列
'gpt-5.4', 'gpt-5.4-2026-03-05', 'gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-nano', 'gpt-5.4-2026-03-05',
// GPT-5.3 系列 // GPT-5.3 系列
'gpt-5.3-codex', 'gpt-5.3-codex-spark', 'gpt-5.3-codex', 'gpt-5.3-codex-spark',
'chatgpt-4o-latest', 'chatgpt-4o-latest',
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment