Unverified Commit bbc79796 authored by Wesley Liddick's avatar Wesley Liddick Committed by GitHub
Browse files

Merge pull request #1529 from IanShaw027/feat/group-messages-dispatch-redo

feat: 为openai分组增加messages调度模型映射并支持instructions模板注入
parents 760cc7d6 7d008bd5
......@@ -3,8 +3,12 @@ package service
import (
"strings"
"time"
"github.com/Wei-Shaw/sub2api/internal/domain"
)
type OpenAIMessagesDispatchModelConfig = domain.OpenAIMessagesDispatchModelConfig
type Group struct {
ID int64
Name string
......@@ -49,10 +53,11 @@ type Group struct {
SortOrder int
// OpenAI Messages 调度配置(仅 openai 平台使用)
AllowMessagesDispatch bool
RequireOAuthOnly bool // 仅允许非 apikey 类型账号关联(OpenAI/Antigravity/Anthropic/Gemini)
RequirePrivacySet bool // 调度时仅允许 privacy 已成功设置的账号(OpenAI/Antigravity/Anthropic/Gemini)
DefaultMappedModel string
AllowMessagesDispatch bool
RequireOAuthOnly bool // 仅允许非 apikey 类型账号关联(OpenAI/Antigravity/Anthropic/Gemini)
RequirePrivacySet bool // 调度时仅允许 privacy 已成功设置的账号(OpenAI/Antigravity/Anthropic/Gemini)
DefaultMappedModel string
MessagesDispatchModelConfig OpenAIMessagesDispatchModelConfig
CreatedAt time.Time
UpdatedAt time.Time
......
package service
import (
"bytes"
"fmt"
"strings"
"text/template"
)
// forcedCodexInstructionsTemplateData is the data object handed to the forced
// Codex instructions template when rendering the final top-level
// `instructions` value on the /v1/messages -> Responses/Codex path.
type forcedCodexInstructionsTemplateData struct {
	ExistingInstructions string // converted client instructions/system (caller passes it pre-trimmed)
	OriginalModel        string // model originally requested by the client
	NormalizedModel      string // normalized routing model
	BillingModel         string // model used for billing
	UpstreamModel        string // final model sent upstream
}

// applyForcedCodexInstructionsTemplate renders templateText with data and,
// when the rendered text is non-empty and differs (ignoring surrounding
// whitespace) from the current reqBody["instructions"], stores it there.
//
// It reports whether reqBody was mutated. A blank template, an empty rendered
// result, or an already-equivalent existing value all leave reqBody untouched.
func applyForcedCodexInstructionsTemplate(
	reqBody map[string]any,
	templateText string,
	data forcedCodexInstructionsTemplateData,
) (bool, error) {
	// Fast path: no template configured. Skipping the parse/execute round trip
	// avoids per-request template work; a blank template would have rendered
	// to "" and returned (false, nil) anyway, so behavior is unchanged.
	if strings.TrimSpace(templateText) == "" {
		return false, nil
	}
	rendered, err := renderForcedCodexInstructionsTemplate(templateText, data)
	if err != nil {
		return false, err
	}
	if rendered == "" {
		return false, nil
	}
	existing, _ := reqBody["instructions"].(string)
	if strings.TrimSpace(existing) == rendered {
		// Already equivalent; avoid reporting a spurious mutation.
		return false, nil
	}
	reqBody["instructions"] = rendered
	return true, nil
}

// renderForcedCodexInstructionsTemplate parses templateText as a Go
// text/template, executes it against data, and returns the output with
// surrounding whitespace trimmed. Option "missingkey=zero" only affects map
// lookups; referencing an unknown field of the data struct is still an error.
func renderForcedCodexInstructionsTemplate(
	templateText string,
	data forcedCodexInstructionsTemplateData,
) (string, error) {
	tmpl, err := template.New("forced_codex_instructions").Option("missingkey=zero").Parse(templateText)
	if err != nil {
		return "", fmt.Errorf("parse forced codex instructions template: %w", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		return "", fmt.Errorf("render forced codex instructions template: %w", err)
	}
	return strings.TrimSpace(buf.String()), nil
}
......@@ -6,9 +6,12 @@ import (
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/Wei-Shaw/sub2api/internal/config"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/require"
......@@ -127,3 +130,101 @@ func TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh(t *testing.T
t.Logf("upstream body: %s", string(upstream.lastBody))
t.Logf("response body: %s", rec.Body.String())
}
// Verifies the /v1/messages -> Responses/Codex conversion path: when a forced
// Codex instructions template is configured, the rendered template (with the
// client's converted system prompt substituted for {{ .ExistingInstructions }})
// must be sent upstream as the top-level `instructions` field.
func TestForwardAsAnthropic_ForcedCodexInstructionsTemplatePrependsRenderedInstructions(t *testing.T) {
	t.Parallel()
	gin.SetMode(gin.TestMode)
	// Write a real template file; the same template text is also pre-loaded
	// into config below.
	templateDir := t.TempDir()
	templatePath := filepath.Join(templateDir, "codex-instructions.md.tmpl")
	require.NoError(t, os.WriteFile(templatePath, []byte("server-prefix\n\n{{ .ExistingInstructions }}"), 0o644))
	rec := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(rec)
	// Claude-style request: `system` is expected to be normalized into Codex
	// `instructions` before the template is applied.
	body := []byte(`{"model":"gpt-5.4","max_tokens":16,"system":"client-system","messages":[{"role":"user","content":"hello"}],"stream":false}`)
	c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body))
	c.Request.Header.Set("Content-Type", "application/json")
	// Minimal upstream SSE stream: one completed response, then [DONE].
	upstreamBody := strings.Join([]string{
		`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`,
		"",
		"data: [DONE]",
		"",
	}, "\n")
	upstream := &httpUpstreamRecorder{resp: &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": []string{"text/event-stream"}, "x-request-id": []string{"rid_forced"}},
		Body:       io.NopCloser(strings.NewReader(upstreamBody)),
	}}
	svc := &OpenAIGatewayService{
		cfg: &config.Config{Gateway: config.GatewayConfig{
			ForcedCodexInstructionsTemplateFile: templatePath,
			ForcedCodexInstructionsTemplate:     "server-prefix\n\n{{ .ExistingInstructions }}",
		}},
		httpUpstream: upstream,
	}
	// OAuth-type OpenAI account so the Codex OAuth transform path is taken.
	account := &Account{
		ID:          1,
		Name:        "openai-oauth",
		Platform:    PlatformOpenAI,
		Type:        AccountTypeOAuth,
		Concurrency: 1,
		Credentials: map[string]any{
			"access_token":       "oauth-token",
			"chatgpt_account_id": "chatgpt-acc",
		},
	}
	result, err := svc.ForwardAsAnthropic(context.Background(), c, account, body, "", "gpt-5.1")
	require.NoError(t, err)
	require.NotNil(t, result)
	// The recorded upstream request must carry the template output with the
	// client system prompt substituted in.
	require.Equal(t, "server-prefix\n\nclient-system", gjson.GetBytes(upstream.lastBody, "instructions").String())
}
// Verifies that the service renders from the template text cached in config
// (ForcedCodexInstructionsTemplate) rather than re-reading the template file:
// the configured file path is deliberately unreadable, so any attempt to load
// it at request time would fail the test.
func TestForwardAsAnthropic_ForcedCodexInstructionsTemplateUsesCachedTemplateContent(t *testing.T) {
	t.Parallel()
	gin.SetMode(gin.TestMode)
	rec := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(rec)
	body := []byte(`{"model":"gpt-5.4","max_tokens":16,"system":"client-system","messages":[{"role":"user","content":"hello"}],"stream":false}`)
	c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body))
	c.Request.Header.Set("Content-Type", "application/json")
	// Minimal upstream SSE stream: one completed response, then [DONE].
	upstreamBody := strings.Join([]string{
		`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`,
		"",
		"data: [DONE]",
		"",
	}, "\n")
	upstream := &httpUpstreamRecorder{resp: &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": []string{"text/event-stream"}, "x-request-id": []string{"rid_forced_cached"}},
		Body:       io.NopCloser(strings.NewReader(upstreamBody)),
	}}
	svc := &OpenAIGatewayService{
		cfg: &config.Config{Gateway: config.GatewayConfig{
			// Nonexistent path: must never be read during request handling.
			ForcedCodexInstructionsTemplateFile: "/path/that/should/not/be/read.tmpl",
			ForcedCodexInstructionsTemplate:     "cached-prefix\n\n{{ .ExistingInstructions }}",
		}},
		httpUpstream: upstream,
	}
	account := &Account{
		ID:          1,
		Name:        "openai-oauth",
		Platform:    PlatformOpenAI,
		Type:        AccountTypeOAuth,
		Concurrency: 1,
		Credentials: map[string]any{
			"access_token":       "oauth-token",
			"chatgpt_account_id": "chatgpt-acc",
		},
	}
	result, err := svc.ForwardAsAnthropic(context.Background(), c, account, body, "", "gpt-5.1")
	require.NoError(t, err)
	require.NotNil(t, result)
	// Upstream must see the cached template's output, not the file's.
	require.Equal(t, "cached-prefix\n\nclient-system", gjson.GetBytes(upstream.lastBody, "instructions").String())
}
......@@ -86,6 +86,24 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
return nil, fmt.Errorf("unmarshal for codex transform: %w", err)
}
codexResult := applyCodexOAuthTransform(reqBody, false, false)
forcedTemplateText := ""
if s.cfg != nil {
forcedTemplateText = s.cfg.Gateway.ForcedCodexInstructionsTemplate
}
templateUpstreamModel := upstreamModel
if codexResult.NormalizedModel != "" {
templateUpstreamModel = codexResult.NormalizedModel
}
existingInstructions, _ := reqBody["instructions"].(string)
if _, err := applyForcedCodexInstructionsTemplate(reqBody, forcedTemplateText, forcedCodexInstructionsTemplateData{
ExistingInstructions: strings.TrimSpace(existingInstructions),
OriginalModel: originalModel,
NormalizedModel: normalizedModel,
BillingModel: billingModel,
UpstreamModel: templateUpstreamModel,
}); err != nil {
return nil, err
}
if codexResult.NormalizedModel != "" {
upstreamModel = codexResult.NormalizedModel
}
......
package service
import "strings"
// Built-in fallback targets used by Group.ResolveMessagesDispatchModel when a
// group has no explicit mapping configured for a Claude model family.
const (
	defaultOpenAIMessagesDispatchOpusMappedModel   = "gpt-5.4"
	defaultOpenAIMessagesDispatchSonnetMappedModel = "gpt-5.3-codex"
	defaultOpenAIMessagesDispatchHaikuMappedModel  = "gpt-5.4-mini"
)
// normalizeOpenAIMessagesDispatchMappedModel canonicalizes a mapped model
// name: surrounding whitespace is stripped and the result is run through the
// shared OpenAI-compat request-model normalization.
func normalizeOpenAIMessagesDispatchMappedModel(model string) string {
	trimmed := strings.TrimSpace(model)
	normalized := NormalizeOpenAICompatRequestedModel(trimmed)
	return strings.TrimSpace(normalized)
}
// normalizeOpenAIMessagesDispatchModelConfig returns a sanitized copy of cfg:
// each family mapped model is normalized, exact-mapping entries with an empty
// key or value are dropped, and an empty mapping table collapses to nil.
func normalizeOpenAIMessagesDispatchModelConfig(cfg OpenAIMessagesDispatchModelConfig) OpenAIMessagesDispatchModelConfig {
	out := OpenAIMessagesDispatchModelConfig{
		OpusMappedModel:   normalizeOpenAIMessagesDispatchMappedModel(cfg.OpusMappedModel),
		SonnetMappedModel: normalizeOpenAIMessagesDispatchMappedModel(cfg.SonnetMappedModel),
		HaikuMappedModel:  normalizeOpenAIMessagesDispatchMappedModel(cfg.HaikuMappedModel),
	}
	mappings := make(map[string]string, len(cfg.ExactModelMappings))
	for rawRequested, rawMapped := range cfg.ExactModelMappings {
		requested := strings.TrimSpace(rawRequested)
		mapped := normalizeOpenAIMessagesDispatchMappedModel(rawMapped)
		if requested != "" && mapped != "" {
			mappings[requested] = mapped
		}
	}
	// Keep the field nil (not an empty map) when nothing survived filtering.
	if len(mappings) > 0 {
		out.ExactModelMappings = mappings
	}
	return out
}
// claudeMessagesDispatchFamily classifies a requested Claude model name into
// one of the dispatch families "opus", "sonnet" or "haiku". Matching is
// case-insensitive and ignores surrounding whitespace. Models that do not
// start with "claude", or match none of the families, yield "".
func claudeMessagesDispatchFamily(model string) string {
	name := strings.ToLower(strings.TrimSpace(model))
	if !strings.HasPrefix(name, "claude") {
		return ""
	}
	// Same precedence as the original opus -> sonnet -> haiku checks.
	for _, family := range []string{"opus", "sonnet", "haiku"} {
		if strings.Contains(name, family) {
			return family
		}
	}
	return ""
}
// ResolveMessagesDispatchModel maps a requested (Claude) model name to the
// OpenAI model this group should dispatch to. Resolution order:
//  1. exact per-model mapping from the group's dispatch config,
//  2. family-level mapping (opus/sonnet/haiku) from the config,
//  3. built-in family default.
// A nil receiver, blank input, or an unrecognized family resolves to "".
func (g *Group) ResolveMessagesDispatchModel(requestedModel string) string {
	if g == nil {
		return ""
	}
	requestedModel = strings.TrimSpace(requestedModel)
	if requestedModel == "" {
		return ""
	}
	cfg := normalizeOpenAIMessagesDispatchModelConfig(g.MessagesDispatchModelConfig)
	if exact := strings.TrimSpace(cfg.ExactModelMappings[requestedModel]); exact != "" {
		return exact
	}
	// pick prefers the configured family mapping, falling back to the default.
	pick := func(configured, fallback string) string {
		if configured = strings.TrimSpace(configured); configured != "" {
			return configured
		}
		return fallback
	}
	switch claudeMessagesDispatchFamily(requestedModel) {
	case "opus":
		return pick(cfg.OpusMappedModel, defaultOpenAIMessagesDispatchOpusMappedModel)
	case "sonnet":
		return pick(cfg.SonnetMappedModel, defaultOpenAIMessagesDispatchSonnetMappedModel)
	case "haiku":
		return pick(cfg.HaikuMappedModel, defaultOpenAIMessagesDispatchHaikuMappedModel)
	}
	return ""
}
// sanitizeGroupMessagesDispatchFields clears the OpenAI-only Messages dispatch
// fields on groups whose platform is not OpenAI. OpenAI groups (and nil
// pointers) are left untouched.
func sanitizeGroupMessagesDispatchFields(g *Group) {
	if g == nil {
		return
	}
	if g.Platform == PlatformOpenAI {
		return
	}
	g.AllowMessagesDispatch = false
	g.DefaultMappedModel = ""
	g.MessagesDispatchModelConfig = OpenAIMessagesDispatchModelConfig{}
}
package service
import "testing"
import "github.com/stretchr/testify/require"
// TestNormalizeOpenAIMessagesDispatchModelConfig verifies that mapped models
// are trimmed and normalized (variant suffixes such as "-high"/"-medium" are
// stripped) and that exact-mapping rows with blank keys or values are dropped.
func TestNormalizeOpenAIMessagesDispatchModelConfig(t *testing.T) {
	t.Parallel()

	got := normalizeOpenAIMessagesDispatchModelConfig(OpenAIMessagesDispatchModelConfig{
		OpusMappedModel:   " gpt-5.4-high ",
		SonnetMappedModel: "gpt-5.3-codex",
		HaikuMappedModel:  " gpt-5.4-mini-medium ",
		ExactModelMappings: map[string]string{
			" claude-sonnet-4-5-20250929 ": " gpt-5.2-high ",
			"":                             "gpt-5.4",
			"claude-opus-4-6":              " ",
		},
	})

	// Compare the whole sanitized config in one shot.
	require.Equal(t, OpenAIMessagesDispatchModelConfig{
		OpusMappedModel:   "gpt-5.4",
		SonnetMappedModel: "gpt-5.3-codex",
		HaikuMappedModel:  "gpt-5.4-mini",
		ExactModelMappings: map[string]string{
			"claude-sonnet-4-5-20250929": "gpt-5.2",
		},
	}, got)
}
-- Add per-group OpenAI Messages dispatch model mapping configuration.
-- JSONB with a NOT NULL '{}' default so existing rows need no backfill;
-- IF NOT EXISTS keeps the migration idempotent on re-runs.
ALTER TABLE groups
ADD COLUMN IF NOT EXISTS messages_dispatch_model_config JSONB NOT NULL DEFAULT '{}'::jsonb;
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
{{ if .ExistingInstructions }}
{{ .ExistingInstructions }}
{{ end }}
......@@ -202,6 +202,32 @@ gateway:
#
# 注意:开启后会影响所有客户端的行为(不仅限于 VS Code / Codex CLI),请谨慎开启。
force_codex_cli: false
# Optional: template file used to build the final top-level Codex `instructions`.
# 可选:用于构建最终 Codex 顶层 `instructions` 的模板文件路径。
#
# This is applied on the `/v1/messages -> Responses/Codex` conversion path,
# after Claude `system` has already been normalized into Codex `instructions`.
# 该模板作用于 `/v1/messages -> Responses/Codex` 转换链路,且发生在 Claude `system`
# 已经被归一化为 Codex `instructions` 之后。
#
# The template can reference:
# 模板可引用:
# - {{ .ExistingInstructions }} : converted client instructions/system
# - {{ .OriginalModel }} : original requested model
# - {{ .NormalizedModel }} : normalized routing model
# - {{ .BillingModel }} : billing model
# - {{ .UpstreamModel }} : final upstream model
#
# If you want to preserve client system prompts, keep {{ .ExistingInstructions }}
# somewhere in the template. If omitted, the template output fully replaces it.
# 如需保留客户端 system 提示词,请在模板中显式包含 {{ .ExistingInstructions }}。
# 若省略,则模板输出会完全覆盖它。
#
# Docker users can mount a host file to /app/data/codex-instructions.md.tmpl
# and point this field there.
# Docker 用户可将宿主机文件挂载到 /app/data/codex-instructions.md.tmpl,
# 然后把本字段指向该路径。
forced_codex_instructions_template_file: ""
# OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout)
# 默认 false:过滤超时头,降低上游提前断流风险。
openai_passthrough_allow_timeout_headers: false
......@@ -347,12 +373,6 @@ gateway:
# Enable batch load calculation for scheduling
# 启用调度批量负载计算
load_batch_enabled: true
# Snapshot bucket MGET chunk size
# 调度快照分桶读取时的 MGET 分块大小
snapshot_mget_chunk_size: 128
# Snapshot bucket write chunk size
# 调度快照重建写入时的分块大小
snapshot_write_chunk_size: 256
# Slot cleanup interval (duration)
# 并发槽位清理周期(时间段)
slot_cleanup_interval: 30s
......
......@@ -31,6 +31,10 @@ services:
# Optional: Mount custom config.yaml (uncomment and create the file first)
# Copy config.example.yaml to config.yaml, modify it, then uncomment:
# - ./config.yaml:/app/data/config.yaml
# Optional: Mount a custom Codex instructions template file, then point
# gateway.forced_codex_instructions_template_file at /app/data/codex-instructions.md.tmpl
# in config.yaml.
# - ./codex-instructions.md.tmpl:/app/data/codex-instructions.md.tmpl:ro
environment:
# =======================================================================
# Auto Setup (REQUIRED for Docker deployment)
......@@ -146,7 +150,17 @@ services:
networks:
- sub2api-network
healthcheck:
test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
test:
[
"CMD",
"wget",
"-q",
"-T",
"5",
"-O",
"/dev/null",
"http://localhost:8080/health",
]
interval: 30s
timeout: 10s
retries: 3
......@@ -177,11 +191,17 @@ services:
networks:
- sub2api-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
test:
[
"CMD-SHELL",
"pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}",
]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
ports:
- 5432:5432
# 注意:上方新增的 ports 会将 PostgreSQL 暴露到宿主机所有网卡(0.0.0.0:5432),
# 与"应用通过内部网络连接、不向宿主机暴露端口"的既定约定相冲突;
# 如仅用于本机调试,建议绑定回环地址(例如 ports: ["127.0.0.1:5433:5432"]),生产环境请移除。
......@@ -199,12 +219,12 @@ services:
volumes:
- redis_data:/data
command: >
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
environment:
- TZ=${TZ:-Asia/Shanghai}
# REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
......@@ -217,7 +237,8 @@ services:
timeout: 5s
retries: 5
start_period: 5s
ports:
- 6379:6379
# =============================================================================
# Volumes
# =============================================================================
......
......@@ -369,6 +369,13 @@ export type GroupPlatform = 'anthropic' | 'openai' | 'gemini' | 'antigravity'
export type SubscriptionType = 'standard' | 'subscription'
// Per-group model mapping for OpenAI Messages dispatch: family-level mapped
// models (opus/sonnet/haiku) plus optional exact per-model overrides
// (requested Claude model -> target OpenAI model).
export interface OpenAIMessagesDispatchModelConfig {
  opus_mapped_model?: string
  sonnet_mapped_model?: string
  haiku_mapped_model?: string
  exact_model_mappings?: Record<string, string>
}
export interface Group {
id: number
name: string
......@@ -391,6 +398,8 @@ export interface Group {
fallback_group_id_on_invalid_request: number | null
// OpenAI Messages 调度开关(用户侧需要此字段判断是否展示 Claude Code 教程)
allow_messages_dispatch?: boolean
default_mapped_model?: string
messages_dispatch_model_config?: OpenAIMessagesDispatchModelConfig
require_oauth_only: boolean
require_privacy_set: boolean
created_at: string
......@@ -417,6 +426,7 @@ export interface AdminGroup extends Group {
// OpenAI Messages 调度配置(仅 openai 平台使用)
default_mapped_model?: string
messages_dispatch_model_config?: OpenAIMessagesDispatchModelConfig
// 分组排序
sort_order: number
......
This diff is collapsed.
import { describe, expect, it } from "vitest";
import {
createDefaultMessagesDispatchFormState,
messagesDispatchConfigToFormState,
messagesDispatchFormStateToConfig,
resetMessagesDispatchFormState,
} from "../groupsMessagesDispatch";
// Unit tests for the pure form-state helpers backing the OpenAI Messages
// dispatch settings UI: defaults, config <-> form conversion, and reset.
describe("groupsMessagesDispatch", () => {
  it("returns the expected default form state", () => {
    expect(createDefaultMessagesDispatchFormState()).toEqual({
      allow_messages_dispatch: false,
      opus_mapped_model: "gpt-5.4",
      sonnet_mapped_model: "gpt-5.3-codex",
      haiku_mapped_model: "gpt-5.4-mini",
      exact_model_mappings: [],
    });
  });

  it("sanitizes exact model mapping rows when converting to config", () => {
    const config = messagesDispatchFormStateToConfig({
      allow_messages_dispatch: true,
      opus_mapped_model: " gpt-5.4 ",
      sonnet_mapped_model: "gpt-5.3-codex",
      haiku_mapped_model: " gpt-5.4-mini ",
      exact_model_mappings: [
        {
          claude_model: " claude-sonnet-4-5-20250929 ",
          target_model: " gpt-5.2 ",
        },
        // Rows with a blank claude model or blank target must be dropped.
        { claude_model: "", target_model: "gpt-5.4" },
        { claude_model: "claude-opus-4-6", target_model: " " },
      ],
    });
    expect(config).toEqual({
      opus_mapped_model: "gpt-5.4",
      sonnet_mapped_model: "gpt-5.3-codex",
      haiku_mapped_model: "gpt-5.4-mini",
      exact_model_mappings: {
        "claude-sonnet-4-5-20250929": "gpt-5.2",
      },
    });
  });

  it("hydrates form state from api config", () => {
    expect(
      messagesDispatchConfigToFormState({
        opus_mapped_model: "gpt-5.4",
        sonnet_mapped_model: "gpt-5.2",
        haiku_mapped_model: "gpt-5.4-mini",
        exact_model_mappings: {
          "claude-opus-4-6": "gpt-5.4",
          "claude-haiku-4-5-20251001": "gpt-5.4-mini",
        },
      }),
    ).toEqual({
      // allow_messages_dispatch is not part of the config payload, so
      // hydration always leaves it disabled.
      allow_messages_dispatch: false,
      opus_mapped_model: "gpt-5.4",
      sonnet_mapped_model: "gpt-5.2",
      haiku_mapped_model: "gpt-5.4-mini",
      // Mapping rows come back sorted by claude model name.
      exact_model_mappings: [
        {
          claude_model: "claude-haiku-4-5-20251001",
          target_model: "gpt-5.4-mini",
        },
        { claude_model: "claude-opus-4-6", target_model: "gpt-5.4" },
      ],
    });
  });

  it("resets mutable form state when platform switches away from openai", () => {
    const state = {
      allow_messages_dispatch: true,
      opus_mapped_model: "gpt-5.2",
      sonnet_mapped_model: "gpt-5.4",
      haiku_mapped_model: "gpt-5.1",
      exact_model_mappings: [
        { claude_model: "claude-opus-4-6", target_model: "gpt-5.4" },
      ],
    };
    resetMessagesDispatchFormState(state);
    // The object is mutated in place back to the defaults.
    expect(state).toEqual({
      allow_messages_dispatch: false,
      opus_mapped_model: "gpt-5.4",
      sonnet_mapped_model: "gpt-5.3-codex",
      haiku_mapped_model: "gpt-5.4-mini",
      exact_model_mappings: [],
    });
  });
});
import type { OpenAIMessagesDispatchModelConfig } from "@/types";
// A single editable row in the exact-model-mapping table
// (requested Claude model -> target OpenAI model).
export interface MessagesDispatchMappingRow {
  claude_model: string;
  target_model: string;
}

// UI form state for a group's OpenAI Messages dispatch settings. Mirrors
// OpenAIMessagesDispatchModelConfig but keeps exact mappings as ordered rows
// for table editing, and includes the allow_messages_dispatch toggle (carried
// on the group itself in the API, not inside the config object).
export interface MessagesDispatchFormState {
  allow_messages_dispatch: boolean;
  opus_mapped_model: string;
  sonnet_mapped_model: string;
  haiku_mapped_model: string;
  exact_model_mappings: MessagesDispatchMappingRow[];
}
// Build the default form state: dispatch disabled, built-in family defaults,
// and no exact-mapping rows. Returns a fresh object on every call.
export function createDefaultMessagesDispatchFormState(): MessagesDispatchFormState {
  const defaults: MessagesDispatchFormState = {
    allow_messages_dispatch: false,
    opus_mapped_model: "gpt-5.4",
    sonnet_mapped_model: "gpt-5.3-codex",
    haiku_mapped_model: "gpt-5.4-mini",
    exact_model_mappings: [],
  };
  return defaults;
}
// Build editable form state from an API config. Blank or missing family
// models fall back to the defaults; exact mappings become rows sorted by
// claude model name. allow_messages_dispatch is not part of the config
// payload, so it is always hydrated as false and toggled separately.
export function messagesDispatchConfigToFormState(
  config?: OpenAIMessagesDispatchModelConfig | null,
): MessagesDispatchFormState {
  const state = createDefaultMessagesDispatchFormState();
  const opus = config?.opus_mapped_model?.trim();
  const sonnet = config?.sonnet_mapped_model?.trim();
  const haiku = config?.haiku_mapped_model?.trim();
  if (opus) state.opus_mapped_model = opus;
  if (sonnet) state.sonnet_mapped_model = sonnet;
  if (haiku) state.haiku_mapped_model = haiku;
  state.exact_model_mappings = Object.entries(config?.exact_model_mappings ?? {})
    .sort(([left], [right]) => left.localeCompare(right))
    .map(([claude_model, target_model]) => ({ claude_model, target_model }));
  return state;
}
// Convert form state back into the API config shape. Family models are
// trimmed; exact-mapping rows with a blank claude model or target are dropped.
export function messagesDispatchFormStateToConfig(
  state: MessagesDispatchFormState,
): OpenAIMessagesDispatchModelConfig {
  const mappings: Record<string, string> = {};
  for (const row of state.exact_model_mappings) {
    const claudeModel = row.claude_model.trim();
    const targetModel = row.target_model.trim();
    if (claudeModel && targetModel) {
      mappings[claudeModel] = targetModel;
    }
  }
  return {
    opus_mapped_model: state.opus_mapped_model.trim(),
    sonnet_mapped_model: state.sonnet_mapped_model.trim(),
    haiku_mapped_model: state.haiku_mapped_model.trim(),
    exact_model_mappings: mappings,
  };
}
// Reset the (reactive) form state object in place to the defaults, e.g. when
// the group's platform switches away from openai. Mutation (rather than
// replacement) preserves reactivity bindings on the existing object.
export function resetMessagesDispatchFormState(
  target: MessagesDispatchFormState,
): void {
  Object.assign(target, createDefaultMessagesDispatchFormState());
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment