Commit 73a8683c authored by 陈曦's avatar 陈曦
Browse files

append migration files by v117

parent b017f461
Pipeline #82250 passed with stage
in 23 seconds
-- Add an optional JSONB column on payment orders to hold a snapshot of
-- provider data per order (written by the application; nullable for old rows).
ALTER TABLE payment_orders ADD COLUMN IF NOT EXISTS provider_snapshot JSONB;
-- Split the legacy wechat_connect_enabled/_mode pair into two explicit
-- per-mode flags, and seed the new per-provider signup-grant switches.
-- ON CONFLICT DO NOTHING preserves any value an operator already set.
INSERT INTO settings (key, value)
VALUES
(
'wechat_connect_open_enabled',
CASE
-- Fresh install (legacy flag absent): seed an empty value.
-- NOTE(review): '' rather than 'false' — presumably the application treats
-- empty as "unset, use runtime default"; confirm against the settings reader.
WHEN NOT EXISTS (SELECT 1 FROM settings WHERE key = 'wechat_connect_enabled') THEN ''
-- Legacy connect disabled (or value missing/garbled): open mode stays off.
WHEN COALESCE((SELECT value FROM settings WHERE key = 'wechat_connect_enabled'), 'false') <> 'true' THEN 'false'
-- Legacy mode 'mp' (case/whitespace tolerant): open-platform mode is off.
WHEN LOWER(TRIM(COALESCE((SELECT value FROM settings WHERE key = 'wechat_connect_mode'), 'open'))) = 'mp' THEN 'false'
-- Any other mode counts as open-platform enabled.
ELSE 'true'
END
),
(
'wechat_connect_mp_enabled',
-- Mirror of the branch above with the 'mp' outcome inverted.
CASE
WHEN NOT EXISTS (SELECT 1 FROM settings WHERE key = 'wechat_connect_enabled') THEN ''
WHEN COALESCE((SELECT value FROM settings WHERE key = 'wechat_connect_enabled'), 'false') <> 'true' THEN 'false'
WHEN LOWER(TRIM(COALESCE((SELECT value FROM settings WHERE key = 'wechat_connect_mode'), 'open'))) = 'mp' THEN 'true'
ELSE 'false'
END
),
-- New signup-grant defaults for each auth provider, all disabled.
('auth_source_default_email_grant_on_signup', 'false'),
('auth_source_default_linuxdo_grant_on_signup', 'false'),
('auth_source_default_oidc_grant_on_signup', 'false'),
('auth_source_default_wechat_grant_on_signup', 'false')
ON CONFLICT (key) DO NOTHING;
-- Intentionally left as a no-op.
-- The online index rollout lives in 120_enforce_payment_orders_out_trade_no_unique_notx.sql
-- (presumably this empty DO block keeps the migration numbering sequence intact
-- for the runner — confirm against the migration framework).
DO $$
BEGIN
NULL;
END $$;
-- Build the payment order uniqueness guarantee online.
-- The migration runner performs an explicit duplicate out_trade_no precheck and
-- drops any stale invalid paymentorder_out_trade_no_unique index before retrying.
-- Create the new partial unique index concurrently first so writes keep flowing,
-- then remove the legacy index name once the replacement is ready.
-- NOTE: CREATE/DROP INDEX CONCURRENTLY cannot run inside a transaction block;
-- the _notx file naming above suggests the runner executes this without one — confirm.
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS paymentorder_out_trade_no_unique
ON payment_orders (out_trade_no)
WHERE out_trade_no <> ''; -- partial: rows with empty out_trade_no may still repeat
DROP INDEX CONCURRENTLY IF EXISTS paymentorder_out_trade_no;
-- Take over the legacy index name: once the concurrent build above has left a
-- valid paymentorder_out_trade_no_unique in place, drop any lingering index
-- under the old name and rename the replacement onto it. Guarded so the block
-- is a no-op when the replacement index is absent.
DO $$
BEGIN
-- Only proceed if the replacement unique index actually exists.
IF EXISTS (
SELECT 1
FROM pg_indexes
WHERE schemaname = 'public'
AND tablename = 'payment_orders'
AND indexname = 'paymentorder_out_trade_no_unique'
) THEN
-- Free the target name first if a legacy index still occupies it.
IF EXISTS (
SELECT 1
FROM pg_indexes
WHERE schemaname = 'public'
AND tablename = 'payment_orders'
AND indexname = 'paymentorder_out_trade_no'
) THEN
EXECUTE 'DROP INDEX IF EXISTS paymentorder_out_trade_no';
END IF;
-- Rename so the old, established index name now carries the unique guarantee.
EXECUTE 'ALTER INDEX paymentorder_out_trade_no_unique RENAME TO paymentorder_out_trade_no';
END IF;
END $$;
-- Widen report_type so longer report identifiers fit (e.g. the
-- 'legacy_auth_source_signup_grant_review' type written later in this file).
ALTER TABLE auth_identity_migration_reports ALTER COLUMN report_type TYPE VARCHAR(80);
-- Scrub issued token material (access/refresh token, expiry, type) out of the
-- cached completion_response in pending auth sessions, so persisted session
-- state no longer retains live credentials.
UPDATE pending_auth_sessions
SET
local_flow_state = jsonb_set(
local_flow_state,
'{completion_response}',
-- jsonb '-' strips the four keys; jsonb_set writes the cleaned object back.
((local_flow_state -> 'completion_response') - 'access_token' - 'refresh_token' - 'expires_in' - 'token_type'),
true
)
-- Only touch rows whose completion_response is an object carrying at least one
-- of the token fields (avoids rewriting rows that are already clean).
WHERE jsonb_typeof(local_flow_state -> 'completion_response') = 'object'
AND (
(local_flow_state -> 'completion_response') ? 'access_token'
OR (local_flow_state -> 'completion_response') ? 'refresh_token'
OR (local_flow_state -> 'completion_response') ? 'expires_in'
OR (local_flow_state -> 'completion_response') ? 'token_type'
);
-- Auto-backfill untouched migration 110 signup-grant defaults to the corrected false value.
-- Rows still matching the migration-110 default payload and timestamp window are treated as
-- untouched legacy defaults; any remaining legacy true values are reported for manual review.
-- When migration 110 ran; empty if it never did, which empties every CTE below.
WITH migration_110 AS (
SELECT applied_at
FROM schema_migrations
WHERE filename = '110_pending_auth_and_provider_default_grants.sql'
),
-- The four auth providers whose defaults migration 110 seeded.
providers AS (
SELECT provider_type
FROM (
VALUES ('email'), ('linuxdo'), ('oidc'), ('wechat')
) AS providers(provider_type)
),
-- Providers whose five default settings still hold exactly the values migration
-- 110 wrote AND were last updated within ±1 minute of its applied_at — i.e.
-- nobody has customized them since.
legacy_provider_defaults AS (
SELECT providers.provider_type
FROM providers
CROSS JOIN migration_110
JOIN settings balance
ON balance.key = 'auth_source_default_' || providers.provider_type || '_balance'
JOIN settings concurrency
ON concurrency.key = 'auth_source_default_' || providers.provider_type || '_concurrency'
JOIN settings subscriptions
ON subscriptions.key = 'auth_source_default_' || providers.provider_type || '_subscriptions'
JOIN settings grant_on_signup
ON grant_on_signup.key = 'auth_source_default_' || providers.provider_type || '_grant_on_signup'
JOIN settings grant_on_first_bind
ON grant_on_first_bind.key = 'auth_source_default_' || providers.provider_type || '_grant_on_first_bind'
WHERE balance.value = '0'
AND concurrency.value = '5'
AND subscriptions.value = '[]'
AND grant_on_signup.value = 'true'
AND grant_on_first_bind.value = 'false'
AND balance.updated_at BETWEEN migration_110.applied_at - INTERVAL '1 minute' AND migration_110.applied_at + INTERVAL '1 minute'
AND concurrency.updated_at BETWEEN migration_110.applied_at - INTERVAL '1 minute' AND migration_110.applied_at + INTERVAL '1 minute'
AND subscriptions.updated_at BETWEEN migration_110.applied_at - INTERVAL '1 minute' AND migration_110.applied_at + INTERVAL '1 minute'
AND grant_on_signup.updated_at BETWEEN migration_110.applied_at - INTERVAL '1 minute' AND migration_110.applied_at + INTERVAL '1 minute'
AND grant_on_first_bind.updated_at BETWEEN migration_110.applied_at - INTERVAL '1 minute' AND migration_110.applied_at + INTERVAL '1 minute'
),
-- Flip the untouched signup-grant defaults to 'false'; RETURNING records which
-- providers were auto-backfilled.
updated_signup_grants AS (
UPDATE settings
SET
value = 'false',
updated_at = NOW()
FROM legacy_provider_defaults
WHERE settings.key = 'auth_source_default_' || legacy_provider_defaults.provider_type || '_grant_on_signup'
AND settings.value = 'true'
RETURNING legacy_provider_defaults.provider_type
)
-- File a review report for each provider still at 'true' that was NOT just
-- auto-backfilled (operator-customized values). Note: in PostgreSQL the main
-- query reads the pre-UPDATE snapshot of settings, so freshly-backfilled rows
-- still show value = 'true' here — the anti-join on updated_signup_grants is
-- what excludes them.
INSERT INTO auth_identity_migration_reports (report_type, report_key, details)
SELECT
'legacy_auth_source_signup_grant_review',
providers.provider_type,
jsonb_build_object(
'provider_type', providers.provider_type,
'current_value', grant_on_signup.value,
'auto_backfilled', FALSE,
'reason', 'legacy_true_default_not_auto_backfilled'
)
FROM providers
JOIN settings grant_on_signup
ON grant_on_signup.key = 'auth_source_default_' || providers.provider_type || '_grant_on_signup'
LEFT JOIN updated_signup_grants
ON updated_signup_grants.provider_type = providers.provider_type
WHERE grant_on_signup.value = 'true'
AND updated_signup_grants.provider_type IS NULL
ON CONFLICT (report_type, report_key) DO NOTHING;
-- Preserve legacy OIDC behavior for upgraded installs that predate the
-- introduction of secure PKCE/id_token defaults. Fresh installs continue to
-- inherit runtime defaults when these rows are absent.
-- Any pre-existing oidc_connect_* row marks this as an upgraded (legacy) install;
-- LIMIT 1 is enough because the CTE is only used as an existence gate.
WITH legacy_oidc_install AS (
SELECT 1
FROM settings
WHERE key IN (
'oidc_connect_enabled',
'oidc_connect_client_id',
'oidc_connect_authorize_url',
'oidc_connect_token_url',
'oidc_connect_issuer_url',
'oidc_connect_userinfo_url',
'oidc_connect_frontend_redirect_url'
)
LIMIT 1
)
-- Pin both new security toggles to 'false' on legacy installs only; the
-- NOT EXISTS check plus ON CONFLICT keeps any operator-set values untouched.
INSERT INTO settings (key, value)
SELECT defaults.key, 'false'
FROM legacy_oidc_install
CROSS JOIN (
VALUES
('oidc_connect_use_pkce'),
('oidc_connect_validate_id_token')
) AS defaults(key)
WHERE NOT EXISTS (
SELECT 1
FROM settings existing
WHERE existing.key = defaults.key
)
ON CONFLICT (key) DO NOTHING;
-- Migration: 125_add_channel_monitors
-- Channel-monitoring MVP: periodically run model heartbeat tests against an
-- external provider/endpoint/api_key.
--
-- Tables:
-- - channel_monitors: monitor configuration (one row = one monitored target)
-- - channel_monitor_histories: check detail rows (one check of one model = one row)
--
-- Design notes:
-- - api_key_encrypted holds AES-256-GCM ciphertext (base64), encrypted by the service layer.
-- - extra_models is a JSONB string array for extensibility (per-model metadata such as
--   weights can be added later).
-- - the history table cleans itself up via ON DELETE CASCADE when a monitor is deleted.
-- - the (enabled, last_checked_at) index serves the scheduler's "due for a check" scan.
-- - (monitor_id, model, checked_at DESC) on histories serves user-facing aggregate views;
--   the standalone (checked_at) index serves the periodic DELETE of data older than 30 days.
CREATE TABLE IF NOT EXISTS channel_monitors (
id BIGSERIAL PRIMARY KEY,
name VARCHAR(100) NOT NULL,
provider VARCHAR(20) NOT NULL, -- openai / anthropic / gemini (enforced below)
endpoint VARCHAR(500) NOT NULL, -- base origin
api_key_encrypted TEXT NOT NULL, -- AES-256-GCM (base64), never plaintext
primary_model VARCHAR(200) NOT NULL,
extra_models JSONB NOT NULL DEFAULT '[]'::jsonb, -- additional model names as a JSON string array
group_name VARCHAR(100) NOT NULL DEFAULT '',
enabled BOOLEAN NOT NULL DEFAULT TRUE,
interval_seconds INT NOT NULL, -- check cadence; constrained to 15s..1h below
last_checked_at TIMESTAMPTZ, -- NULL until the first check has run
created_by BIGINT NOT NULL, -- user id of the creator
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT channel_monitors_provider_check CHECK (provider IN ('openai', 'anthropic', 'gemini')),
CONSTRAINT channel_monitors_interval_check CHECK (interval_seconds BETWEEN 15 AND 3600)
);
-- (enabled, last_checked_at): scheduler scan for monitors due for a check.
CREATE INDEX IF NOT EXISTS idx_channel_monitors_enabled_last_checked ON channel_monitors (enabled, last_checked_at);
-- provider and group_name: presumably for filtered listings — confirm against queries.
CREATE INDEX IF NOT EXISTS idx_channel_monitors_provider ON channel_monitors (provider);
CREATE INDEX IF NOT EXISTS idx_channel_monitors_group_name ON channel_monitors (group_name);
-- Per-check detail rows; cascade-deleted with the owning monitor.
CREATE TABLE IF NOT EXISTS channel_monitor_histories (
id BIGSERIAL PRIMARY KEY,
monitor_id BIGINT NOT NULL REFERENCES channel_monitors(id) ON DELETE CASCADE,
model VARCHAR(200) NOT NULL,
status VARCHAR(20) NOT NULL, -- one of the four values in the CHECK below
latency_ms INT, -- NULL when no latency was measured for this check
ping_latency_ms INT, -- NULL when no ping latency was measured
message VARCHAR(500) NOT NULL DEFAULT '', -- human-readable detail, empty on success presumably — confirm
checked_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT channel_monitor_histories_status_check
CHECK (status IN ('operational', 'degraded', 'failed', 'error'))
);
-- Serves per-monitor/model timeline queries in the user view.
CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_monitor_model_checked
ON channel_monitor_histories (monitor_id, model, checked_at DESC);
-- Serves the retention DELETE of rows older than 30 days.
CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_checked_at
ON channel_monitor_histories (checked_at);
-- Add per-group Requests-Per-Minute limit.
-- rpm_limit: uniform RPM cap for the group (0 = unlimited).
-- Once set, it takes over rate limiting for a user in this group, overriding
-- the user-level users.rpm_limit.
-- Counter key: rpm:ug:{user_id}:{group_id}:{minute}.
ALTER TABLE groups ADD COLUMN IF NOT EXISTS rpm_limit integer NOT NULL DEFAULT 0;
COMMENT ON COLUMN groups.rpm_limit IS '分组 RPM 上限;0 表示不限制;设置后接管该分组用户的限流(覆盖用户级 rpm_limit)。';
-- Migration: 126_add_channel_monitor_aggregation
-- Daily rollups for channel monitoring: aggregate channel_monitor_histories by
-- day; keep detail rows for 1 day and rollups for 30 days. Both detail and
-- rollup tables use soft delete (deleted_at), cleaned by the ops cleanup task
-- that runs nightly alongside the ops monitoring cleanup (shared cron).
--
-- Design notes:
-- - channel_monitor_histories gains a deleted_at soft-delete column (the global
--   SoftDeleteMixin hook rewrites DELETE into UPDATE deleted_at = NOW()).
-- - channel_monitor_daily_rollups is unique on (monitor_id, model, bucket_date);
--   ON CONFLICT DO UPDATE gives idempotent backfills. Status distributions and
--   latency numerators/denominators are both retained so weighted availability
--   and averages can be derived over any window later.
-- - the watermark table holds a single row (id=1) recording the most recent
--   aggregated date, so restarts do not rescan the whole table.
-- - the (bucket_date) index on rollups serves the cleanup's
--   DELETE WHERE bucket_date < cutoff.
-- 1) Add the soft-delete column to the history detail table.
ALTER TABLE channel_monitor_histories
ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ;
CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_deleted_at
ON channel_monitor_histories (deleted_at);
-- 2) Create the daily rollup table.
CREATE TABLE IF NOT EXISTS channel_monitor_daily_rollups (
id BIGSERIAL PRIMARY KEY,
monitor_id BIGINT NOT NULL REFERENCES channel_monitors(id) ON DELETE CASCADE,
model VARCHAR(200) NOT NULL,
bucket_date DATE NOT NULL, -- aggregation day: one bucket per monitor/model/day
total_checks INT NOT NULL DEFAULT 0,
ok_count INT NOT NULL DEFAULT 0,
operational_count INT NOT NULL DEFAULT 0,
degraded_count INT NOT NULL DEFAULT 0,
failed_count INT NOT NULL DEFAULT 0,
error_count INT NOT NULL DEFAULT 0,
-- Latencies stored as sum + count pairs so averages can be re-weighted over
-- arbitrary windows instead of averaging averages.
sum_latency_ms BIGINT NOT NULL DEFAULT 0,
count_latency INT NOT NULL DEFAULT 0,
sum_ping_latency_ms BIGINT NOT NULL DEFAULT 0,
count_ping_latency INT NOT NULL DEFAULT 0,
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
deleted_at TIMESTAMPTZ -- soft delete (see migration 126 header comment)
);
-- Conflict target for the idempotent ON CONFLICT DO UPDATE backfill.
CREATE UNIQUE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_unique
ON channel_monitor_daily_rollups (monitor_id, model, bucket_date);
-- Serves the retention DELETE WHERE bucket_date < cutoff.
CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_bucket
ON channel_monitor_daily_rollups (bucket_date);
CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_deleted_at
ON channel_monitor_daily_rollups (deleted_at);
-- 3) Create the watermark table (single row: id=1).
CREATE TABLE IF NOT EXISTS channel_monitor_aggregation_watermark (
id INT PRIMARY KEY DEFAULT 1,
last_aggregated_date DATE, -- NULL until the first aggregation pass completes
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Enforce the singleton: no row other than id = 1 can ever be inserted.
CONSTRAINT channel_monitor_aggregation_watermark_singleton CHECK (id = 1)
);
-- Seed the singleton; DO NOTHING keeps an existing watermark value intact.
INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at)
VALUES (1, NULL, NOW())
ON CONFLICT (id) DO NOTHING;
-- Add per-user Requests-Per-Minute cap.
-- rpm_limit is a user-wide fallback (0 = unlimited): it applies only when the
-- accessed group defines no rpm_limit and no user-group rpm_override exists.
-- Counter key: rpm:u:{user_id}:{minute}.
ALTER TABLE users
ADD COLUMN IF NOT EXISTS rpm_limit integer NOT NULL DEFAULT 0;
COMMENT ON COLUMN users.rpm_limit IS '用户级 RPM 兜底上限;0 表示不限制;仅当分组未设置 rpm_limit 时生效。';
-- Extend the existing "per-user group rate multiplier" table with an
-- rpm_override column, and relax rate_multiplier to be nullable so a single row
-- can override only the rate, only the RPM, or both.
-- Semantics:
-- - rate_multiplier NULL     → user falls back to groups.rate_multiplier
-- - rate_multiplier non-NULL → overrides the group's default billing multiplier
-- - rpm_override NULL        → user falls back to groups.rpm_limit
-- - rpm_override non-NULL    → overrides the group's default RPM (0 = unlimited)
-- The user-level users.rpm_limit still applies independently (cross-group quota).
ALTER TABLE user_group_rate_multipliers
ADD COLUMN IF NOT EXISTS rpm_override integer NULL;
ALTER TABLE user_group_rate_multipliers
ALTER COLUMN rate_multiplier DROP NOT NULL;
COMMENT ON COLUMN user_group_rate_multipliers.rate_multiplier IS '专属计费倍率;NULL 表示沿用分组默认倍率。';
COMMENT ON COLUMN user_group_rate_multipliers.rpm_override IS '专属 RPM 上限;NULL 表示沿用分组默认;0 表示该用户在此分组不受 RPM 限制。';
-- Migration: 127_drop_channel_monitor_deleted_at
-- Reverts the SoftDeleteMixin on these tables: log/rollup data has no restore
-- requirement, and soft deletion makes rows and indexes grow without bound,
-- adding disk and query overhead for nothing. Go back to batched physical
-- deletes (scheduled nightly by OpsCleanupService, deleteOldRowsByID template,
-- batch=5000).
-- NOTE(review): the original comment attributes the mixin to migration "110",
-- but deleted_at was added by 126_add_channel_monitor_aggregation above —
-- likely a typo for 126; confirm before relying on this history.
--
-- Aggregation/cleanup has not run yet (first maintenance fires at 02:00 the
-- next day), so no business data is at risk. Drop the columns and indexes
-- directly; the Go-side ent schema already removed SoftDeleteMixin and the
-- repo's raw SQL no longer filters on deleted_at IS NULL.
DROP INDEX IF EXISTS idx_channel_monitor_histories_deleted_at;
ALTER TABLE channel_monitor_histories
DROP COLUMN IF EXISTS deleted_at;
DROP INDEX IF EXISTS idx_channel_monitor_daily_rollups_deleted_at;
ALTER TABLE channel_monitor_daily_rollups
DROP COLUMN IF EXISTS deleted_at;
-- Migration: 128_add_channel_monitor_request_templates
-- Add a request template table plus 4 snapshot fields on channel_monitors
-- (template_id back-reference + the three snapshots actually used at runtime:
-- extra_headers / body_override_mode / body_override).
--
-- Design notes:
-- 1) Template → monitor is "copy on apply" snapshot semantics; the runtime
--    checker never re-reads the template table. Updating a template does not
--    affect monitors automatically — snapshots refresh only when the user
--    explicitly applies the template to its linked monitors.
-- 2) ON DELETE SET NULL: deleting a template does not cascade to monitors;
--    monitors keep their snapshots and continue working.
-- 3) extra_headers / body_override are JSONB; body_override_mode is a varchar
--    (not an enum) so new modes need no ALTER TYPE.
-- 4) Template names are unique per provider (an Anthropic and an OpenAI
--    template may share a name such as "impersonate official client").
CREATE TABLE IF NOT EXISTS channel_monitor_request_templates (
id BIGSERIAL PRIMARY KEY,
name VARCHAR(100) NOT NULL,
provider VARCHAR(20) NOT NULL, -- openai / anthropic / gemini (enforced below)
description VARCHAR(500) NOT NULL DEFAULT '',
extra_headers JSONB NOT NULL DEFAULT '{}'::jsonb, -- header name → value map
body_override_mode VARCHAR(10) NOT NULL DEFAULT 'off', -- off / merge / replace
body_override JSONB NULL, -- request-body override payload; NULL when mode is 'off' presumably — confirm
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT channel_monitor_request_templates_provider_check
CHECK (provider IN ('openai', 'anthropic', 'gemini')),
CONSTRAINT channel_monitor_request_templates_body_mode_check
CHECK (body_override_mode IN ('off', 'merge', 'replace'))
);
-- One template name per provider (design note 4).
CREATE UNIQUE INDEX IF NOT EXISTS channel_monitor_request_templates_provider_name
ON channel_monitor_request_templates (provider, name);
-- Add the 4 snapshot columns to channel_monitors
-- (ADD COLUMN IF NOT EXISTS needs PG 9.6+; production runs PG 16).
ALTER TABLE channel_monitors
ADD COLUMN IF NOT EXISTS template_id BIGINT NULL; -- FK constraint added in the DO block below
ALTER TABLE channel_monitors
ADD COLUMN IF NOT EXISTS extra_headers JSONB NOT NULL DEFAULT '{}'::jsonb;
ALTER TABLE channel_monitors
ADD COLUMN IF NOT EXISTS body_override_mode VARCHAR(10) NOT NULL DEFAULT 'off';
ALTER TABLE channel_monitors
ADD COLUMN IF NOT EXISTS body_override JSONB NULL;
-- Constraint + foreign key, guarded by IF NOT EXISTS checks inside a DO block
-- for idempotency (ALTER TABLE ... ADD CONSTRAINT has no IF NOT EXISTS form).
DO $$
BEGIN
-- Mirror the template table's mode CHECK on the snapshot column.
IF NOT EXISTS (
SELECT 1 FROM information_schema.table_constraints
WHERE constraint_name = 'channel_monitors_body_mode_check'
AND table_name = 'channel_monitors'
) THEN
ALTER TABLE channel_monitors
ADD CONSTRAINT channel_monitors_body_mode_check
CHECK (body_override_mode IN ('off', 'merge', 'replace'));
END IF;
-- FK to the template table; SET NULL so template deletion detaches, never
-- deletes, the monitors that referenced it.
IF NOT EXISTS (
SELECT 1 FROM information_schema.table_constraints
WHERE constraint_name = 'channel_monitors_template_id_fkey'
AND table_name = 'channel_monitors'
) THEN
ALTER TABLE channel_monitors
ADD CONSTRAINT channel_monitors_template_id_fkey
FOREIGN KEY (template_id)
REFERENCES channel_monitor_request_templates (id)
ON DELETE SET NULL;
END IF;
END $$;
-- Partial index: only monitors that actually reference a template.
CREATE INDEX IF NOT EXISTS idx_channel_monitors_template_id
ON channel_monitors (template_id)
WHERE template_id IS NOT NULL;
-- Migration: 129_seed_claude_code_template
-- Seed a built-in "Claude Code impersonation" request template covering every
-- check the Anthropic upstream applies to the official CLI client:
-- 1) User-Agent / X-App / anthropic-beta / anthropic-version headers
-- 2) first entry of the system array matches the official system prompt
--    verbatim (Dice >= 0.5)
-- 3) metadata.user_id satisfies ParseMetadataUserID — the legacy format is used
--    here (user_<64hex>_account_<uuid>_session_<36char>) to avoid the long runs
--    of \" escapes the newer JSON-string-in-JSON format produces in editors.
--
-- ON CONFLICT DO NOTHING: deployed environments that already created the
-- template by hand do not get duplicates or overwrites from this migration.
-- Users may edit the seeded row afterwards; a major Claude Code version bump
-- ships a new migration with a new template instead of touching the old one.
INSERT INTO channel_monitor_request_templates (
name, provider, description, extra_headers, body_override_mode, body_override
)
VALUES (
'Claude Code 伪装',
'anthropic',
'完整模拟 Claude Code 2.1.114 客户端:UA + anthropic-beta + system + metadata.user_id 全部对齐,绕过 Anthropic 上游 ''Claude Code only'' 限制(如 Max 套餐)。',
'{
"User-Agent": "claude-cli/2.1.114 (external, sdk-cli)",
"X-App": "cli",
"anthropic-version": "2023-06-01",
"anthropic-beta": "claude-code-20250219,interleaved-thinking-2025-05-14,context-management-2025-06-27,prompt-caching-scope-2026-01-05,advisor-tool-2026-03-01",
"anthropic-dangerous-direct-browser-access": "true"
}'::jsonb,
'merge',
'{
"system": [
{
"type": "text",
"text": "You are Claude Code, Anthropic''s official CLI for Claude."
}
],
"metadata": {
"user_id": "user_0000000000000000000000000000000000000000000000000000000000000000_account_00000000-0000-0000-0000-000000000000_session_00000000-0000-0000-0000-000000000000"
}
}'::jsonb
)
ON CONFLICT (provider, name) DO NOTHING;
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment