Commit df1ef3de authored by ianshaw's avatar ianshaw
Browse files

refactor: 移除 Ops 监控模块

移除未完成的运维监控功能,简化系统架构:
- 删除 ops_handler, ops_service, ops_repo 等后端代码
- 删除 ops 相关数据库迁移文件
- 删除前端 OpsDashboard 页面和 API
parent 45bd9ac7
package admin
import (
"math"
"net/http"
"strconv"
"time"
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/gin-gonic/gin"
)
// OpsHandler exposes the ops-dashboard HTTP endpoints.
type OpsHandler struct {
	opsService *service.OpsService
}

// NewOpsHandler wires an OpsService into a fresh OpsHandler.
func NewOpsHandler(opsService *service.OpsService) *OpsHandler {
	h := &OpsHandler{opsService: opsService}
	return h
}
// GetMetrics returns the latest ops metrics snapshot.
// GET /api/v1/admin/ops/metrics
func (h *OpsHandler) GetMetrics(c *gin.Context) {
	ctx := c.Request.Context()
	metrics, err := h.opsService.GetLatestMetrics(ctx)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get ops metrics")
		return
	}
	response.Success(c, metrics)
}
// ListMetricsHistory returns a time-range slice of metrics for charts.
// GET /api/v1/admin/ops/metrics/history
//
// Query params:
//   - window_minutes: int (default 1)
//   - minutes: int (lookback; optional)
//   - start_time/end_time: RFC3339 timestamps (optional; overrides minutes when provided)
//   - limit: int (optional; 1-5000; default 300, or 1440 when the default 24h
//     range is used without an explicit limit)
func (h *OpsHandler) ListMetricsHistory(c *gin.Context) {
	windowMinutes := 1
	if v := c.Query("window_minutes"); v != "" {
		if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
			windowMinutes = parsed
		} else {
			response.BadRequest(c, "Invalid window_minutes")
			return
		}
	}
	limit := 300
	// limitProvided distinguishes an explicit client limit from the default,
	// so the 24h fallback below can widen it without overriding the client.
	limitProvided := false
	if v := c.Query("limit"); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed <= 0 || parsed > 5000 {
			response.BadRequest(c, "Invalid limit (must be 1-5000)")
			return
		}
		limit = parsed
		limitProvided = true
	}
	endTime := time.Now()
	startTime := time.Time{}
	if startTimeStr := c.Query("start_time"); startTimeStr != "" {
		parsed, err := time.Parse(time.RFC3339, startTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid start_time format (RFC3339)")
			return
		}
		startTime = parsed
	}
	if endTimeStr := c.Query("end_time"); endTimeStr != "" {
		parsed, err := time.Parse(time.RFC3339, endTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid end_time format (RFC3339)")
			return
		}
		endTime = parsed
	}
	// If explicit range not provided, use lookback minutes.
	if startTime.IsZero() {
		if v := c.Query("minutes"); v != "" {
			minutes, err := strconv.Atoi(v)
			if err != nil || minutes <= 0 {
				response.BadRequest(c, "Invalid minutes")
				return
			}
			// Cap lookback at 7 days rather than rejecting larger values.
			if minutes > 60*24*7 {
				minutes = 60 * 24 * 7
			}
			startTime = endTime.Add(-time.Duration(minutes) * time.Minute)
		}
	}
	// Default time range: last 24 hours.
	if startTime.IsZero() {
		startTime = endTime.Add(-24 * time.Hour)
		if !limitProvided {
			// Metrics are collected at 1-minute cadence; 24h requires ~1440 points.
			limit = 24 * 60
		}
	}
	if startTime.After(endTime) {
		response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
		return
	}
	items, err := h.opsService.ListMetricsHistory(c.Request.Context(), windowMinutes, startTime, endTime, limit)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to list ops metrics history")
		return
	}
	response.Success(c, gin.H{"items": items})
}
// ListErrorLogs lists recent error logs with optional filters.
// GET /api/v1/admin/ops/error-logs
//
// Query params:
//   - start_time/end_time: RFC3339 timestamps (optional)
//   - platform, phase, severity: string filters (optional)
//   - q: string (optional; fuzzy match)
//   - limit: int (optional; default 100; max 500)
func (h *OpsHandler) ListErrorLogs(c *gin.Context) {
	var filters service.OpsErrorLogFilters

	if raw := c.Query("start_time"); raw != "" {
		ts, err := time.Parse(time.RFC3339, raw)
		if err != nil {
			response.BadRequest(c, "Invalid start_time format (RFC3339)")
			return
		}
		filters.StartTime = &ts
	}
	if raw := c.Query("end_time"); raw != "" {
		ts, err := time.Parse(time.RFC3339, raw)
		if err != nil {
			response.BadRequest(c, "Invalid end_time format (RFC3339)")
			return
		}
		filters.EndTime = &ts
	}
	// Reject an inverted range before touching the service layer.
	if filters.StartTime != nil && filters.EndTime != nil && filters.StartTime.After(*filters.EndTime) {
		response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
		return
	}

	filters.Platform = c.Query("platform")
	filters.Phase = c.Query("phase")
	filters.Severity = c.Query("severity")
	filters.Query = c.Query("q")

	filters.Limit = 100
	if raw := c.Query("limit"); raw != "" {
		n, err := strconv.Atoi(raw)
		if err != nil || n <= 0 || n > 500 {
			response.BadRequest(c, "Invalid limit (must be 1-500)")
			return
		}
		filters.Limit = n
	}

	items, total, err := h.opsService.ListErrorLogs(c.Request.Context(), filters)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to list error logs")
		return
	}
	response.Success(c, gin.H{
		"items": items,
		"total": total,
	})
}
// GetDashboardOverview returns realtime ops dashboard overview.
// GET /api/v1/admin/ops/dashboard/overview
//
// Query params:
//   - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
func (h *OpsHandler) GetDashboardOverview(c *gin.Context) {
	timeRange := c.DefaultQuery("time_range", "1h")
	switch timeRange {
	case "5m", "30m", "1h", "6h", "24h":
		// supported window
	default:
		response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
		return
	}
	data, err := h.opsService.GetDashboardOverview(c.Request.Context(), timeRange)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get dashboard overview")
		return
	}
	response.Success(c, data)
}
// GetProviderHealth returns upstream provider health comparison data.
// GET /api/v1/admin/ops/dashboard/providers
//
// Query params:
//   - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
func (h *OpsHandler) GetProviderHealth(c *gin.Context) {
	timeRange := c.DefaultQuery("time_range", "1h")
	switch timeRange {
	case "5m", "30m", "1h", "6h", "24h":
		// supported window
	default:
		response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
		return
	}

	providers, err := h.opsService.GetProviderHealth(c.Request.Context(), timeRange)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get provider health")
		return
	}

	// Build a summary: request-weighted average success rate plus the
	// best/worst provider among those that actually served traffic.
	var (
		totalRequests   int64
		weightedSuccess float64
		best, worst     string
		bestRate        float64
		worstRate       float64
		seeded          bool
	)
	for _, p := range providers {
		if p == nil {
			continue
		}
		totalRequests += p.RequestCount
		weightedSuccess += (p.SuccessRate / 100) * float64(p.RequestCount)
		if p.RequestCount <= 0 {
			// Providers without traffic don't compete for best/worst.
			continue
		}
		if !seeded {
			best, worst = p.Name, p.Name
			bestRate, worstRate = p.SuccessRate, p.SuccessRate
			seeded = true
			continue
		}
		if p.SuccessRate > bestRate {
			best, bestRate = p.Name, p.SuccessRate
		}
		if p.SuccessRate < worstRate {
			worst, worstRate = p.Name, p.SuccessRate
		}
	}

	avgSuccessRate := 0.0
	if totalRequests > 0 {
		avgSuccessRate = (weightedSuccess / float64(totalRequests)) * 100
		// Round to 2 decimal places for display.
		avgSuccessRate = math.Round(avgSuccessRate*100) / 100
	}

	response.Success(c, gin.H{
		"providers": providers,
		"summary": gin.H{
			"total_requests":   totalRequests,
			"avg_success_rate": avgSuccessRate,
			"best_provider":    best,
			"worst_provider":   worst,
		},
	})
}
// GetErrorLogs returns a paginated error log list with multi-dimensional filters.
// GET /api/v1/admin/ops/errors
func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
	page, pageSize := response.ParsePagination(c)
	filter := &service.ErrorLogFilter{Page: page, PageSize: pageSize}

	if raw := c.Query("start_time"); raw != "" {
		ts, err := time.Parse(time.RFC3339, raw)
		if err != nil {
			response.BadRequest(c, "Invalid start_time format (RFC3339)")
			return
		}
		filter.StartTime = &ts
	}
	if raw := c.Query("end_time"); raw != "" {
		ts, err := time.Parse(time.RFC3339, raw)
		if err != nil {
			response.BadRequest(c, "Invalid end_time format (RFC3339)")
			return
		}
		filter.EndTime = &ts
	}
	if filter.StartTime != nil && filter.EndTime != nil && filter.StartTime.After(*filter.EndTime) {
		response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
		return
	}
	if raw := c.Query("error_code"); raw != "" {
		code, err := strconv.Atoi(raw)
		if err != nil || code < 0 {
			response.BadRequest(c, "Invalid error_code")
			return
		}
		filter.ErrorCode = &code
	}
	// Keep both parameter names for compatibility: provider (docs) and platform (legacy).
	if filter.Provider = c.Query("provider"); filter.Provider == "" {
		filter.Provider = c.Query("platform")
	}
	if raw := c.Query("account_id"); raw != "" {
		id, err := strconv.ParseInt(raw, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}

	out, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get error logs")
		return
	}
	response.Success(c, gin.H{
		"errors":    out.Errors,
		"total":     out.Total,
		"page":      out.Page,
		"page_size": out.PageSize,
	})
}
// GetLatencyHistogram returns the latency distribution histogram.
// GET /api/v1/admin/ops/dashboard/latency-histogram
//
// Query params:
//   - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
func (h *OpsHandler) GetLatencyHistogram(c *gin.Context) {
	timeRange := c.Query("time_range")
	if timeRange == "" {
		timeRange = "1h"
	}
	// Validate the window like the other dashboard endpoints (overview,
	// providers) instead of forwarding arbitrary strings to the service.
	switch timeRange {
	case "5m", "30m", "1h", "6h", "24h":
	default:
		response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
		return
	}
	buckets, err := h.opsService.GetLatencyHistogram(c.Request.Context(), timeRange)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get latency histogram")
		return
	}
	// Total across buckets lets the UI compute per-bucket percentages.
	totalRequests := int64(0)
	for _, b := range buckets {
		totalRequests += b.Count
	}
	response.Success(c, gin.H{
		"buckets":                buckets,
		"total_requests":         totalRequests,
		"slow_request_threshold": 1000, // presumably milliseconds — confirm against frontend usage
	})
}
// GetErrorDistribution returns the error distribution.
// GET /api/v1/admin/ops/dashboard/errors/distribution
//
// Query params:
//   - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
func (h *OpsHandler) GetErrorDistribution(c *gin.Context) {
	timeRange := c.Query("time_range")
	if timeRange == "" {
		timeRange = "1h"
	}
	// Validate the window like the other dashboard endpoints instead of
	// forwarding arbitrary strings to the service layer.
	switch timeRange {
	case "5m", "30m", "1h", "6h", "24h":
	default:
		response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
		return
	}
	items, err := h.opsService.GetErrorDistribution(c.Request.Context(), timeRange)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get error distribution")
		return
	}
	response.Success(c, gin.H{
		"items": items,
	})
}
package admin
import (
"context"
"encoding/json"
"log"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// OpsWSProxyConfig controls how the ops WebSocket endpoint treats reverse
// proxies and the Origin header.
type OpsWSProxyConfig struct {
	TrustProxy     bool           // honor X-Forwarded-Host only when the peer is a trusted proxy
	TrustedProxies []netip.Prefix // CIDR allowlist of direct peers treated as proxies
	OriginPolicy   string         // "strict" or "permissive" handling of a missing Origin header
}

// Environment variables that override the defaults at package init.
const (
	envOpsWSTrustProxy     = "OPS_WS_TRUST_PROXY"
	envOpsWSTrustedProxies = "OPS_WS_TRUSTED_PROXIES"
	envOpsWSOriginPolicy   = "OPS_WS_ORIGIN_POLICY"
)

// Origin policies for requests that carry no Origin header.
const (
	OriginPolicyStrict     = "strict"
	OriginPolicyPermissive = "permissive"
)

// opsWSProxyConfig is resolved once from the environment at package init.
var opsWSProxyConfig = loadOpsWSProxyConfigFromEnv()

// upgrader performs the HTTP -> WebSocket upgrade; origin checking is
// delegated to isAllowedOpsWSOrigin.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return isAllowedOpsWSOrigin(r)
	},
}
// QPSWSHandler handles realtime QPS push via WebSocket.
// GET /api/v1/admin/ops/ws/qps
//
// The connection pushes a qps_update JSON frame every 2 seconds and a ping
// every 30 seconds. A background read pump is required because
// gorilla/websocket only processes incoming control frames (pong, close)
// during a read; without one, the pong handler below would never fire and
// the read deadline could never be refreshed or observed.
func (h *OpsHandler) QPSWSHandler(c *gin.Context) {
	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		log.Printf("[OpsWS] upgrade failed: %v", err)
		return
	}
	defer func() { _ = conn.Close() }()

	// Require pong (or any client frame) within 60s; refreshed on each pong.
	if err := conn.SetReadDeadline(time.Now().Add(60 * time.Second)); err != nil {
		log.Printf("[OpsWS] set read deadline failed: %v", err)
		return
	}
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	})

	ctx, cancel := context.WithCancel(c.Request.Context())
	defer cancel()

	// Read pump: discard data frames, let the library run control-frame
	// handlers, and cancel the push loop on read error / client close.
	go func() {
		defer cancel()
		for {
			if _, _, err := conn.ReadMessage(); err != nil {
				return
			}
		}
	}()

	// Push QPS data every 2 seconds.
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	// Heartbeat ping every 30 seconds.
	pingTicker := time.NewTicker(30 * time.Second)
	defer pingTicker.Stop()

	for {
		select {
		case <-ticker.C:
			// Fetch 1m window stats for current QPS
			data, err := h.opsService.GetDashboardOverview(ctx, "5m")
			if err != nil {
				log.Printf("[OpsWS] get overview failed: %v", err)
				continue
			}
			payload := gin.H{
				"type":      "qps_update",
				"timestamp": time.Now().Format(time.RFC3339),
				"data": gin.H{
					"qps": data.QPS.Current,
					"tps": data.TPS.Current,
					"request_count": data.Errors.TotalCount + int64(data.QPS.Avg1h*60), // Rough estimate
				},
			}
			msg, _ := json.Marshal(payload)
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				log.Printf("[OpsWS] write failed: %v", err)
				return
			}
		case <-pingTicker.C:
			if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				log.Printf("[OpsWS] ping failed: %v", err)
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
// isAllowedOpsWSOrigin reports whether the request's Origin header is
// acceptable for the ops WebSocket endpoint. A missing Origin is allowed
// unless the configured policy is strict; otherwise the Origin host must
// match the effective request host (taken from X-Forwarded-Host only when
// the peer is a trusted proxy).
func isAllowedOpsWSOrigin(r *http.Request) bool {
	if r == nil {
		return false
	}

	origin := strings.TrimSpace(r.Header.Get("Origin"))
	if origin == "" {
		// No Origin header: only the strict policy rejects it; permissive,
		// empty, or unknown policy values all allow.
		policy := strings.ToLower(strings.TrimSpace(opsWSProxyConfig.OriginPolicy))
		return policy != OriginPolicyStrict
	}

	parsed, err := url.Parse(origin)
	if err != nil || parsed.Hostname() == "" {
		return false
	}
	originHost := strings.ToLower(parsed.Hostname())

	effectiveHost := hostWithoutPort(r.Host)
	if shouldTrustOpsWSProxyHeaders(r) {
		// Honor the first entry of X-Forwarded-Host when set by a trusted proxy.
		if fwd := strings.TrimSpace(r.Header.Get("X-Forwarded-Host")); fwd != "" {
			if first := strings.TrimSpace(strings.Split(fwd, ",")[0]); first != "" {
				effectiveHost = hostWithoutPort(first)
			}
		}
	}
	effectiveHost = strings.ToLower(effectiveHost)
	if effectiveHost == "" {
		return false
	}
	return originHost == effectiveHost
}
// shouldTrustOpsWSProxyHeaders reports whether proxy-supplied headers
// (X-Forwarded-Host) may be honored: proxy trust must be enabled and the
// direct peer must fall inside a trusted proxy prefix.
func shouldTrustOpsWSProxyHeaders(r *http.Request) bool {
	if r == nil || !opsWSProxyConfig.TrustProxy {
		return false
	}
	peer, ok := requestPeerIP(r)
	return ok && isAddrInTrustedProxies(peer, opsWSProxyConfig.TrustedProxies)
}
func requestPeerIP(r *http.Request) (netip.Addr, bool) {
if r == nil {
return netip.Addr{}, false
}
host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
if err != nil {
host = strings.TrimSpace(r.RemoteAddr)
}
host = strings.TrimPrefix(host, "[")
host = strings.TrimSuffix(host, "]")
if host == "" {
return netip.Addr{}, false
}
addr, err := netip.ParseAddr(host)
if err != nil {
return netip.Addr{}, false
}
return addr.Unmap(), true
}
func isAddrInTrustedProxies(addr netip.Addr, trusted []netip.Prefix) bool {
if !addr.IsValid() {
return false
}
for _, p := range trusted {
if p.Contains(addr) {
return true
}
}
return false
}
// loadOpsWSProxyConfigFromEnv builds the WebSocket proxy/origin config from
// the OPS_WS_* environment variables. Invalid values are logged and the
// corresponding default is kept.
func loadOpsWSProxyConfigFromEnv() OpsWSProxyConfig {
	cfg := OpsWSProxyConfig{
		TrustProxy:     true,
		TrustedProxies: defaultTrustedProxies(),
		OriginPolicy:   OriginPolicyPermissive,
	}

	if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustProxy)); raw != "" {
		parsed, err := strconv.ParseBool(raw)
		if err != nil {
			log.Printf("[OpsWS] invalid %s=%q (expected bool); using default=%v", envOpsWSTrustProxy, raw, cfg.TrustProxy)
		} else {
			cfg.TrustProxy = parsed
		}
	}

	if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustedProxies)); raw != "" {
		prefixes, rejected := parseTrustedProxyList(raw)
		if len(rejected) > 0 {
			log.Printf("[OpsWS] invalid %s entries ignored: %s", envOpsWSTrustedProxies, strings.Join(rejected, ", "))
		}
		// The env list replaces (not extends) the default allowlist.
		cfg.TrustedProxies = prefixes
	}

	if raw := strings.TrimSpace(os.Getenv(envOpsWSOriginPolicy)); raw != "" {
		switch policy := strings.ToLower(raw); policy {
		case OriginPolicyStrict, OriginPolicyPermissive:
			cfg.OriginPolicy = policy
		default:
			log.Printf("[OpsWS] invalid %s=%q (expected %q or %q); using default=%q", envOpsWSOriginPolicy, raw, OriginPolicyStrict, OriginPolicyPermissive, cfg.OriginPolicy)
		}
	}
	return cfg
}
// defaultTrustedProxies returns the loopback-only default proxy allowlist
// (127.0.0.0/8 and ::1/128).
func defaultTrustedProxies() []netip.Prefix {
	out, _ := parseTrustedProxyList("127.0.0.0/8,::1/128")
	return out
}
func parseTrustedProxyList(raw string) (prefixes []netip.Prefix, invalid []string) {
for _, token := range strings.Split(raw, ",") {
item := strings.TrimSpace(token)
if item == "" {
continue
}
var (
p netip.Prefix
err error
)
if strings.Contains(item, "/") {
p, err = netip.ParsePrefix(item)
} else {
var addr netip.Addr
addr, err = netip.ParseAddr(item)
if err == nil {
addr = addr.Unmap()
bits := 128
if addr.Is4() {
bits = 32
}
p = netip.PrefixFrom(addr, bits)
}
}
if err != nil || !p.IsValid() {
invalid = append(invalid, item)
continue
}
prefixes = append(prefixes, p.Masked())
}
return prefixes, invalid
}
// hostWithoutPort strips an optional port from a host or host:port string.
// It handles bracketed IPv6 literals ("[::1]:8080", "[::1]") and bare IPv6
// literals ("::1"). The previous split-on-colon fallback mangled bare IPv6
// literals into an empty string (strings.Split("::1", ":")[0] == ""), which
// made origin comparison fail for such hosts.
func hostWithoutPort(hostport string) string {
	hostport = strings.TrimSpace(hostport)
	if hostport == "" {
		return ""
	}
	if host, _, err := net.SplitHostPort(hostport); err == nil {
		return host
	}
	if strings.HasPrefix(hostport, "[") && strings.HasSuffix(hostport, "]") {
		return strings.Trim(hostport, "[]")
	}
	// Multiple colons without brackets/port: a bare IPv6 literal. Splitting
	// on ":" would destroy it, so return it untouched.
	if strings.Count(hostport, ":") > 1 {
		return hostport
	}
	parts := strings.Split(hostport, ":")
	return parts[0]
}
package admin
import (
"net/http"
"net/netip"
"testing"
)
// TestIsAllowedOpsWSOrigin_AllowsEmptyOrigin verifies that a request without
// an Origin header is accepted under the permissive policy.
func TestIsAllowedOpsWSOrigin_AllowsEmptyOrigin(t *testing.T) {
	// Swap in a permissive config; restored by Cleanup so other tests see
	// the original package-level state.
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })
	opsWSProxyConfig = OpsWSProxyConfig{OriginPolicy: OriginPolicyPermissive}
	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}
	if !isAllowedOpsWSOrigin(req) {
		t.Fatalf("expected empty Origin to be allowed")
	}
}
// TestIsAllowedOpsWSOrigin_RejectsEmptyOrigin_WhenStrict verifies that a
// request without an Origin header is rejected under the strict policy.
func TestIsAllowedOpsWSOrigin_RejectsEmptyOrigin_WhenStrict(t *testing.T) {
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })
	opsWSProxyConfig = OpsWSProxyConfig{OriginPolicy: OriginPolicyStrict}
	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}
	if isAllowedOpsWSOrigin(req) {
		t.Fatalf("expected empty Origin to be rejected under strict policy")
	}
}
// TestIsAllowedOpsWSOrigin_UsesXForwardedHostOnlyFromTrustedProxy verifies
// that X-Forwarded-Host only participates in Origin validation when the
// direct peer falls inside the trusted proxy allowlist.
func TestIsAllowedOpsWSOrigin_UsesXForwardedHostOnlyFromTrustedProxy(t *testing.T) {
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })
	opsWSProxyConfig = OpsWSProxyConfig{
		TrustProxy: true,
		TrustedProxies: []netip.Prefix{
			netip.MustParsePrefix("127.0.0.0/8"),
		},
	}
	// Untrusted peer: ignore X-Forwarded-Host and compare against r.Host.
	{
		req, err := http.NewRequest(http.MethodGet, "http://internal.service.local", nil)
		if err != nil {
			t.Fatalf("NewRequest: %v", err)
		}
		req.RemoteAddr = "192.0.2.1:12345" // outside 127.0.0.0/8
		req.Host = "internal.service.local"
		req.Header.Set("Origin", "https://public.example.com")
		req.Header.Set("X-Forwarded-Host", "public.example.com")
		if isAllowedOpsWSOrigin(req) {
			t.Fatalf("expected Origin to be rejected when peer is not a trusted proxy")
		}
	}
	// Trusted peer: allow X-Forwarded-Host to participate in Origin validation.
	{
		req, err := http.NewRequest(http.MethodGet, "http://internal.service.local", nil)
		if err != nil {
			t.Fatalf("NewRequest: %v", err)
		}
		req.RemoteAddr = "127.0.0.1:23456" // inside 127.0.0.0/8
		req.Host = "internal.service.local"
		req.Header.Set("Origin", "https://public.example.com")
		req.Header.Set("X-Forwarded-Host", "public.example.com")
		if !isAllowedOpsWSOrigin(req) {
			t.Fatalf("expected Origin to be accepted when peer is a trusted proxy")
		}
	}
}
// TestLoadOpsWSProxyConfigFromEnv_OriginPolicy verifies that the origin
// policy env value is matched case-insensitively.
func TestLoadOpsWSProxyConfigFromEnv_OriginPolicy(t *testing.T) {
	t.Setenv(envOpsWSOriginPolicy, "STRICT")
	cfg := loadOpsWSProxyConfigFromEnv()
	if cfg.OriginPolicy != OriginPolicyStrict {
		t.Fatalf("OriginPolicy=%q, want %q", cfg.OriginPolicy, OriginPolicyStrict)
	}
}
// TestLoadOpsWSProxyConfigFromEnv_OriginPolicyInvalidUsesDefault verifies
// that an unrecognized origin-policy env value keeps the permissive default.
func TestLoadOpsWSProxyConfigFromEnv_OriginPolicyInvalidUsesDefault(t *testing.T) {
	t.Setenv(envOpsWSOriginPolicy, "nope")
	cfg := loadOpsWSProxyConfigFromEnv()
	if cfg.OriginPolicy != OriginPolicyPermissive {
		t.Fatalf("OriginPolicy=%q, want %q", cfg.OriginPolicy, OriginPolicyPermissive)
	}
}
// TestParseTrustedProxyList verifies mixed bare-IP/CIDR parsing and that
// unparsable tokens are reported rather than silently dropped.
func TestParseTrustedProxyList(t *testing.T) {
	prefixes, invalid := parseTrustedProxyList("10.0.0.1, 10.0.0.0/8, bad, ::1/128")
	if len(prefixes) != 3 {
		t.Fatalf("prefixes=%d, want 3", len(prefixes))
	}
	if len(invalid) != 1 || invalid[0] != "bad" {
		t.Fatalf("invalid=%v, want [bad]", invalid)
	}
}
// TestRequestPeerIP_ParsesIPv6 verifies that a bracketed IPv6 RemoteAddr
// ("[::1]:1234") parses to the bare address.
func TestRequestPeerIP_ParsesIPv6(t *testing.T) {
	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}
	req.RemoteAddr = "[::1]:1234"
	addr, ok := requestPeerIP(req)
	if !ok {
		t.Fatalf("expected IPv6 peer IP to parse")
	}
	if addr != netip.MustParseAddr("::1") {
		t.Fatalf("addr=%s, want ::1", addr)
	}
}
package handler
import (
"context"
"strings"
"sync"
"time"
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/gin-gonic/gin"
)
// Context keys used to stash per-request model/stream info for ops error logging.
const (
	opsModelKey  = "ops_model"
	opsStreamKey = "ops_stream"
)

// Worker-pool tuning for asynchronous error-log persistence.
const (
	opsErrorLogWorkerCount = 10              // goroutines draining the queue
	opsErrorLogQueueSize   = 256             // buffered jobs before drops begin
	opsErrorLogTimeout     = 2 * time.Second // per-write deadline
)

// opsErrorLogJob pairs an error entry with the service that persists it.
type opsErrorLogJob struct {
	ops   *service.OpsService
	entry *service.OpsErrorLog
}

var (
	opsErrorLogOnce  sync.Once // guards one-time worker startup
	opsErrorLogQueue chan opsErrorLogJob
)
// startOpsErrorLogWorkers creates the buffered job queue and spawns the
// fixed pool of goroutines that persist queued error logs.
func startOpsErrorLogWorkers() {
	opsErrorLogQueue = make(chan opsErrorLogJob, opsErrorLogQueueSize)
	worker := func() {
		for job := range opsErrorLogQueue {
			if job.ops == nil || job.entry == nil {
				continue
			}
			// Bound each write so a slow store can't back up the queue forever.
			ctx, cancel := context.WithTimeout(context.Background(), opsErrorLogTimeout)
			_ = job.ops.RecordError(ctx, job.entry)
			cancel()
		}
	}
	for n := opsErrorLogWorkerCount; n > 0; n-- {
		go worker()
	}
}
// enqueueOpsErrorLog submits an error log for asynchronous persistence. The
// worker pool is started lazily on first use; when the queue is full the
// entry is dropped rather than blocking the request path.
func enqueueOpsErrorLog(ops *service.OpsService, entry *service.OpsErrorLog) {
	if ops == nil || entry == nil {
		return
	}
	opsErrorLogOnce.Do(startOpsErrorLogWorkers)
	job := opsErrorLogJob{ops: ops, entry: entry}
	select {
	case opsErrorLogQueue <- job:
	default:
		// Queue is full; drop to avoid blocking request handling.
	}
}
// setOpsRequestContext stashes the requested model and streaming flag on the
// gin context so later error logging can attribute the failure.
func setOpsRequestContext(c *gin.Context, model string, stream bool) {
	c.Set(opsStreamKey, stream)
	c.Set(opsModelKey, model)
}
// recordOpsError builds an OpsErrorLog from the current request context and
// queues it for asynchronous persistence. fallbackPlatform is used when the
// API key's group does not determine a platform.
func recordOpsError(c *gin.Context, ops *service.OpsService, status int, errType, message, fallbackPlatform string) {
	if ops == nil || c == nil {
		return
	}
	// Model name and streaming flag were stashed earlier via setOpsRequestContext.
	model, _ := c.Get(opsModelKey)
	stream, _ := c.Get(opsStreamKey)
	var modelName string
	if m, ok := model.(string); ok {
		modelName = m
	}
	streaming, _ := stream.(bool)
	apiKey, _ := middleware2.GetAPIKeyFromContext(c)
	logEntry := &service.OpsErrorLog{
		Phase:      classifyOpsPhase(errType, message),
		Type:       errType,
		Severity:   classifyOpsSeverity(errType, status),
		StatusCode: status,
		Platform:   resolveOpsPlatform(apiKey, fallbackPlatform),
		Model:      modelName,
		// Request ID is read from the response header, so it reflects
		// whatever the upstream middleware wrote there.
		RequestID: c.Writer.Header().Get("x-request-id"),
		Message:   message,
		ClientIP:  c.ClientIP(),
		RequestPath: func() string {
			// Guard against a nil Request/URL (e.g. synthetic contexts).
			if c.Request != nil && c.Request.URL != nil {
				return c.Request.URL.Path
			}
			return ""
		}(),
		Stream: streaming,
	}
	// Attribute the error to key/user/group when an API key is attached.
	if apiKey != nil {
		logEntry.APIKeyID = &apiKey.ID
		if apiKey.User != nil {
			logEntry.UserID = &apiKey.User.ID
		}
		if apiKey.GroupID != nil {
			logEntry.GroupID = apiKey.GroupID
		}
	}
	enqueueOpsErrorLog(ops, logEntry)
}
// resolveOpsPlatform prefers the platform of the key's group and falls back
// to the supplied default when the key, group, or platform is absent.
func resolveOpsPlatform(apiKey *service.APIKey, fallback string) string {
	if apiKey == nil || apiKey.Group == nil || apiKey.Group.Platform == "" {
		return fallback
	}
	return apiKey.Group.Platform
}
// classifyOpsPhase buckets an error into a pipeline phase from its type and,
// for some types, keywords found in the message.
func classifyOpsPhase(errType, message string) string {
	lower := strings.ToLower(message)
	switch errType {
	case "authentication_error":
		return "auth"
	case "billing_error", "subscription_error":
		return "billing"
	case "rate_limit_error":
		// Local concurrency limits are distinguished from upstream 429s
		// by message keywords.
		if strings.Contains(lower, "concurrency") || strings.Contains(lower, "pending") {
			return "concurrency"
		}
		return "upstream"
	case "invalid_request_error":
		return "response"
	case "upstream_error", "overloaded_error":
		return "upstream"
	case "api_error":
		if strings.Contains(lower, "no available accounts") {
			return "scheduling"
		}
	}
	// api_error without a scheduling hint, plus all unknown types.
	return "internal"
}
// classifyOpsSeverity assigns an alert severity. Client-caused error types
// are always P3; otherwise severity follows the HTTP status: 5xx and 429
// map to P1, other 4xx to P2, and everything else to P3.
func classifyOpsSeverity(errType string, status int) string {
	switch errType {
	case "invalid_request_error", "authentication_error", "billing_error", "subscription_error":
		return "P3"
	}
	switch {
	case status >= 500, status == 429:
		return "P1"
	case status >= 400:
		return "P2"
	default:
		return "P3"
	}
}
package repository
import (
"context"
"database/sql"
"fmt"
"strconv"
"strings"
"time"
"github.com/Wei-Shaw/sub2api/internal/service"
)
// ListErrorLogs queries ops_error_logs with optional filters and pagination.
// It returns the list items and the total count of matching rows.
//
// Pagination defaults to page 1 / size 20 and caps the page size at 100.
// Placeholders are built positionally ($1, $2, ...) as conditions accumulate.
func (r *OpsRepository) ListErrorLogs(ctx context.Context, filter *service.ErrorLogFilter) ([]*service.ErrorLog, int64, error) {
	page := 1
	pageSize := 20
	if filter != nil {
		if filter.Page > 0 {
			page = filter.Page
		}
		if filter.PageSize > 0 {
			pageSize = filter.PageSize
		}
	}
	if pageSize > 100 {
		pageSize = 100
	}
	offset := (page - 1) * pageSize

	conditions := make([]string, 0)
	args := make([]any, 0)
	// addCondition appends a WHERE clause fragment and its bound values,
	// keeping the fragment's $N placeholder in sync with len(args).
	addCondition := func(condition string, values ...any) {
		conditions = append(conditions, condition)
		args = append(args, values...)
	}
	if filter != nil {
		// Default to the last 24 hours when no explicit range is given.
		// NOTE(review): this writes the default StartTime back into the
		// caller's filter — confirm callers don't reuse the filter expecting
		// it to stay untouched.
		if filter.StartTime == nil && filter.EndTime == nil {
			defaultStart := time.Now().Add(-24 * time.Hour)
			filter.StartTime = &defaultStart
		}
		if filter.StartTime != nil {
			addCondition(fmt.Sprintf("created_at >= $%d", len(args)+1), *filter.StartTime)
		}
		if filter.EndTime != nil {
			addCondition(fmt.Sprintf("created_at <= $%d", len(args)+1), *filter.EndTime)
		}
		if filter.ErrorCode != nil {
			addCondition(fmt.Sprintf("status_code = $%d", len(args)+1), *filter.ErrorCode)
		}
		if provider := strings.TrimSpace(filter.Provider); provider != "" {
			// "provider" in the API maps to the platform column.
			addCondition(fmt.Sprintf("platform = $%d", len(args)+1), provider)
		}
		if filter.AccountID != nil {
			addCondition(fmt.Sprintf("account_id = $%d", len(args)+1), *filter.AccountID)
		}
	}
	where := ""
	if len(conditions) > 0 {
		where = "WHERE " + strings.Join(conditions, " AND ")
	}

	// Total count first, so the caller can paginate.
	countQuery := fmt.Sprintf(`SELECT COUNT(1) FROM ops_error_logs %s`, where)
	var total int64
	if err := scanSingleRow(ctx, r.sql, countQuery, args, &total); err != nil {
		if err == sql.ErrNoRows {
			total = 0
		} else {
			return nil, 0, err
		}
	}

	listQuery := fmt.Sprintf(`
		SELECT
			id,
			created_at,
			severity,
			request_id,
			account_id,
			request_path,
			platform,
			model,
			status_code,
			error_message,
			duration_ms,
			retry_count,
			stream
		FROM ops_error_logs
		%s
		ORDER BY created_at DESC
		LIMIT $%d OFFSET $%d
	`, where, len(args)+1, len(args)+2)
	// Copy args so the LIMIT/OFFSET values don't mutate the shared slice.
	listArgs := append(append([]any{}, args...), pageSize, offset)
	rows, err := r.sql.QueryContext(ctx, listQuery, listArgs...)
	if err != nil {
		return nil, 0, err
	}
	defer func() { _ = rows.Close() }()

	results := make([]*service.ErrorLog, 0)
	for rows.Next() {
		// Nullable columns are scanned into sql.Null* wrappers and only
		// copied onto the entry when Valid.
		var (
			id         int64
			createdAt  time.Time
			severity   sql.NullString
			requestID  sql.NullString
			accountID  sql.NullInt64
			requestURI sql.NullString
			platform   sql.NullString
			model      sql.NullString
			statusCode sql.NullInt64
			message    sql.NullString
			durationMs sql.NullInt64
			retryCount sql.NullInt64
			stream     sql.NullBool
		)
		if err := rows.Scan(
			&id,
			&createdAt,
			&severity,
			&requestID,
			&accountID,
			&requestURI,
			&platform,
			&model,
			&statusCode,
			&message,
			&durationMs,
			&retryCount,
			&stream,
		); err != nil {
			return nil, 0, err
		}
		entry := &service.ErrorLog{
			ID:        id,
			Timestamp: createdAt,
			// Severity (P0-P3) is mapped onto a log level for the API shape.
			Level:     levelFromSeverity(severity.String),
			RequestID: requestID.String,
			APIPath:   requestURI.String,
			Provider:  platform.String,
			Model:     model.String,
			HTTPCode:  int(statusCode.Int64),
			Stream:    stream.Bool,
		}
		if accountID.Valid {
			entry.AccountID = strconv.FormatInt(accountID.Int64, 10)
		}
		if message.Valid {
			entry.ErrorMessage = message.String
		}
		if durationMs.Valid {
			v := int(durationMs.Int64)
			entry.DurationMs = &v
		}
		if retryCount.Valid {
			v := int(retryCount.Int64)
			entry.RetryCount = &v
		}
		results = append(results, entry)
	}
	if err := rows.Err(); err != nil {
		return nil, 0, err
	}
	return results, total, nil
}
// levelFromSeverity maps an ops severity label (P0-P3, case/space
// insensitive) onto a log level string; unknown values default to ERROR.
func levelFromSeverity(severity string) string {
	switch strings.ToUpper(strings.TrimSpace(severity)) {
	case "P0", "P1":
		return "CRITICAL"
	case "P2":
		return "ERROR"
	case "P3":
		return "WARN"
	}
	return "ERROR"
}
package repository
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/redis/go-redis/v9"
)
// Redis keys and TTL for the ops metric/overview caches.
const (
	opsLatestMetricsKey           = "ops:metrics:latest"          // latest snapshot
	opsDashboardOverviewKeyPrefix = "ops:dashboard:overview:"     // + time range (e.g. "1h")
	opsLatestMetricsTTL           = 10 * time.Second              // snapshot freshness window
)
// GetCachedLatestSystemMetric returns the cached latest metrics snapshot,
// or (nil, nil) when Redis is not wired or the key is absent.
func (r *OpsRepository) GetCachedLatestSystemMetric(ctx context.Context) (*service.OpsMetrics, error) {
	if r == nil || r.rdb == nil {
		return nil, nil
	}
	if ctx == nil {
		ctx = context.Background()
	}
	raw, err := r.rdb.Get(ctx, opsLatestMetricsKey).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		// Cache miss is not an error.
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("redis get cached latest system metric: %w", err)
	}
	cached := &service.OpsMetrics{}
	if err := json.Unmarshal(raw, cached); err != nil {
		return nil, fmt.Errorf("unmarshal cached latest system metric: %w", err)
	}
	return cached, nil
}
// SetCachedLatestSystemMetric stores the metrics snapshot in Redis under a
// short TTL. A nil metric, repo, or Redis client is a silent no-op.
func (r *OpsRepository) SetCachedLatestSystemMetric(ctx context.Context, metric *service.OpsMetrics) error {
	if metric == nil || r == nil || r.rdb == nil {
		return nil
	}
	if ctx == nil {
		ctx = context.Background()
	}
	payload, err := json.Marshal(metric)
	if err != nil {
		return fmt.Errorf("marshal cached latest system metric: %w", err)
	}
	return r.rdb.Set(ctx, opsLatestMetricsKey, payload, opsLatestMetricsTTL).Err()
}
// GetCachedDashboardOverview returns the cached overview for a time range,
// or (nil, nil) on cache miss or a missing Redis client. An empty range
// maps to the "1h" cache key.
func (r *OpsRepository) GetCachedDashboardOverview(ctx context.Context, timeRange string) (*service.DashboardOverviewData, error) {
	if r == nil || r.rdb == nil {
		return nil, nil
	}
	if ctx == nil {
		ctx = context.Background()
	}
	window := strings.TrimSpace(timeRange)
	if window == "" {
		window = "1h"
	}
	raw, err := r.rdb.Get(ctx, opsDashboardOverviewKeyPrefix+window).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("redis get cached dashboard overview: %w", err)
	}
	overview := &service.DashboardOverviewData{}
	if err := json.Unmarshal(raw, overview); err != nil {
		return nil, fmt.Errorf("unmarshal cached dashboard overview: %w", err)
	}
	return overview, nil
}
// SetCachedDashboardOverview caches the overview for a time range. A
// non-positive ttl falls back to 10s; nil data, repo, or client is a no-op.
// An empty range maps to the "1h" cache key.
func (r *OpsRepository) SetCachedDashboardOverview(ctx context.Context, timeRange string, data *service.DashboardOverviewData, ttl time.Duration) error {
	if data == nil || r == nil || r.rdb == nil {
		return nil
	}
	if ttl <= 0 {
		ttl = 10 * time.Second
	}
	if ctx == nil {
		ctx = context.Background()
	}
	window := strings.TrimSpace(timeRange)
	if window == "" {
		window = "1h"
	}
	payload, err := json.Marshal(data)
	if err != nil {
		return fmt.Errorf("marshal cached dashboard overview: %w", err)
	}
	return r.rdb.Set(ctx, opsDashboardOverviewKeyPrefix+window, payload, ttl).Err()
}
// PingRedis verifies Redis connectivity; it errors when no client is wired.
func (r *OpsRepository) PingRedis(ctx context.Context) error {
	if r == nil || r.rdb == nil {
		return errors.New("redis client is nil")
	}
	if ctx == nil {
		ctx = context.Background()
	}
	return r.rdb.Ping(ctx).Err()
}
This diff is collapsed.
package middleware
import (
"context"
"sync"
"time"
"github.com/Wei-Shaw/sub2api/internal/service"
)
// Worker-pool tuning for asynchronous auth-error-log persistence.
const (
	opsAuthErrorLogWorkerCount = 10              // goroutines draining the queue
	opsAuthErrorLogQueueSize   = 256             // buffered jobs before drops begin
	opsAuthErrorLogTimeout     = 2 * time.Second // per-write deadline
)

// opsAuthErrorLogJob pairs an error entry with the service that persists it.
type opsAuthErrorLogJob struct {
	ops   *service.OpsService
	entry *service.OpsErrorLog
}

var (
	opsAuthErrorLogOnce  sync.Once // guards one-time worker startup
	opsAuthErrorLogQueue chan opsAuthErrorLogJob
)
// startOpsAuthErrorLogWorkers creates the buffered job queue and spawns the
// fixed pool of goroutines that persist queued auth error logs.
func startOpsAuthErrorLogWorkers() {
	opsAuthErrorLogQueue = make(chan opsAuthErrorLogJob, opsAuthErrorLogQueueSize)
	worker := func() {
		for job := range opsAuthErrorLogQueue {
			if job.ops == nil || job.entry == nil {
				continue
			}
			// Bound each write so a slow store can't back up the queue forever.
			ctx, cancel := context.WithTimeout(context.Background(), opsAuthErrorLogTimeout)
			_ = job.ops.RecordError(ctx, job.entry)
			cancel()
		}
	}
	for n := opsAuthErrorLogWorkerCount; n > 0; n-- {
		go worker()
	}
}
// enqueueOpsAuthErrorLog submits an auth error log for asynchronous
// persistence. The worker pool starts lazily on first use; a full queue
// drops the entry rather than blocking the request path.
func enqueueOpsAuthErrorLog(ops *service.OpsService, entry *service.OpsErrorLog) {
	if ops == nil || entry == nil {
		return
	}
	opsAuthErrorLogOnce.Do(startOpsAuthErrorLogWorkers)
	job := opsAuthErrorLogJob{ops: ops, entry: entry}
	select {
	case opsAuthErrorLogQueue <- job:
	default:
		// Queue is full; drop to avoid blocking request handling.
	}
}
package service
import (
"context"
"time"
)
// ErrorLog represents an ops error log item for list queries.
//
// Field naming matches docs/API-运维监控中心2.0.md (L3 根因追踪 - 错误日志列表).
type ErrorLog struct {
	ID        int64     `json:"id"`
	Timestamp time.Time `json:"timestamp"`
	// Level is a log level string (CRITICAL/ERROR/WARN) derived from severity.
	Level     string `json:"level,omitempty"`
	RequestID string `json:"request_id,omitempty"`
	// AccountID is the upstream account id rendered as a decimal string.
	AccountID    string `json:"account_id,omitempty"`
	APIPath      string `json:"api_path,omitempty"`
	Provider     string `json:"provider,omitempty"`
	Model        string `json:"model,omitempty"`
	HTTPCode     int    `json:"http_code,omitempty"`
	ErrorMessage string `json:"error_message,omitempty"`
	// Pointer fields are omitted from JSON when the column was NULL.
	DurationMs *int `json:"duration_ms,omitempty"`
	RetryCount *int `json:"retry_count,omitempty"`
	Stream     bool `json:"stream,omitempty"`
}
// ErrorLogFilter describes optional filters and pagination for listing ops error logs.
// Nil pointer fields and empty strings mean "no filter on this dimension".
// Page/PageSize are normalized (defaults, upper bound) by normalize().
type ErrorLogFilter struct {
	StartTime *time.Time
	EndTime   *time.Time
	ErrorCode *int
	Provider  string
	AccountID *int64
	Page      int
	PageSize  int
}
// normalize returns sane pagination values: page defaults to 1, pageSize
// defaults to 20 and is capped at 100. Safe on a nil receiver.
func (f *ErrorLogFilter) normalize() (page, pageSize int) {
	const (
		defaultPage     = 1
		defaultPageSize = 20
		maxPageSize     = 100
	)
	page, pageSize = defaultPage, defaultPageSize
	if f == nil {
		return page, pageSize
	}
	if f.Page > 0 {
		page = f.Page
	}
	if f.PageSize > 0 {
		pageSize = f.PageSize
	}
	if pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	return page, pageSize
}
// ErrorLogListResponse is the paginated payload returned by GetErrorLogs.
type ErrorLogListResponse struct {
	Errors   []*ErrorLog `json:"errors"` // never nil; empty slice when no rows match
	Total    int64       `json:"total"`
	Page     int         `json:"page"`
	PageSize int         `json:"page_size"`
}
// GetErrorLogs returns a paginated list of ops error logs matching filter.
// A nil service or repository yields an empty first page rather than an
// error, so callers can render an empty dashboard without special-casing.
func (s *OpsService) GetErrorLogs(ctx context.Context, filter *ErrorLogFilter) (*ErrorLogListResponse, error) {
	if s == nil || s.repo == nil {
		return &ErrorLogListResponse{
			Errors:   []*ErrorLog{},
			Total:    0,
			Page:     1,
			PageSize: 20,
		}, nil
	}
	if filter == nil {
		filter = &ErrorLogFilter{}
	}
	page, pageSize := filter.normalize()
	// Write the normalized pagination back onto the filter so the repository
	// query uses exactly the values reported to the caller.
	filter.Page = page
	filter.PageSize = pageSize
	items, total, err := s.repo.ListErrorLogs(ctx, filter)
	if err != nil {
		return nil, err
	}
	if items == nil {
		items = []*ErrorLog{}
	}
	resp := &ErrorLogListResponse{
		Errors:   items,
		Total:    total,
		Page:     page,
		PageSize: pageSize,
	}
	return resp, nil
}
This diff is collapsed.
//go:build integration
package service
import (
"context"
"database/sql"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// This integration test protects the DI startup contract for OpsAlertService.
//
// Background:
// - OpsMetricsCollector previously called alertService.Start()/Evaluate() directly.
// - Those direct calls were removed, so OpsAlertService must now start via DI
//   (ProvideOpsAlertService in wire.go) and run its own evaluation ticker.
//
// What we validate here:
//  1. When we construct via the Wire provider functions (ProvideOpsAlertService +
//     ProvideOpsMetricsCollector), OpsAlertService starts automatically.
//  2. Its evaluation loop continues to tick even if OpsMetricsCollector is stopped,
//     proving the alert evaluator is independent.
//  3. The evaluation path can trigger alert logic (CreateAlertEvent called).
func TestOpsAlertService_StartedViaWireProviders_RunsIndependentTicker(t *testing.T) {
	// Shrink the evaluation interval so the test observes several ticks quickly;
	// restored on cleanup to avoid leaking state into other tests.
	oldInterval := opsAlertEvalInterval
	opsAlertEvalInterval = 25 * time.Millisecond
	t.Cleanup(func() { opsAlertEvalInterval = oldInterval })
	repo := newFakeOpsRepository()
	opsService := NewOpsService(repo, nil)
	// Start via the Wire provider function (the production DI path).
	alertService := ProvideOpsAlertService(opsService, nil, nil)
	t.Cleanup(alertService.Stop)
	// Construct via ProvideOpsMetricsCollector (wire.go). Stop immediately to ensure
	// the alert ticker keeps running without the metrics collector.
	collector := ProvideOpsMetricsCollector(opsService, NewConcurrencyService(nil))
	collector.Stop()
	// Wait for at least one evaluation (run() calls evaluateOnce immediately).
	require.Eventually(t, func() bool {
		return repo.listRulesCalls.Load() >= 1
	}, 1*time.Second, 5*time.Millisecond)
	// Confirm the evaluation loop keeps ticking after the metrics collector is stopped.
	callsAfterCollectorStop := repo.listRulesCalls.Load()
	require.Eventually(t, func() bool {
		return repo.listRulesCalls.Load() >= callsAfterCollectorStop+2
	}, 1*time.Second, 5*time.Millisecond)
	// Confirm the evaluation logic actually fires an alert event at least once.
	select {
	case <-repo.eventCreatedCh:
		// ok
	case <-time.After(2 * time.Second):
		t.Fatalf("expected OpsAlertService to create an alert event, but none was created (ListAlertRules calls=%d)", repo.listRulesCalls.Load())
	}
}
// newFakeOpsRepository builds a fake repository whose eventCreatedCh is
// ready to signal the first CreateAlertEvent call.
func newFakeOpsRepository() *fakeOpsRepository {
	repo := &fakeOpsRepository{}
	repo.eventCreatedCh = make(chan struct{})
	return repo
}
// fakeOpsRepository is a lightweight in-memory stub of OpsRepository for integration tests.
// It avoids real DB/Redis usage and provides deterministic responses fast.
type fakeOpsRepository struct {
	listRulesCalls atomic.Int64 // counts ListAlertRules invocations (ticker liveness probe)

	mu          sync.Mutex // guards the event fields below
	activeEvent *OpsAlertEvent
	latestEvent *OpsAlertEvent
	nextEventID int64

	eventCreatedCh chan struct{} // closed once, on the first CreateAlertEvent
	eventOnce      sync.Once     // ensures eventCreatedCh is closed exactly once
}
// The following methods are no-op stubs: the alert-evaluation path under test
// never depends on their results.

func (r *fakeOpsRepository) CreateErrorLog(ctx context.Context, log *OpsErrorLog) error {
	return nil
}

func (r *fakeOpsRepository) ListErrorLogsLegacy(ctx context.Context, filters OpsErrorLogFilters) ([]OpsErrorLog, error) {
	return nil, nil
}

func (r *fakeOpsRepository) ListErrorLogs(ctx context.Context, filter *ErrorLogFilter) ([]*ErrorLog, int64, error) {
	return nil, 0, nil
}

// GetLatestSystemMetric reports "no stored metric yet" via sql.ErrNoRows while
// still returning a non-nil placeholder metric.
func (r *fakeOpsRepository) GetLatestSystemMetric(ctx context.Context) (*OpsMetrics, error) {
	return &OpsMetrics{WindowMinutes: 1}, sql.ErrNoRows
}

func (r *fakeOpsRepository) CreateSystemMetric(ctx context.Context, metric *OpsMetrics) error {
	return nil
}

func (r *fakeOpsRepository) GetWindowStats(ctx context.Context, startTime, endTime time.Time) (*OpsWindowStats, error) {
	return &OpsWindowStats{}, nil
}

func (r *fakeOpsRepository) GetProviderStats(ctx context.Context, startTime, endTime time.Time) ([]*ProviderStats, error) {
	return nil, nil
}

func (r *fakeOpsRepository) GetLatencyHistogram(ctx context.Context, startTime, endTime time.Time) ([]*LatencyHistogramItem, error) {
	return nil, nil
}

func (r *fakeOpsRepository) GetErrorDistribution(ctx context.Context, startTime, endTime time.Time) ([]*ErrorDistributionItem, error) {
	return nil, nil
}
// ListRecentSystemMetrics fabricates `limit` contiguous samples (at least one),
// each with CPU pinned at 99% so any "cpu > threshold" rule will fire.
// Samples are spaced one collection interval apart, newest first.
func (r *fakeOpsRepository) ListRecentSystemMetrics(ctx context.Context, windowMinutes, limit int) ([]OpsMetrics, error) {
	count := limit
	if count <= 0 {
		count = 1
	}
	now := time.Now()
	out := make([]OpsMetrics, count)
	for i := range out {
		out[i] = OpsMetrics{
			WindowMinutes:   windowMinutes,
			CPUUsagePercent: 99,
			UpdatedAt:       now.Add(-time.Duration(i) * opsMetricsInterval),
		}
	}
	return out, nil
}
// ListSystemMetricsRange is a no-op stub (unused by the alert-evaluation path).
func (r *fakeOpsRepository) ListSystemMetricsRange(ctx context.Context, windowMinutes int, startTime, endTime time.Time, limit int) ([]OpsMetrics, error) {
	return nil, nil
}
// ListAlertRules counts each invocation (the test's liveness probe) and,
// after a warm-up of four empty responses, returns a single always-firing
// CPU rule (threshold 0 against the fake's constant 99% CPU samples).
func (r *fakeOpsRepository) ListAlertRules(ctx context.Context) ([]OpsAlertRule, error) {
	call := r.listRulesCalls.Add(1)
	// Delay enabling rules slightly so the test can stop OpsMetricsCollector first,
	// then observe the alert evaluator ticking independently.
	if call < 5 {
		return nil, nil
	}
	return []OpsAlertRule{
		{
			ID:               1,
			Name:             "cpu too high (test)",
			Enabled:          true,
			MetricType:       OpsMetricCPUUsagePercent,
			Operator:         ">",
			Threshold:        0, // always exceeded by the fake's 99% CPU samples
			WindowMinutes:    1,
			SustainedMinutes: 1,
			Severity:         "P1",
			NotifyEmail:      false,
			NotifyWebhook:    false,
			CooldownMinutes:  0,
		},
	}, nil
}
// GetActiveAlertEvent returns a copy of the stored active event when it
// belongs to ruleID and is still firing; otherwise (nil, nil).
func (r *fakeOpsRepository) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	ev := r.activeEvent
	if ev == nil || ev.RuleID != ruleID || ev.Status != OpsAlertStatusFiring {
		return nil, nil
	}
	clone := *ev
	return &clone, nil
}
// GetLatestAlertEvent returns a copy of the most recent event for ruleID,
// or (nil, nil) when none exists.
func (r *fakeOpsRepository) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	ev := r.latestEvent
	if ev == nil || ev.RuleID != ruleID {
		return nil, nil
	}
	clone := *ev
	return &clone, nil
}
// CreateAlertEvent assigns a sequential ID, records a snapshot as the latest
// (and, when firing, the active) event, and signals eventCreatedCh exactly
// once so the test can detect the first created event.
func (r *fakeOpsRepository) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) error {
	if event == nil {
		return nil
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.nextEventID++
	event.ID = r.nextEventID
	// Store a copy so later caller-side mutations don't leak into the fake.
	clone := *event
	r.latestEvent = &clone
	if clone.Status == OpsAlertStatusFiring {
		r.activeEvent = &clone
	}
	r.eventOnce.Do(func() { close(r.eventCreatedCh) })
	return nil
}
// UpdateAlertEventStatus updates status/resolvedAt on whichever stored
// events (active and/or latest) match eventID.
func (r *fakeOpsRepository) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, ev := range []*OpsAlertEvent{r.activeEvent, r.latestEvent} {
		if ev != nil && ev.ID == eventID {
			ev.Status = status
			ev.ResolvedAt = resolvedAt
		}
	}
	return nil
}
// UpdateAlertEventNotifications sets the notification flags on whichever
// stored events (active and/or latest) match eventID.
func (r *fakeOpsRepository) UpdateAlertEventNotifications(ctx context.Context, eventID int64, emailSent, webhookSent bool) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, ev := range []*OpsAlertEvent{r.activeEvent, r.latestEvent} {
		if ev != nil && ev.ID == eventID {
			ev.EmailSent = emailSent
			ev.WebhookSent = webhookSent
		}
	}
	return nil
}
// CountActiveAlerts reports 1 when an active event is stored, else 0.
func (r *fakeOpsRepository) CountActiveAlerts(ctx context.Context) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.activeEvent != nil {
		return 1, nil
	}
	return 0, nil
}
// The remaining methods are no-op stubs: dashboard overview, metric caching,
// and Redis health are irrelevant to the alert-evaluation path under test.

func (r *fakeOpsRepository) GetOverviewStats(ctx context.Context, startTime, endTime time.Time) (*OverviewStats, error) {
	return &OverviewStats{}, nil
}

func (r *fakeOpsRepository) GetCachedLatestSystemMetric(ctx context.Context) (*OpsMetrics, error) {
	return nil, nil
}

func (r *fakeOpsRepository) SetCachedLatestSystemMetric(ctx context.Context, metric *OpsMetrics) error {
	return nil
}

func (r *fakeOpsRepository) GetCachedDashboardOverview(ctx context.Context, timeRange string) (*DashboardOverviewData, error) {
	return nil, nil
}

func (r *fakeOpsRepository) SetCachedDashboardOverview(ctx context.Context, timeRange string, data *DashboardOverviewData, ttl time.Duration) error {
	return nil
}

func (r *fakeOpsRepository) PingRedis(ctx context.Context) error {
	return nil
}
//go:build unit || opsalert_unit
package service
import (
"context"
"errors"
"net"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestSelectContiguousMetrics_Contiguous: three samples spaced exactly one
// minute apart (newest first) are accepted as a contiguous window.
func TestSelectContiguousMetrics_Contiguous(t *testing.T) {
	now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	metrics := []OpsMetrics{
		{UpdatedAt: now},
		{UpdatedAt: now.Add(-1 * time.Minute)},
		{UpdatedAt: now.Add(-2 * time.Minute)},
	}
	selected, ok := selectContiguousMetrics(metrics, 3, now)
	require.True(t, ok)
	require.Len(t, selected, 3)
}

// TestSelectContiguousMetrics_GapFails: a missing sample in the sequence
// must cause rejection.
func TestSelectContiguousMetrics_GapFails(t *testing.T) {
	now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	metrics := []OpsMetrics{
		{UpdatedAt: now},
		// Missing the -1m sample (gap ~=2m).
		{UpdatedAt: now.Add(-2 * time.Minute)},
		{UpdatedAt: now.Add(-3 * time.Minute)},
	}
	_, ok := selectContiguousMetrics(metrics, 3, now)
	require.False(t, ok)
}

// TestSelectContiguousMetrics_StaleNewestFails: even a contiguous run is
// rejected when its newest sample is far in the past (10 minutes here).
func TestSelectContiguousMetrics_StaleNewestFails(t *testing.T) {
	now := time.Date(2026, 1, 1, 0, 10, 0, 0, time.UTC)
	metrics := []OpsMetrics{
		{UpdatedAt: now.Add(-10 * time.Minute)},
		{UpdatedAt: now.Add(-11 * time.Minute)},
	}
	_, ok := selectContiguousMetrics(metrics, 2, now)
	require.False(t, ok)
}

// TestMetricValue_SuccessRate_NoTrafficIsNoData: zero request count means
// the success-rate metric reports "no data" (ok=false), not 0%.
func TestMetricValue_SuccessRate_NoTrafficIsNoData(t *testing.T) {
	metric := OpsMetrics{
		RequestCount: 0,
		SuccessRate:  0,
	}
	value, ok := metricValue(metric, OpsMetricSuccessRate)
	require.False(t, ok)
	require.Equal(t, 0.0, value)
}

// TestOpsAlertService_StopWithoutStart_NoPanic: Stop on a never-started
// service must be a safe no-op.
func TestOpsAlertService_StopWithoutStart_NoPanic(t *testing.T) {
	s := NewOpsAlertService(nil, nil, nil)
	require.NotPanics(t, func() { s.Stop() })
}

// TestOpsAlertService_StartStop_Graceful: Stop must return promptly after
// StartWithContext (no stuck background goroutine), and a second Stop must
// not panic.
func TestOpsAlertService_StartStop_Graceful(t *testing.T) {
	s := NewOpsAlertService(nil, nil, nil)
	s.interval = 5 * time.Millisecond
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s.StartWithContext(ctx)
	done := make(chan struct{})
	go func() {
		s.Stop()
		close(done)
	}()
	select {
	case <-done:
		// ok
	case <-time.After(1 * time.Second):
		t.Fatal("Stop did not return; background goroutine likely stuck")
	}
	require.NotPanics(t, func() { s.Stop() })
}

// TestBuildWebhookHTTPClient_DefaultTimeout: a nil or zero-timeout base
// client gets the default webhook timeout; an explicit base timeout is
// preserved. Redirects are always refused via http.ErrUseLastResponse.
func TestBuildWebhookHTTPClient_DefaultTimeout(t *testing.T) {
	client := buildWebhookHTTPClient(nil, nil)
	require.Equal(t, webhookHTTPClientTimeout, client.Timeout)
	require.NotNil(t, client.CheckRedirect)
	require.ErrorIs(t, client.CheckRedirect(nil, nil), http.ErrUseLastResponse)
	base := &http.Client{}
	client = buildWebhookHTTPClient(base, nil)
	require.Equal(t, webhookHTTPClientTimeout, client.Timeout)
	require.NotNil(t, client.CheckRedirect)
	base = &http.Client{Timeout: 2 * time.Second}
	client = buildWebhookHTTPClient(base, nil)
	require.Equal(t, 2*time.Second, client.Timeout)
	require.NotNil(t, client.CheckRedirect)
}
// The following tests pin validateWebhookURL's SSRF defenses: HTTPS-only,
// no userinfo, no localhost/private/link-local/multicast targets (whether
// given as IP literals or resolved via DNS), valid port range, and DNS
// pinning so the dial path cannot be re-pointed after validation.

func TestValidateWebhookURL_RequiresHTTPS(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
	}
	_, err := validateWebhookURL(context.Background(), "http://example.com/webhook")
	require.Error(t, err)
}

func TestValidateWebhookURL_InvalidFormatRejected(t *testing.T) {
	_, err := validateWebhookURL(context.Background(), "https://[::1")
	require.Error(t, err)
}

func TestValidateWebhookURL_RejectsUserinfo(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
	}
	_, err := validateWebhookURL(context.Background(), "https://user:pass@example.com/webhook")
	require.Error(t, err)
}

func TestValidateWebhookURL_RejectsLocalhost(t *testing.T) {
	_, err := validateWebhookURL(context.Background(), "https://localhost/webhook")
	require.Error(t, err)
}

// Covers loopback, RFC1918, CGNAT (100.64/10), link-local/metadata,
// benchmarking (198.18/15), multicast, reserved, and IPv6 equivalents.
func TestValidateWebhookURL_RejectsPrivateIPLiteral(t *testing.T) {
	cases := []string{
		"https://0.0.0.0/webhook",
		"https://127.0.0.1/webhook",
		"https://10.0.0.1/webhook",
		"https://192.168.1.2/webhook",
		"https://172.16.0.1/webhook",
		"https://172.31.255.255/webhook",
		"https://100.64.0.1/webhook",
		"https://169.254.169.254/webhook",
		"https://198.18.0.1/webhook",
		"https://224.0.0.1/webhook",
		"https://240.0.0.1/webhook",
		"https://[::]/webhook",
		"https://[::1]/webhook",
		"https://[ff02::1]/webhook",
	}
	for _, tc := range cases {
		t.Run(tc, func(t *testing.T) {
			_, err := validateWebhookURL(context.Background(), tc)
			require.Error(t, err)
		})
	}
}

func TestValidateWebhookURL_RejectsPrivateIPViaDNS(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		require.Equal(t, "internal.example", host)
		return []net.IPAddr{{IP: net.ParseIP("10.0.0.2")}}, nil
	}
	_, err := validateWebhookURL(context.Background(), "https://internal.example/webhook")
	require.Error(t, err)
}

func TestValidateWebhookURL_RejectsLinkLocalIPViaDNS(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		require.Equal(t, "metadata.example", host)
		return []net.IPAddr{{IP: net.ParseIP("169.254.169.254")}}, nil
	}
	_, err := validateWebhookURL(context.Background(), "https://metadata.example/webhook")
	require.Error(t, err)
}

func TestValidateWebhookURL_AllowsPublicHostViaDNS(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		require.Equal(t, "example.com", host)
		return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
	}
	target, err := validateWebhookURL(context.Background(), "https://example.com:443/webhook")
	require.NoError(t, err)
	require.Equal(t, "https", target.URL.Scheme)
	require.Equal(t, "example.com", target.URL.Hostname())
	require.Equal(t, "443", target.URL.Port())
}

func TestValidateWebhookURL_RejectsInvalidPort(t *testing.T) {
	oldLookup := lookupIPAddrs
	t.Cleanup(func() { lookupIPAddrs = oldLookup })
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
	}
	_, err := validateWebhookURL(context.Background(), "https://example.com:99999/webhook")
	require.Error(t, err)
}

// TestWebhookTransport_UsesPinnedIP_NoDNSRebinding: the dial path must use
// the IP resolved at validation time (exactly one lookup) and never
// re-resolve, defeating DNS-rebinding attacks.
func TestWebhookTransport_UsesPinnedIP_NoDNSRebinding(t *testing.T) {
	oldLookup := lookupIPAddrs
	oldDial := webhookBaseDialContext
	t.Cleanup(func() {
		lookupIPAddrs = oldLookup
		webhookBaseDialContext = oldDial
	})
	lookupCalls := 0
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		lookupCalls++
		require.Equal(t, "example.com", host)
		return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
	}
	target, err := validateWebhookURL(context.Background(), "https://example.com/webhook")
	require.NoError(t, err)
	require.Equal(t, 1, lookupCalls)
	lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
		lookupCalls++
		return []net.IPAddr{{IP: net.ParseIP("10.0.0.1")}}, nil
	}
	var dialAddrs []string
	webhookBaseDialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		dialAddrs = append(dialAddrs, addr)
		return nil, errors.New("dial blocked in test")
	}
	client := buildWebhookHTTPClient(nil, target)
	transport, ok := client.Transport.(*http.Transport)
	require.True(t, ok)
	_, err = transport.DialContext(context.Background(), "tcp", "example.com:443")
	require.Error(t, err)
	require.Equal(t, []string{"93.184.216.34:443"}, dialAddrs)
	require.Equal(t, 1, lookupCalls, "dial path must not re-resolve DNS")
}
// TestRetryWithBackoff_SucceedsAfterRetries: three failures followed by a
// success exhausts the full backoff schedule (1s, 2s, 4s — intercepted, not
// actually slept) and returns nil.
func TestRetryWithBackoff_SucceedsAfterRetries(t *testing.T) {
	oldSleep := opsAlertSleep
	t.Cleanup(func() { opsAlertSleep = oldSleep })
	var slept []time.Duration
	opsAlertSleep = func(ctx context.Context, d time.Duration) error {
		slept = append(slept, d)
		return nil
	}
	attempts := 0
	err := retryWithBackoff(
		context.Background(),
		3,
		[]time.Duration{time.Second, 2 * time.Second, 4 * time.Second},
		func() error {
			attempts++
			if attempts <= 3 {
				return errors.New("send failed")
			}
			return nil
		},
		nil,
	)
	require.NoError(t, err)
	require.Equal(t, 4, attempts)
	require.Equal(t, []time.Duration{time.Second, 2 * time.Second, 4 * time.Second}, slept)
}

// TestRetryWithBackoff_ContextCanceledStopsRetries: cancelling the context
// from the per-attempt callback must abort after the first attempt with
// context.Canceled, having slept only once.
func TestRetryWithBackoff_ContextCanceledStopsRetries(t *testing.T) {
	oldSleep := opsAlertSleep
	t.Cleanup(func() { opsAlertSleep = oldSleep })
	var slept []time.Duration
	opsAlertSleep = func(ctx context.Context, d time.Duration) error {
		slept = append(slept, d)
		return ctx.Err()
	}
	ctx, cancel := context.WithCancel(context.Background())
	attempts := 0
	err := retryWithBackoff(
		ctx,
		3,
		[]time.Duration{time.Second, 2 * time.Second, 4 * time.Second},
		func() error {
			attempts++
			return errors.New("send failed")
		},
		func(attempt int, total int, nextDelay time.Duration, err error) {
			if attempt == 1 {
				cancel()
			}
		},
	)
	require.ErrorIs(t, err, context.Canceled)
	require.Equal(t, 1, attempts)
	require.Equal(t, []time.Duration{time.Second}, slept)
}
package service
import (
"context"
"time"
)
// Alert event lifecycle states.
const (
	OpsAlertStatusFiring   = "firing"
	OpsAlertStatusResolved = "resolved"
)

// Metric type identifiers usable in OpsAlertRule.MetricType.
const (
	OpsMetricSuccessRate        = "success_rate"
	OpsMetricErrorRate          = "error_rate"
	OpsMetricP95LatencyMs       = "p95_latency_ms"
	OpsMetricP99LatencyMs       = "p99_latency_ms"
	OpsMetricHTTP2Errors        = "http2_errors"
	OpsMetricCPUUsagePercent    = "cpu_usage_percent"
	OpsMetricMemoryUsagePercent = "memory_usage_percent"
	OpsMetricQueueDepth         = "concurrency_queue_depth"
)
// OpsAlertRule defines one alerting rule: a threshold on a single metric,
// evaluated over a time window, plus notification settings.
type OpsAlertRule struct {
	ID               int64          `json:"id"`
	Name             string         `json:"name"`
	Description      string         `json:"description"`
	Enabled          bool           `json:"enabled"`
	MetricType       string         `json:"metric_type"` // one of the OpsMetric* constants
	Operator         string         `json:"operator"`    // comparison against Threshold, e.g. ">"
	Threshold        float64        `json:"threshold"`
	WindowMinutes    int            `json:"window_minutes"`
	SustainedMinutes int            `json:"sustained_minutes"`
	Severity         string         `json:"severity"`
	NotifyEmail      bool           `json:"notify_email"`
	NotifyWebhook    bool           `json:"notify_webhook"`
	WebhookURL       string         `json:"webhook_url"`
	CooldownMinutes  int            `json:"cooldown_minutes"`
	DimensionFilters map[string]any `json:"dimension_filters,omitempty"`
	NotifyChannels   []string       `json:"notify_channels,omitempty"`
	NotifyConfig     map[string]any `json:"notify_config,omitempty"`
	CreatedAt        time.Time      `json:"created_at"`
	UpdatedAt        time.Time      `json:"updated_at"`
}
// OpsAlertEvent records one firing (and optional resolution) of an alert rule.
type OpsAlertEvent struct {
	ID             int64      `json:"id"`
	RuleID         int64      `json:"rule_id"`
	Severity       string     `json:"severity"`
	Status         string     `json:"status"` // OpsAlertStatusFiring or OpsAlertStatusResolved
	Title          string     `json:"title"`
	Description    string     `json:"description"`
	MetricValue    float64    `json:"metric_value"`    // observed value that triggered the event
	ThresholdValue float64    `json:"threshold_value"` // rule threshold at firing time
	FiredAt        time.Time  `json:"fired_at"`
	ResolvedAt     *time.Time `json:"resolved_at"` // nil while still firing
	EmailSent      bool       `json:"email_sent"`
	WebhookSent    bool       `json:"webhook_sent"`
	CreatedAt      time.Time  `json:"created_at"`
}
// The methods below are thin pass-throughs to the repository layer; they
// exist so callers (alert evaluator, handlers) depend on OpsService rather
// than on the repository interface directly.

// ListAlertRules returns all configured alert rules.
func (s *OpsService) ListAlertRules(ctx context.Context) ([]OpsAlertRule, error) {
	return s.repo.ListAlertRules(ctx)
}

// GetActiveAlertEvent returns the currently-firing event for a rule, if any.
func (s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
	return s.repo.GetActiveAlertEvent(ctx, ruleID)
}

// GetLatestAlertEvent returns the most recent event for a rule, if any.
func (s *OpsService) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
	return s.repo.GetLatestAlertEvent(ctx, ruleID)
}

// CreateAlertEvent persists a new alert event.
func (s *OpsService) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) error {
	return s.repo.CreateAlertEvent(ctx, event)
}

// UpdateAlertEventStatus transitions an event's status (e.g. to resolved).
func (s *OpsService) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error {
	return s.repo.UpdateAlertEventStatus(ctx, eventID, status, resolvedAt)
}

// UpdateAlertEventNotifications records which notification channels fired.
func (s *OpsService) UpdateAlertEventNotifications(ctx context.Context, eventID int64, emailSent, webhookSent bool) error {
	return s.repo.UpdateAlertEventNotifications(ctx, eventID, emailSent, webhookSent)
}

// ListRecentSystemMetrics returns the newest stored metric samples.
func (s *OpsService) ListRecentSystemMetrics(ctx context.Context, windowMinutes, limit int) ([]OpsMetrics, error) {
	return s.repo.ListRecentSystemMetrics(ctx, windowMinutes, limit)
}

// CountActiveAlerts returns the number of currently-firing alert events.
func (s *OpsService) CountActiveAlerts(ctx context.Context) (int, error) {
	return s.repo.CountActiveAlerts(ctx)
}
package service
import (
"context"
"log"
"runtime"
"sync"
"time"
"github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/mem"
)
// Collection cadence and conversion constants for the metrics collector.
const (
	opsMetricsInterval           = 1 * time.Minute  // sampling period of the collector loop
	opsMetricsCollectTimeout     = 10 * time.Second // overall deadline for one collection pass
	opsMetricsWindowShortMinutes = 1
	opsMetricsWindowLongMinutes  = 5
	bytesPerMB                   = 1024 * 1024
	cpuUsageSampleInterval       = 0 * time.Second // 0 => gopsutil compares against the previous call
	percentScale                 = 100
)
// OpsMetricsCollector periodically samples system/traffic metrics and
// persists them via OpsService. Start/Stop are each idempotent (guarded by
// their own sync.Once).
type OpsMetricsCollector struct {
	opsService         *OpsService
	concurrencyService *ConcurrencyService
	interval           time.Duration

	lastGCPauseTotal uint64     // previous cumulative GC pause, for per-interval deltas
	lastGCPauseMu    sync.Mutex // guards lastGCPauseTotal

	stopCh    chan struct{} // closed by Stop to end the run loop
	startOnce sync.Once
	stopOnce  sync.Once
}
// NewOpsMetricsCollector builds a collector with the default sampling
// interval. Call Start to begin collection.
func NewOpsMetricsCollector(opsService *OpsService, concurrencyService *ConcurrencyService) *OpsMetricsCollector {
	c := &OpsMetricsCollector{interval: opsMetricsInterval}
	c.opsService = opsService
	c.concurrencyService = concurrencyService
	return c
}
// Start launches the background collection goroutine. It is idempotent
// (guarded by startOnce) and safe to call on a nil collector.
func (c *OpsMetricsCollector) Start() {
	if c == nil {
		return
	}
	c.startOnce.Do(func() {
		// Create the stop channel only if it does not exist yet.
		if c.stopCh == nil {
			c.stopCh = make(chan struct{})
		}
		go c.run()
	})
}
// Stop signals the collection goroutine to exit. It is idempotent (guarded
// by stopOnce) and safe to call on a nil collector.
//
// Fix: if Stop is called before Start, the original code consumed stopOnce
// while stopCh was still nil, so a later Start would spawn a run goroutine
// that could never be stopped (goroutine leak). We now install a pre-closed
// stop channel in that case; Start's nil-check preserves it, and run() then
// exits immediately on its first select.
func (c *OpsMetricsCollector) Stop() {
	if c == nil {
		return
	}
	c.stopOnce.Do(func() {
		if c.stopCh == nil {
			c.stopCh = make(chan struct{})
		}
		close(c.stopCh)
	})
}
// run is the collection loop: it samples once immediately, then once per
// interval until stopCh is closed.
func (c *OpsMetricsCollector) run() {
	ticker := time.NewTicker(c.interval)
	defer ticker.Stop()
	c.collectOnce()
	for {
		select {
		case <-c.stopCh:
			return
		case <-ticker.C:
			c.collectOnce()
		}
	}
}
// collectOnce performs a single collection pass: it samples system stats,
// queue depth, and the active-alert count once, then records one metric row
// per configured window (1m and 5m), each combining window-scoped traffic
// stats with the shared system sample. Failures are logged and skipped so
// one bad window does not block the other.
func (c *OpsMetricsCollector) collectOnce() {
	if c.opsService == nil {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), opsMetricsCollectTimeout)
	defer cancel()
	now := time.Now()
	// Sampled once and reused for every window's metric row.
	systemStats := c.collectSystemStats(ctx)
	queueDepth := c.collectQueueDepth(ctx)
	activeAlerts := c.collectActiveAlerts(ctx)
	for _, window := range []int{opsMetricsWindowShortMinutes, opsMetricsWindowLongMinutes} {
		startTime := now.Add(-time.Duration(window) * time.Minute)
		windowStats, err := c.opsService.GetWindowStats(ctx, startTime, now)
		if err != nil {
			log.Printf("[OpsMetrics] failed to get window stats (%dm): %v", window, err)
			continue
		}
		successRate, errorRate := computeRates(windowStats.SuccessCount, windowStats.ErrorCount)
		requestCount := windowStats.SuccessCount + windowStats.ErrorCount
		metric := &OpsMetrics{
			WindowMinutes:         window,
			RequestCount:          requestCount,
			SuccessCount:          windowStats.SuccessCount,
			ErrorCount:            windowStats.ErrorCount,
			SuccessRate:           successRate,
			ErrorRate:             errorRate,
			P95LatencyMs:          windowStats.P95LatencyMs,
			P99LatencyMs:          windowStats.P99LatencyMs,
			HTTP2Errors:           windowStats.HTTP2Errors,
			ActiveAlerts:          activeAlerts,
			CPUUsagePercent:       systemStats.cpuUsage,
			MemoryUsedMB:          systemStats.memoryUsedMB,
			MemoryTotalMB:         systemStats.memoryTotalMB,
			MemoryUsagePercent:    systemStats.memoryUsagePercent,
			HeapAllocMB:           systemStats.heapAllocMB,
			GCPauseMs:             systemStats.gcPauseMs,
			ConcurrencyQueueDepth: queueDepth,
			UpdatedAt:             now,
		}
		if err := c.opsService.RecordMetrics(ctx, metric); err != nil {
			log.Printf("[OpsMetrics] failed to record metrics (%dm): %v", window, err)
		}
	}
}
// computeRates converts success/error counts into percentage rates
// (success rate first). With zero traffic both rates stay 0 — "no data",
// which the UI should render as N/A instead of "100% success".
func computeRates(successCount, errorCount int64) (float64, float64) {
	total := successCount + errorCount
	if total == 0 {
		return 0, 0
	}
	denom := float64(total)
	return float64(successCount) / denom * percentScale, float64(errorCount) / denom * percentScale
}
// opsSystemStats is a point-in-time snapshot of host and runtime stats,
// gathered once per collection pass.
type opsSystemStats struct {
	cpuUsage           float64 // host CPU usage, percent
	memoryUsedMB       int64
	memoryTotalMB      int64
	memoryUsagePercent float64
	heapAllocMB        int64   // Go heap in use (runtime.MemStats.HeapAlloc)
	gcPauseMs          float64 // GC pause time accumulated since the previous sample
}
// collectSystemStats samples host CPU/memory (via gopsutil) and Go runtime
// heap/GC stats. Sampling errors leave the corresponding fields at zero
// rather than failing the whole pass.
func (c *OpsMetricsCollector) collectSystemStats(ctx context.Context) opsSystemStats {
	stats := opsSystemStats{}
	// Interval 0: gopsutil computes usage relative to the previous call.
	if percents, err := cpu.PercentWithContext(ctx, cpuUsageSampleInterval, false); err == nil && len(percents) > 0 {
		stats.cpuUsage = percents[0]
	}
	if vm, err := mem.VirtualMemoryWithContext(ctx); err == nil {
		stats.memoryUsedMB = int64(vm.Used / bytesPerMB)
		stats.memoryTotalMB = int64(vm.Total / bytesPerMB)
		stats.memoryUsagePercent = vm.UsedPercent
	}
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	stats.heapAllocMB = int64(memStats.HeapAlloc / bytesPerMB)
	// GC pause is reported as the delta of the cumulative pause counter since
	// the previous sample. The first sample (lastGCPauseTotal == 0) and any
	// counter regression report 0 instead of a bogus delta.
	c.lastGCPauseMu.Lock()
	if c.lastGCPauseTotal != 0 && memStats.PauseTotalNs >= c.lastGCPauseTotal {
		stats.gcPauseMs = float64(memStats.PauseTotalNs-c.lastGCPauseTotal) / float64(time.Millisecond)
	}
	c.lastGCPauseTotal = memStats.PauseTotalNs
	c.lastGCPauseMu.Unlock()
	return stats
}
// collectQueueDepth reports the total concurrency wait-queue depth, or 0
// when the concurrency service is absent or the query fails (logged).
func (c *OpsMetricsCollector) collectQueueDepth(ctx context.Context) int {
	svc := c.concurrencyService
	if svc == nil {
		return 0
	}
	depth, err := svc.GetTotalWaitCount(ctx)
	if err == nil {
		return depth
	}
	log.Printf("[OpsMetrics] failed to get queue depth: %v", err)
	return 0
}
// collectActiveAlerts returns the number of currently-active alert events.
// It degrades to 0 when the ops service is not wired in or the count
// query fails.
func (c *OpsMetricsCollector) collectActiveAlerts(ctx context.Context) int {
	if c.opsService == nil {
		return 0
	}
	count, err := c.opsService.CountActiveAlerts(ctx)
	if err != nil {
		// Log the failure instead of silently swallowing it, matching the
		// error handling style of collectQueueDepth.
		log.Printf("[OpsMetrics] failed to count active alerts: %v", err)
		return 0
	}
	return count
}
This diff is collapsed.
-- Ops error logs and system metrics
-- ops_error_logs: one row per failed request, capturing request identity,
-- provider context, and error classification for dashboard drill-downs.
CREATE TABLE IF NOT EXISTS ops_error_logs (
id BIGSERIAL PRIMARY KEY,
-- Request identity / attribution (all nullable; may be unknown at failure time).
request_id VARCHAR(64),
user_id BIGINT,
api_key_id BIGINT,
account_id BIGINT,
group_id BIGINT,
client_ip INET,
-- Error classification.
error_phase VARCHAR(32) NOT NULL,
error_type VARCHAR(64) NOT NULL,
severity VARCHAR(4) NOT NULL, -- e.g. 'P1' / 'P2' (see ops_alert_rules seeds)
status_code INT,
-- Upstream request context.
platform VARCHAR(32),
model VARCHAR(100),
request_path VARCHAR(256),
stream BOOLEAN NOT NULL DEFAULT FALSE,
-- Raw error payload and provider-reported details.
error_message TEXT,
error_body TEXT,
provider_error_code VARCHAR(64),
provider_error_type VARCHAR(64),
is_retryable BOOLEAN NOT NULL DEFAULT FALSE,
is_user_actionable BOOLEAN NOT NULL DEFAULT FALSE,
retry_count INT NOT NULL DEFAULT 0,
completion_status VARCHAR(16),
duration_ms INT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes for time-ordered listing and the common phase/platform/severity filters.
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_created_at ON ops_error_logs (created_at DESC);
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase ON ops_error_logs (error_phase);
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_platform ON ops_error_logs (platform);
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_severity ON ops_error_logs (severity);
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase_platform_time ON ops_error_logs (error_phase, platform, created_at DESC);
-- ops_system_metrics: periodic health snapshots (one row per sample) that
-- back the ops dashboard charts.
CREATE TABLE IF NOT EXISTS ops_system_metrics (
id BIGSERIAL PRIMARY KEY,
success_rate DOUBLE PRECISION,
error_rate DOUBLE PRECISION,
p95_latency_ms INT,
p99_latency_ms INT,
http2_errors INT,
active_alerts INT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_created_at ON ops_system_metrics (created_at DESC);
-- Extend ops_system_metrics with windowed/system stats
-- (window_minutes marks the aggregation window of each sample; the rest are
-- host/runtime resource readings collected alongside the request metrics).
ALTER TABLE ops_system_metrics
ADD COLUMN IF NOT EXISTS window_minutes INT NOT NULL DEFAULT 1,
ADD COLUMN IF NOT EXISTS cpu_usage_percent DOUBLE PRECISION,
ADD COLUMN IF NOT EXISTS memory_used_mb BIGINT,
ADD COLUMN IF NOT EXISTS memory_total_mb BIGINT,
ADD COLUMN IF NOT EXISTS memory_usage_percent DOUBLE PRECISION,
ADD COLUMN IF NOT EXISTS heap_alloc_mb BIGINT,
ADD COLUMN IF NOT EXISTS gc_pause_ms DOUBLE PRECISION,
ADD COLUMN IF NOT EXISTS concurrency_queue_depth INT;
-- Supports range queries per aggregation window (history charts).
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_window_time
ON ops_system_metrics (window_minutes, created_at DESC);
-- Ops alert rules and events
-- ops_alert_rules: configurable thresholds evaluated against the windowed
-- metrics; a rule fires when `metric_type operator threshold` holds for
-- `sustained_minutes` over a `window_minutes` aggregation window.
CREATE TABLE IF NOT EXISTS ops_alert_rules (
id BIGSERIAL PRIMARY KEY,
name VARCHAR(128) NOT NULL,
description TEXT,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
metric_type VARCHAR(64) NOT NULL,
operator VARCHAR(8) NOT NULL, -- comparison operator, e.g. '<' or '>'
threshold DOUBLE PRECISION NOT NULL,
window_minutes INT NOT NULL DEFAULT 1,
sustained_minutes INT NOT NULL DEFAULT 1,
severity VARCHAR(4) NOT NULL DEFAULT 'P1',
-- Notification channels; webhook_url is only used when notify_webhook is TRUE.
notify_email BOOLEAN NOT NULL DEFAULT FALSE,
notify_webhook BOOLEAN NOT NULL DEFAULT FALSE,
webhook_url TEXT,
cooldown_minutes INT NOT NULL DEFAULT 10, -- minimum gap between repeated firings
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_enabled ON ops_alert_rules (enabled);
CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_metric ON ops_alert_rules (metric_type, window_minutes);
-- ops_alert_events: one row per firing of a rule; resolved_at is set when the
-- condition clears. Events are dropped together with their rule (CASCADE).
CREATE TABLE IF NOT EXISTS ops_alert_events (
id BIGSERIAL PRIMARY KEY,
rule_id BIGINT NOT NULL REFERENCES ops_alert_rules(id) ON DELETE CASCADE,
severity VARCHAR(4) NOT NULL,
status VARCHAR(16) NOT NULL DEFAULT 'firing',
title VARCHAR(200),
description TEXT,
metric_value DOUBLE PRECISION, -- observed value at firing time
threshold_value DOUBLE PRECISION, -- rule threshold at firing time
fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
resolved_at TIMESTAMPTZ,
-- Delivery bookkeeping for the notification channels.
email_sent BOOLEAN NOT NULL DEFAULT FALSE,
webhook_sent BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_rule_status ON ops_alert_events (rule_id, status);
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_fired_at ON ops_alert_events (fired_at DESC);
-- Seed default ops alert rules (idempotent)
-- NOTE(review): this guard only inserts when the table is completely empty,
-- unlike the later seeds which guard on the rule name. If an operator deletes
-- this rule but keeps others, it will never be re-seeded — confirm intended.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'Global success rate < 99%',
'Trigger when the 1-minute success rate drops below 99% for 2 consecutive minutes.',
TRUE,
'success_rate',
'<',
99,
1,
2,
'P1',
TRUE,
FALSE,
NULL,
10
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules);
-- Seed additional ops alert rules (idempotent)
-- Each INSERT below is guarded by a name-based NOT EXISTS check, so re-running
-- the migration never duplicates rules. The CASE/subquery pair inherits an
-- existing webhook_url from any previously configured rule: notify_webhook is
-- enabled only when such a URL exists.
--
-- Error-rate companion to the success-rate rule above.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'Global error rate > 1%',
'Trigger when the 1-minute error rate exceeds 1% for 2 consecutive minutes.',
TRUE,
'error_rate',
'>',
1,
1,
2,
'P1',
TRUE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
10
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Global error rate > 1%');
-- Tail-latency rule: evaluated on the 5-minute window.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'P99 latency > 2000ms',
'Trigger when the 5-minute P99 latency exceeds 2000ms for 2 consecutive samples.',
TRUE,
'p99_latency_ms',
'>',
2000,
5,
2,
'P1',
TRUE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
15
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'P99 latency > 2000ms');
-- Transport-level HTTP/2 error burst detection (P2, no email).
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'HTTP/2 errors > 20',
'Trigger when HTTP/2 errors exceed 20 in the last minute for 2 consecutive minutes.',
TRUE,
'http2_errors',
'>',
20,
1,
2,
'P2',
FALSE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
10
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'HTTP/2 errors > 20');
-- Host CPU saturation: 1-minute samples sustained for 5 minutes.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'CPU usage > 85%',
'Trigger when CPU usage exceeds 85% for 5 consecutive minutes.',
TRUE,
'cpu_usage_percent',
'>',
85,
1,
5,
'P2',
FALSE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
15
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'CPU usage > 85%');
-- Host memory pressure: 1-minute samples sustained for 5 minutes.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'Memory usage > 90%',
'Trigger when memory usage exceeds 90% for 5 consecutive minutes.',
TRUE,
'memory_usage_percent',
'>',
90,
1,
5,
'P2',
FALSE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
15
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Memory usage > 90%');
-- Concurrency limiter backlog.
INSERT INTO ops_alert_rules (
name,
description,
enabled,
metric_type,
operator,
threshold,
window_minutes,
sustained_minutes,
severity,
notify_email,
notify_webhook,
webhook_url,
cooldown_minutes
)
SELECT
'Queue depth > 50',
'Trigger when concurrency queue depth exceeds 50 for 2 consecutive minutes.',
TRUE,
'concurrency_queue_depth',
'>',
50,
1,
2,
'P2',
FALSE,
CASE
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
ELSE TRUE
END,
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
10
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Queue depth > 50');
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment