Commit 67953abf authored by 陈曦's avatar 陈曦
Browse files

Merge remote-tracking branch 'upstream/main'

parents bbd39c4f 1dfd9744
package service
import (
"bytes"
"context"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/Wei-Shaw/sub2api/internal/pkg/apicompat"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
)
func TestNormalizeOpenAICompatRequestedModel(t *testing.T) {
t.Parallel()
tests := []struct {
name string
input string
want string
}{
{name: "gpt reasoning alias strips xhigh", input: "gpt-5.4-xhigh", want: "gpt-5.4"},
{name: "gpt reasoning alias strips none", input: "gpt-5.4-none", want: "gpt-5.4"},
{name: "codex max model stays intact", input: "gpt-5.1-codex-max", want: "gpt-5.1-codex-max"},
{name: "non openai model unchanged", input: "claude-opus-4-6", want: "claude-opus-4-6"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.want, NormalizeOpenAICompatRequestedModel(tt.input))
})
}
}
// TestApplyOpenAICompatModelNormalization verifies how a reasoning-effort
// suffix on an OpenAI-compatible model name interacts with the request's
// output config: the suffix derives an effort when none is set, an explicit
// config takes precedence, and non-OpenAI models are left untouched.
func TestApplyOpenAICompatModelNormalization(t *testing.T) {
	t.Parallel()

	t.Run("derives xhigh from model suffix when output config missing", func(t *testing.T) {
		request := &apicompat.AnthropicRequest{Model: "gpt-5.4-xhigh"}

		applyOpenAICompatModelNormalization(request)

		require.Equal(t, "gpt-5.4", request.Model)
		require.NotNil(t, request.OutputConfig)
		require.Equal(t, "max", request.OutputConfig.Effort)
	})

	t.Run("explicit output config wins over model suffix", func(t *testing.T) {
		request := &apicompat.AnthropicRequest{
			Model:        "gpt-5.4-xhigh",
			OutputConfig: &apicompat.AnthropicOutputConfig{Effort: "low"},
		}

		applyOpenAICompatModelNormalization(request)

		require.Equal(t, "gpt-5.4", request.Model)
		require.NotNil(t, request.OutputConfig)
		require.Equal(t, "low", request.OutputConfig.Effort)
	})

	t.Run("non openai model is untouched", func(t *testing.T) {
		request := &apicompat.AnthropicRequest{Model: "claude-opus-4-6"}

		applyOpenAICompatModelNormalization(request)

		require.Equal(t, "claude-opus-4-6", request.Model)
		require.Nil(t, request.OutputConfig)
	})
}
// TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh exercises the
// full ForwardAsAnthropic path for the "gpt-5.4-xhigh" alias. It asserts that:
//   - the upstream request carries the normalized model ("gpt-5.4") with an
//     explicit reasoning effort of "xhigh";
//   - the forward result reports the requested model, the upstream model, the
//     billing model, and the derived reasoning effort;
//   - the response returned to the client echoes the originally requested
//     model name and the upstream's output text.
func TestForwardAsAnthropic_NormalizesRoutingAndEffortForGpt54XHigh(t *testing.T) {
	t.Parallel()
	gin.SetMode(gin.TestMode)
	rec := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(rec)
	// Client request uses the aliased model name with the "-xhigh" effort suffix.
	body := []byte(`{"model":"gpt-5.4-xhigh","max_tokens":16,"messages":[{"role":"user","content":"hello"}],"stream":false}`)
	c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body))
	c.Request.Header.Set("Content-Type", "application/json")
	// Canned upstream SSE stream: one completed response event followed by [DONE].
	upstreamBody := strings.Join([]string{
		`data: {"type":"response.completed","response":{"id":"resp_1","object":"response","model":"gpt-5.4","status":"completed","output":[{"type":"message","id":"msg_1","role":"assistant","status":"completed","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":5,"output_tokens":2,"total_tokens":7}}}`,
		"",
		"data: [DONE]",
		"",
	}, "\n")
	// Recorder upstream captures the outgoing request body for later assertions.
	upstream := &httpUpstreamRecorder{resp: &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": []string{"text/event-stream"}, "x-request-id": []string{"rid_compat"}},
		Body:       io.NopCloser(strings.NewReader(upstreamBody)),
	}}
	svc := &OpenAIGatewayService{httpUpstream: upstream}
	// OAuth account whose model mapping maps the normalized name to itself.
	account := &Account{
		ID:          1,
		Name:        "openai-oauth",
		Platform:    PlatformOpenAI,
		Type:        AccountTypeOAuth,
		Concurrency: 1,
		Credentials: map[string]any{
			"access_token":       "oauth-token",
			"chatgpt_account_id": "chatgpt-acc",
			"model_mapping": map[string]any{
				"gpt-5.4": "gpt-5.4",
			},
		},
	}
	result, err := svc.ForwardAsAnthropic(context.Background(), c, account, body, "", "gpt-5.1")
	require.NoError(t, err)
	require.NotNil(t, result)
	// Result metadata: requested alias, normalized upstream model, billing model, effort.
	require.Equal(t, "gpt-5.4-xhigh", result.Model)
	require.Equal(t, "gpt-5.4", result.UpstreamModel)
	require.Equal(t, "gpt-5.4", result.BillingModel)
	require.NotNil(t, result.ReasoningEffort)
	require.Equal(t, "xhigh", *result.ReasoningEffort)
	// Upstream request must carry the normalized model and explicit effort.
	require.Equal(t, "gpt-5.4", gjson.GetBytes(upstream.lastBody, "model").String())
	require.Equal(t, "xhigh", gjson.GetBytes(upstream.lastBody, "reasoning.effort").String())
	// Client response echoes the requested alias and the upstream text.
	require.Equal(t, http.StatusOK, rec.Code)
	require.Equal(t, "gpt-5.4-xhigh", gjson.GetBytes(rec.Body.Bytes(), "model").String())
	require.Equal(t, "ok", gjson.GetBytes(rec.Body.Bytes(), "content.0.text").String())
	t.Logf("upstream body: %s", string(upstream.lastBody))
	t.Logf("response body: %s", rec.Body.String())
}
......@@ -40,6 +40,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
return nil, fmt.Errorf("parse anthropic request: %w", err)
}
originalModel := anthropicReq.Model
applyOpenAICompatModelNormalization(&anthropicReq)
clientStream := anthropicReq.Stream // client's original stream preference
// 2. Convert Anthropic → Responses
......@@ -59,7 +60,7 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
}
// 3. Model mapping
mappedModel := resolveOpenAIForwardModel(account, originalModel, defaultMappedModel)
mappedModel := resolveOpenAIForwardModel(account, anthropicReq.Model, defaultMappedModel)
responsesReq.Model = mappedModel
logger.L().Debug("openai messages: model mapping applied",
......
......@@ -895,14 +895,16 @@ func TestOpenAIGatewayServiceRecordUsage_UsesRequestedModelAndUpstreamModelMetad
require.Equal(t, 1, userRepo.deductCalls)
}
func TestOpenAIGatewayServiceRecordUsage_BillsMappedRequestsUsingUpstreamModelFallback(t *testing.T) {
func TestOpenAIGatewayServiceRecordUsage_BillsMappedRequestsUsingRequestedModel(t *testing.T) {
usageRepo := &openAIRecordUsageLogRepoStub{inserted: true}
userRepo := &openAIRecordUsageUserRepoStub{}
subRepo := &openAIRecordUsageSubRepoStub{}
svc := newOpenAIRecordUsageServiceForTest(usageRepo, userRepo, subRepo, nil)
usage := OpenAIUsage{InputTokens: 20, OutputTokens: 10}
expectedCost, err := svc.billingService.CalculateCost("gpt-5.1-codex", UsageTokens{
// Billing should use the requested model ("gpt-5.1"), not the upstream mapped model ("gpt-5.1-codex").
// This ensures pricing is always based on the model the user requested.
expectedCost, err := svc.billingService.CalculateCost("gpt-5.1", UsageTokens{
InputTokens: 20,
OutputTokens: 10,
}, 1.1)
......
......@@ -4153,9 +4153,6 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec
}
billingModel := forwardResultBillingModel(result.Model, result.UpstreamModel)
if result.BillingModel != "" {
billingModel = strings.TrimSpace(result.BillingModel)
}
serviceTier := ""
if result.ServiceTier != nil {
serviceTier = strings.TrimSpace(*result.ServiceTier)
......
......@@ -502,6 +502,25 @@ func (s *OpenAIOAuthService) RefreshAccountToken(ctx context.Context, account *A
refreshToken := account.GetCredential("refresh_token")
if refreshToken == "" {
accessToken := account.GetCredential("access_token")
if accessToken != "" {
tokenInfo := &OpenAITokenInfo{
AccessToken: accessToken,
RefreshToken: "",
IDToken: account.GetCredential("id_token"),
ClientID: account.GetCredential("client_id"),
Email: account.GetCredential("email"),
ChatGPTAccountID: account.GetCredential("chatgpt_account_id"),
ChatGPTUserID: account.GetCredential("chatgpt_user_id"),
OrganizationID: account.GetCredential("organization_id"),
PlanType: account.GetCredential("plan_type"),
}
if expiresAt := account.GetCredentialAsTime("expires_at"); expiresAt != nil {
tokenInfo.ExpiresAt = expiresAt.Unix()
tokenInfo.ExpiresIn = int64(time.Until(*expiresAt).Seconds())
}
return tokenInfo, nil
}
return nil, infraerrors.New(http.StatusBadRequest, "OPENAI_OAUTH_NO_REFRESH_TOKEN", "no refresh token available")
}
......
package service
import (
"context"
"errors"
"sync/atomic"
"testing"
"time"
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
"github.com/stretchr/testify/require"
)
// openaiOAuthClientRefreshStub is a test double for the OpenAI OAuth client
// that counts refresh attempts. All methods fail, so any recorded refresh
// call indicates the service tried (and should not have tried) to refresh.
type openaiOAuthClientRefreshStub struct {
	refreshCalls int32 // incremented atomically by the refresh methods
}

// ExchangeCode is unused by the tests exercising this stub and always errors.
func (s *openaiOAuthClientRefreshStub) ExchangeCode(ctx context.Context, code, codeVerifier, redirectURI, proxyURL, clientID string) (*openai.TokenResponse, error) {
	return nil, errors.New("not implemented")
}

// RefreshToken records the refresh attempt and fails.
func (s *openaiOAuthClientRefreshStub) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*openai.TokenResponse, error) {
	atomic.AddInt32(&s.refreshCalls, 1)
	return nil, errors.New("not implemented")
}

// RefreshTokenWithClientID records the refresh attempt and fails.
func (s *openaiOAuthClientRefreshStub) RefreshTokenWithClientID(ctx context.Context, refreshToken, proxyURL string, clientID string) (*openai.TokenResponse, error) {
	atomic.AddInt32(&s.refreshCalls, 1)
	return nil, errors.New("not implemented")
}
// TestOpenAIOAuthService_RefreshAccountToken_NoRefreshTokenUsesExistingAccessToken
// verifies that an account without a refresh token but with a stored access
// token is served from its existing credentials, and that the OAuth client's
// refresh endpoint is never invoked.
func TestOpenAIOAuthService_RefreshAccountToken_NoRefreshTokenUsesExistingAccessToken(t *testing.T) {
	stub := &openaiOAuthClientRefreshStub{}
	service := NewOpenAIOAuthService(nil, stub)

	// Access token that is still valid for another 30 minutes.
	expiry := time.Now().Add(30 * time.Minute).UTC().Format(time.RFC3339)
	acct := &Account{
		ID:       77,
		Platform: PlatformOpenAI,
		Type:     AccountTypeOAuth,
		Credentials: map[string]any{
			"access_token": "existing-access-token",
			"expires_at":   expiry,
			"client_id":    "client-id-1",
		},
	}

	tokenInfo, err := service.RefreshAccountToken(context.Background(), acct)
	require.NoError(t, err)
	require.NotNil(t, tokenInfo)
	require.Equal(t, "existing-access-token", tokenInfo.AccessToken)
	require.Equal(t, "client-id-1", tokenInfo.ClientID)
	require.Zero(t, atomic.LoadInt32(&stub.refreshCalls), "existing access token should be reused without calling refresh")
}
......@@ -189,10 +189,38 @@ func (s *PricingService) checkAndUpdatePricing() error {
return s.downloadPricingData()
}
// 检查文件是否过期
// 先加载本地文件(确保服务可用),再检查是否需要更新
if err := s.loadPricingData(pricingFile); err != nil {
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to load local file, downloading: %v", err)
return s.downloadPricingData()
}
// 如果配置了哈希URL,通过远程哈希检查是否有更新
if s.cfg.Pricing.HashURL != "" {
remoteHash, err := s.fetchRemoteHash()
if err != nil {
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to fetch remote hash on startup: %v", err)
return nil // 已加载本地文件,哈希获取失败不影响启动
}
s.mu.RLock()
localHash := s.localHash
s.mu.RUnlock()
if localHash == "" || remoteHash != localHash {
logger.LegacyPrintf("service.pricing", "[Pricing] Remote hash differs on startup (local=%s remote=%s), downloading...",
localHash[:min(8, len(localHash))], remoteHash[:min(8, len(remoteHash))])
if err := s.downloadPricingData(); err != nil {
logger.LegacyPrintf("service.pricing", "[Pricing] Download failed, using existing file: %v", err)
}
}
return nil
}
// 没有哈希URL时,基于文件年龄检查
info, err := os.Stat(pricingFile)
if err != nil {
return s.downloadPricingData()
return nil // 已加载本地文件
}
fileAge := time.Since(info.ModTime())
......@@ -205,21 +233,11 @@ func (s *PricingService) checkAndUpdatePricing() error {
}
}
// 加载本地文件
return s.loadPricingData(pricingFile)
return nil
}
// syncWithRemote 与远程同步(基于哈希校验)
func (s *PricingService) syncWithRemote() error {
pricingFile := s.getPricingFilePath()
// 计算本地文件哈希
localHash, err := s.computeFileHash(pricingFile)
if err != nil {
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to compute local hash: %v", err)
return s.downloadPricingData()
}
// 如果配置了哈希URL,从远程获取哈希进行比对
if s.cfg.Pricing.HashURL != "" {
remoteHash, err := s.fetchRemoteHash()
......@@ -228,8 +246,13 @@ func (s *PricingService) syncWithRemote() error {
return nil // 哈希获取失败不影响正常使用
}
if remoteHash != localHash {
logger.LegacyPrintf("service.pricing", "%s", "[Pricing] Remote hash differs, downloading new version...")
s.mu.RLock()
localHash := s.localHash
s.mu.RUnlock()
if localHash == "" || remoteHash != localHash {
logger.LegacyPrintf("service.pricing", "[Pricing] Remote hash differs (local=%s remote=%s), downloading new version...",
localHash[:min(8, len(localHash))], remoteHash[:min(8, len(remoteHash))])
return s.downloadPricingData()
}
logger.LegacyPrintf("service.pricing", "%s", "[Pricing] Hash check passed, no update needed")
......@@ -237,6 +260,7 @@ func (s *PricingService) syncWithRemote() error {
}
// 没有哈希URL时,基于时间检查
pricingFile := s.getPricingFilePath()
info, err := os.Stat(pricingFile)
if err != nil {
return s.downloadPricingData()
......@@ -264,11 +288,12 @@ func (s *PricingService) downloadPricingData() error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
var expectedHash string
// 获取远程哈希(用于同步锚点,不作为完整性校验)
var remoteHash string
if strings.TrimSpace(s.cfg.Pricing.HashURL) != "" {
expectedHash, err = s.fetchRemoteHash()
remoteHash, err = s.fetchRemoteHash()
if err != nil {
return fmt.Errorf("fetch remote hash: %w", err)
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to fetch remote hash (continuing): %v", err)
}
}
......@@ -277,11 +302,13 @@ func (s *PricingService) downloadPricingData() error {
return fmt.Errorf("download failed: %w", err)
}
if expectedHash != "" {
actualHash := sha256.Sum256(body)
if !strings.EqualFold(expectedHash, hex.EncodeToString(actualHash[:])) {
return fmt.Errorf("pricing hash mismatch")
}
// 哈希校验:不匹配时仅告警,不阻止更新
// 远程哈希文件可能与数据文件不同步(如维护者更新了数据但未更新哈希文件)
dataHash := sha256.Sum256(body)
dataHashStr := hex.EncodeToString(dataHash[:])
if remoteHash != "" && !strings.EqualFold(remoteHash, dataHashStr) {
logger.LegacyPrintf("service.pricing", "[Pricing] Hash mismatch warning: remote=%s data=%s (hash file may be out of sync)",
remoteHash[:min(8, len(remoteHash))], dataHashStr[:8])
}
// 解析JSON数据(使用灵活的解析方式)
......@@ -296,11 +323,14 @@ func (s *PricingService) downloadPricingData() error {
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to save file: %v", err)
}
// 保存哈希
hash := sha256.Sum256(body)
hashStr := hex.EncodeToString(hash[:])
// 使用远程哈希作为同步锚点,防止重复下载
// 当远程哈希不可用时,回退到数据本身的哈希
syncHash := dataHashStr
if remoteHash != "" {
syncHash = remoteHash
}
hashFile := s.getHashFilePath()
if err := os.WriteFile(hashFile, []byte(hashStr+"\n"), 0644); err != nil {
if err := os.WriteFile(hashFile, []byte(syncHash+"\n"), 0644); err != nil {
logger.LegacyPrintf("service.pricing", "[Pricing] Failed to save hash: %v", err)
}
......@@ -308,7 +338,7 @@ func (s *PricingService) downloadPricingData() error {
s.mu.Lock()
s.pricingData = data
s.lastUpdated = time.Now()
s.localHash = hashStr
s.localHash = syncHash
s.mu.Unlock()
logger.LegacyPrintf("service.pricing", "[Pricing] Downloaded %d models successfully", len(data))
......@@ -486,16 +516,6 @@ func (s *PricingService) validatePricingURL(raw string) (string, error) {
return normalized, nil
}
// computeFileHash 计算文件哈希
func (s *PricingService) computeFileHash(filePath string) (string, error) {
data, err := os.ReadFile(filePath)
if err != nil {
return "", err
}
hash := sha256.Sum256(data)
return hex.EncodeToString(hash[:]), nil
}
// GetModelPricing 获取模型价格(带模糊匹配)
func (s *PricingService) GetModelPricing(modelName string) *LiteLLMModelPricing {
s.mu.RLock()
......
......@@ -33,6 +33,7 @@ type TokenRefreshService struct {
proxyRepo ProxyRepository
stopCh chan struct{}
stopOnce sync.Once
wg sync.WaitGroup
}
......@@ -130,7 +131,9 @@ func (s *TokenRefreshService) Start() {
// Stop 停止刷新服务(可安全多次调用)
func (s *TokenRefreshService) Stop() {
s.stopOnce.Do(func() {
close(s.stopCh)
})
s.wg.Wait()
slog.Info("token_refresh.service_stopped")
}
......@@ -430,6 +433,7 @@ func isNonRetryableRefreshError(err error) bool {
"unauthorized_client", // 客户端未授权
"access_denied", // 访问被拒绝
"missing_project_id", // 缺少 project_id
"no refresh token available",
}
for _, needle := range nonRetryable {
if strings.Contains(msg, needle) {
......
......@@ -19,6 +19,7 @@ type tokenRefreshAccountRepo struct {
updateCredentialsCalls int
setErrorCalls int
clearTempCalls int
setTempUnschedCalls int
lastAccount *Account
updateErr error
}
......@@ -58,6 +59,11 @@ func (r *tokenRefreshAccountRepo) ClearTempUnschedulable(ctx context.Context, id
return nil
}
// SetTempUnschedulable records that the service asked to mark the account
// temporarily unschedulable; the stub only counts the calls so tests can
// assert whether (and how often) the transition was requested.
func (r *tokenRefreshAccountRepo) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
	r.setTempUnschedCalls++
	return nil
}
type tokenCacheInvalidatorStub struct {
calls int
err error
......@@ -490,6 +496,31 @@ func TestTokenRefreshService_RefreshWithRetry_NonRetryableErrorAllPlatforms(t *t
}
}
// TestTokenRefreshService_RefreshWithRetry_NoRefreshTokenDoesNotTempUnschedule
// verifies that a "no refresh token available" failure is handled as a
// non-retryable credential problem: the account's error state is recorded
// once, and the account is neither updated nor temporarily unscheduled.
func TestTokenRefreshService_RefreshWithRetry_NoRefreshTokenDoesNotTempUnschedule(t *testing.T) {
	repo := &tokenRefreshAccountRepo{}
	cfg := &config.Config{
		TokenRefresh: config.TokenRefreshConfig{
			MaxRetries:          2,
			RetryBackoffSeconds: 0,
		},
	}
	svc := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, nil, cfg, nil)

	acct := &Account{
		ID:       18,
		Platform: PlatformOpenAI,
		Type:     AccountTypeOAuth,
	}
	stub := &tokenRefresherStub{
		err: errors.New("no refresh token available"),
	}

	err := svc.refreshWithRetry(context.Background(), acct, stub, stub, time.Hour)
	require.Error(t, err)
	require.Equal(t, 0, repo.updateCalls)
	require.Equal(t, 0, repo.setTempUnschedCalls, "missing refresh token should not mark the account temp unschedulable")
	require.Equal(t, 1, repo.setErrorCalls, "missing refresh token should be treated as a non-retryable credential state")
}
// TestIsNonRetryableRefreshError 测试不可重试错误判断
func TestIsNonRetryableRefreshError(t *testing.T) {
tests := []struct {
......@@ -503,6 +534,7 @@ func TestIsNonRetryableRefreshError(t *testing.T) {
{name: "invalid_client", err: errors.New("invalid_client"), expected: true},
{name: "unauthorized_client", err: errors.New("unauthorized_client"), expected: true},
{name: "access_denied", err: errors.New("access_denied"), expected: true},
{name: "no_refresh_token", err: errors.New("no refresh token available"), expected: true},
{name: "invalid_grant_with_desc", err: errors.New("Error: invalid_grant - token revoked"), expected: true},
{name: "case_insensitive", err: errors.New("INVALID_GRANT"), expected: true},
}
......
......@@ -21,8 +21,8 @@ func optionalNonEqualStringPtr(value, compare string) *string {
}
// forwardResultBillingModel picks the model name to bill against for a
// forwarded request. Billing prefers the model the user requested; the
// upstream (mapped) model is only a fallback when no requested model is
// recorded. Both values are whitespace-trimmed before use.
//
// NOTE: the SOURCE span interleaved the old and new sides of a diff; this is
// the new-side implementation (requested model wins), which matches the
// updated RecordUsage billing tests elsewhere in this change.
func forwardResultBillingModel(requestedModel, upstreamModel string) string {
	if trimmed := strings.TrimSpace(requestedModel); trimmed != "" {
		return trimmed
	}
	return strings.TrimSpace(upstreamModel)
}
......@@ -865,10 +865,10 @@ rate_limit:
pricing:
# URL to fetch model pricing data (default: pinned model-price-repo commit)
# 获取模型定价数据的 URL(默认:固定 commit 的 model-price-repo)
remote_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.json"
remote_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/refs/heads/main/model_prices_and_context_window.json"
# Hash verification URL (optional)
# 哈希校验 URL(可选)
hash_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.sha256"
hash_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/refs/heads/main/model_prices_and_context_window.sha256"
# Local data directory for caching
# 本地数据缓存目录
data_dir: "./data"
......
......@@ -4383,6 +4383,7 @@ export default {
provider: 'Type',
active: 'Active',
endpoint: 'Endpoint',
bucket: 'Bucket',
storagePath: 'Storage Path',
capacityUsage: 'Capacity / Used',
capacityUnlimited: 'Unlimited',
......
......@@ -4547,6 +4547,7 @@ export default {
provider: '存储类型',
active: '生效状态',
endpoint: '端点',
bucket: '存储桶',
storagePath: '存储路径',
capacityUsage: '容量 / 已用',
capacityUnlimited: '无限制',
......
......@@ -2,6 +2,7 @@
import { computed, onMounted, reactive, ref, watch } from 'vue'
import { opsAPI, type OpsRuntimeLogConfig, type OpsSystemLog, type OpsSystemLogSinkHealth } from '@/api/admin/ops'
import Pagination from '@/components/common/Pagination.vue'
import Select from '@/components/common/Select.vue'
import { useAppStore } from '@/stores'
const appStore = useAppStore()
......@@ -56,6 +57,37 @@ const filters = reactive({
q: ''
})
const runtimeLevelOptions = [
{ value: 'debug', label: 'debug' },
{ value: 'info', label: 'info' },
{ value: 'warn', label: 'warn' },
{ value: 'error', label: 'error' }
]
const stacktraceLevelOptions = [
{ value: 'none', label: 'none' },
{ value: 'error', label: 'error' },
{ value: 'fatal', label: 'fatal' }
]
const timeRangeOptions = [
{ value: '5m', label: '5m' },
{ value: '30m', label: '30m' },
{ value: '1h', label: '1h' },
{ value: '6h', label: '6h' },
{ value: '24h', label: '24h' },
{ value: '7d', label: '7d' },
{ value: '30d', label: '30d' }
]
const filterLevelOptions = [
{ value: '', label: '全部' },
{ value: 'debug', label: 'debug' },
{ value: 'info', label: 'info' },
{ value: 'warn', label: 'warn' },
{ value: 'error', label: 'error' }
]
const levelBadgeClass = (level: string) => {
const v = String(level || '').toLowerCase()
if (v === 'error' || v === 'fatal') return 'bg-red-100 text-red-700 dark:bg-red-900/30 dark:text-red-300'
......@@ -347,20 +379,11 @@ onMounted(async () => {
<div class="grid grid-cols-1 gap-3 md:grid-cols-2 xl:grid-cols-6">
<label class="text-xs text-gray-600 dark:text-gray-300">
级别
<select v-model="runtimeConfig.level" class="input mt-1">
<option value="debug">debug</option>
<option value="info">info</option>
<option value="warn">warn</option>
<option value="error">error</option>
</select>
<Select v-model="runtimeConfig.level" class="mt-1" :options="runtimeLevelOptions" />
</label>
<label class="text-xs text-gray-600 dark:text-gray-300">
堆栈阈值
<select v-model="runtimeConfig.stacktrace_level" class="input mt-1">
<option value="none">none</option>
<option value="error">error</option>
<option value="fatal">fatal</option>
</select>
<Select v-model="runtimeConfig.stacktrace_level" class="mt-1" :options="stacktraceLevelOptions" />
</label>
<label class="text-xs text-gray-600 dark:text-gray-300">
采样初始
......@@ -403,15 +426,7 @@ onMounted(async () => {
<div class="mb-4 grid grid-cols-1 gap-3 md:grid-cols-5">
<label class="text-xs text-gray-600 dark:text-gray-300">
时间范围
<select v-model="filters.time_range" class="input mt-1">
<option value="5m">5m</option>
<option value="30m">30m</option>
<option value="1h">1h</option>
<option value="6h">6h</option>
<option value="24h">24h</option>
<option value="7d">7d</option>
<option value="30d">30d</option>
</select>
<Select v-model="filters.time_range" class="mt-1" :options="timeRangeOptions" />
</label>
<label class="text-xs text-gray-600 dark:text-gray-300">
开始时间(可选)
......@@ -423,13 +438,7 @@ onMounted(async () => {
</label>
<label class="text-xs text-gray-600 dark:text-gray-300">
级别
<select v-model="filters.level" class="input mt-1">
<option value="">全部</option>
<option value="debug">debug</option>
<option value="info">info</option>
<option value="warn">warn</option>
<option value="error">error</option>
</select>
<Select v-model="filters.level" class="mt-1" :options="filterLevelOptions" />
</label>
<label class="text-xs text-gray-600 dark:text-gray-300">
组件
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment