Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
陈曦
sub2api
Commits
90bce60b
Commit
90bce60b
authored
Jan 15, 2026
by
yangjianbo
Browse files
feat: merge dev
parent
a458e684
Changes
107
Expand all
Hide whitespace changes
Inline
Side-by-side
backend/internal/handler/admin/ops_handler.go
View file @
90bce60b
...
...
@@ -19,6 +19,57 @@ type OpsHandler struct {
opsService
*
service
.
OpsService
}
// GetErrorLogByID returns ops error log detail.
// GET /api/v1/admin/ops/errors/:id
//
// Responds 503 when the ops service is not wired, and propagates the
// monitoring-disabled error before touching storage. The :id path
// parameter must parse as a positive int64.
func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	// Monitoring can be toggled off; surface that as an error instead of
	// returning stale/empty data.
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, detail)
}
// Supported values for the ops error list "view" query parameter.
const (
	opsListViewErrors   = "errors"   // only errors (default)
	opsListViewExcluded = "excluded" // only entries excluded by filters
	opsListViewAll      = "all"      // both
)
func
parseOpsViewParam
(
c
*
gin
.
Context
)
string
{
if
c
==
nil
{
return
""
}
v
:=
strings
.
ToLower
(
strings
.
TrimSpace
(
c
.
Query
(
"view"
)))
switch
v
{
case
""
,
opsListViewErrors
:
return
opsListViewErrors
case
opsListViewExcluded
:
return
opsListViewExcluded
case
opsListViewAll
:
return
opsListViewAll
default
:
return
opsListViewErrors
}
}
// NewOpsHandler constructs an OpsHandler backed by the given ops service.
// A nil opsService is tolerated; handlers respond 503 in that case.
func NewOpsHandler(opsService *service.OpsService) *OpsHandler {
	return &OpsHandler{opsService: opsService}
}
...
...
@@ -47,16 +98,26 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
return
}
filter
:=
&
service
.
OpsErrorLogFilter
{
Page
:
page
,
PageSize
:
pageSize
,
}
filter
:=
&
service
.
OpsErrorLogFilter
{
Page
:
page
,
PageSize
:
pageSize
}
if
!
startTime
.
IsZero
()
{
filter
.
StartTime
=
&
startTime
}
if
!
endTime
.
IsZero
()
{
filter
.
EndTime
=
&
endTime
}
filter
.
View
=
parseOpsViewParam
(
c
)
filter
.
Phase
=
strings
.
TrimSpace
(
c
.
Query
(
"phase"
))
filter
.
Owner
=
strings
.
TrimSpace
(
c
.
Query
(
"error_owner"
))
filter
.
Source
=
strings
.
TrimSpace
(
c
.
Query
(
"error_source"
))
filter
.
Query
=
strings
.
TrimSpace
(
c
.
Query
(
"q"
))
filter
.
UserQuery
=
strings
.
TrimSpace
(
c
.
Query
(
"user_query"
))
// Force request errors: client-visible status >= 400.
// buildOpsErrorLogsWhere already applies this for non-upstream phase.
if
strings
.
EqualFold
(
strings
.
TrimSpace
(
filter
.
Phase
),
"upstream"
)
{
filter
.
Phase
=
""
}
if
platform
:=
strings
.
TrimSpace
(
c
.
Query
(
"platform"
));
platform
!=
""
{
filter
.
Platform
=
platform
...
...
@@ -77,11 +138,19 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
}
filter
.
AccountID
=
&
id
}
if
phase
:=
strings
.
TrimSpace
(
c
.
Query
(
"phase"
));
phase
!=
""
{
filter
.
Phase
=
phase
}
if
q
:=
strings
.
TrimSpace
(
c
.
Query
(
"q"
));
q
!=
""
{
filter
.
Query
=
q
if
v
:=
strings
.
TrimSpace
(
c
.
Query
(
"resolved"
));
v
!=
""
{
switch
strings
.
ToLower
(
v
)
{
case
"1"
,
"true"
,
"yes"
:
b
:=
true
filter
.
Resolved
=
&
b
case
"0"
,
"false"
,
"no"
:
b
:=
false
filter
.
Resolved
=
&
b
default
:
response
.
BadRequest
(
c
,
"Invalid resolved"
)
return
}
}
if
statusCodesStr
:=
strings
.
TrimSpace
(
c
.
Query
(
"status_codes"
));
statusCodesStr
!=
""
{
parts
:=
strings
.
Split
(
statusCodesStr
,
","
)
...
...
@@ -106,13 +175,120 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
response
.
ErrorFrom
(
c
,
err
)
return
}
response
.
Paginated
(
c
,
result
.
Errors
,
int64
(
result
.
Total
),
result
.
Page
,
result
.
PageSize
)
}
// ListRequestErrors lists client-visible request errors.
// GET /api/v1/admin/ops/request-errors
//
// Supports pagination (capped at 500 per page), a time range (default 1h),
// and filters: view, phase, error_owner, error_source, q, user_query,
// platform, group_id, account_id, resolved, status_codes.
func (h *OpsHandler) ListRequestErrors(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	page, pageSize := response.ParsePagination(c)
	// Hard cap to keep list queries bounded.
	if pageSize > 500 {
		pageSize = 500
	}
	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}
	filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
	if !startTime.IsZero() {
		filter.StartTime = &startTime
	}
	if !endTime.IsZero() {
		filter.EndTime = &endTime
	}
	filter.View = parseOpsViewParam(c)
	filter.Phase = strings.TrimSpace(c.Query("phase"))
	filter.Owner = strings.TrimSpace(c.Query("error_owner"))
	filter.Source = strings.TrimSpace(c.Query("error_source"))
	filter.Query = strings.TrimSpace(c.Query("q"))
	filter.UserQuery = strings.TrimSpace(c.Query("user_query"))
	// Force request errors: client-visible status >= 400.
	// buildOpsErrorLogsWhere already applies this for non-upstream phase.
	if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") {
		filter.Phase = ""
	}
	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}
	if v := strings.TrimSpace(c.Query("resolved")); v != "" {
		switch strings.ToLower(v) {
		case "1", "true", "yes":
			b := true
			filter.Resolved = &b
		case "0", "false", "no":
			b := false
			filter.Resolved = &b
		default:
			response.BadRequest(c, "Invalid resolved")
			return
		}
	}
	// status_codes is a comma-separated list of non-negative ints;
	// empty segments are skipped, anything else is a 400.
	if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
		parts := strings.Split(statusCodesStr, ",")
		out := make([]int, 0, len(parts))
		for _, part := range parts {
			p := strings.TrimSpace(part)
			if p == "" {
				continue
			}
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				response.BadRequest(c, "Invalid status_codes")
				return
			}
			out = append(out, n)
		}
		filter.StatusCodes = out
	}
	result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}
// GetErrorLogByID returns a single error log detail.
// GET /api/v1/admin/ops/errors/:id
func
(
h
*
OpsHandler
)
GetErrorLogByID
(
c
*
gin
.
Context
)
{
// GetRequestError returns request error detail.
// GET /api/v1/admin/ops/request-errors/:id
func (h *OpsHandler) GetRequestError(c *gin.Context) {
	// same storage; just proxy to existing detail
	h.GetErrorLogByID(c)
}
// ListRequestErrorUpstreamErrors lists upstream error logs correlated to a request error.
// GET /api/v1/admin/ops/request-errors/:id/upstream-errors
func
(
h
*
OpsHandler
)
ListRequestErrorUpstreamErrors
(
c
*
gin
.
Context
)
{
if
h
.
opsService
==
nil
{
response
.
Error
(
c
,
http
.
StatusServiceUnavailable
,
"Ops service not available"
)
return
...
...
@@ -129,15 +305,306 @@ func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
return
}
// Load request error to get correlation keys.
detail
,
err
:=
h
.
opsService
.
GetErrorLogByID
(
c
.
Request
.
Context
(),
id
)
if
err
!=
nil
{
response
.
ErrorFrom
(
c
,
err
)
return
}
response
.
Success
(
c
,
detail
)
// Correlate by request_id/client_request_id.
requestID
:=
strings
.
TrimSpace
(
detail
.
RequestID
)
clientRequestID
:=
strings
.
TrimSpace
(
detail
.
ClientRequestID
)
if
requestID
==
""
&&
clientRequestID
==
""
{
response
.
Paginated
(
c
,
[]
*
service
.
OpsErrorLog
{},
0
,
1
,
10
)
return
}
page
,
pageSize
:=
response
.
ParsePagination
(
c
)
if
pageSize
>
500
{
pageSize
=
500
}
// Keep correlation window wide enough so linked upstream errors
// are discoverable even when UI defaults to 1h elsewhere.
startTime
,
endTime
,
err
:=
parseOpsTimeRange
(
c
,
"30d"
)
if
err
!=
nil
{
response
.
BadRequest
(
c
,
err
.
Error
())
return
}
filter
:=
&
service
.
OpsErrorLogFilter
{
Page
:
page
,
PageSize
:
pageSize
}
if
!
startTime
.
IsZero
()
{
filter
.
StartTime
=
&
startTime
}
if
!
endTime
.
IsZero
()
{
filter
.
EndTime
=
&
endTime
}
filter
.
View
=
"all"
filter
.
Phase
=
"upstream"
filter
.
Owner
=
"provider"
filter
.
Source
=
strings
.
TrimSpace
(
c
.
Query
(
"error_source"
))
filter
.
Query
=
strings
.
TrimSpace
(
c
.
Query
(
"q"
))
if
platform
:=
strings
.
TrimSpace
(
c
.
Query
(
"platform"
));
platform
!=
""
{
filter
.
Platform
=
platform
}
// Prefer exact match on request_id; if missing, fall back to client_request_id.
if
requestID
!=
""
{
filter
.
RequestID
=
requestID
}
else
{
filter
.
ClientRequestID
=
clientRequestID
}
result
,
err
:=
h
.
opsService
.
GetErrorLogs
(
c
.
Request
.
Context
(),
filter
)
if
err
!=
nil
{
response
.
ErrorFrom
(
c
,
err
)
return
}
// If client asks for details, expand each upstream error log to include upstream response fields.
includeDetail
:=
strings
.
TrimSpace
(
c
.
Query
(
"include_detail"
))
if
includeDetail
==
"1"
||
strings
.
EqualFold
(
includeDetail
,
"true"
)
||
strings
.
EqualFold
(
includeDetail
,
"yes"
)
{
details
:=
make
([]
*
service
.
OpsErrorLogDetail
,
0
,
len
(
result
.
Errors
))
for
_
,
item
:=
range
result
.
Errors
{
if
item
==
nil
{
continue
}
d
,
err
:=
h
.
opsService
.
GetErrorLogByID
(
c
.
Request
.
Context
(),
item
.
ID
)
if
err
!=
nil
||
d
==
nil
{
continue
}
details
=
append
(
details
,
d
)
}
response
.
Paginated
(
c
,
details
,
int64
(
result
.
Total
),
result
.
Page
,
result
.
PageSize
)
return
}
response
.
Paginated
(
c
,
result
.
Errors
,
int64
(
result
.
Total
),
result
.
Page
,
result
.
PageSize
)
}
// RetryRequestErrorClient retries the client request based on stored request body.
// POST /api/v1/admin/ops/request-errors/:id/retry-client
//
// Requires an authenticated admin subject; the retry is attributed to
// subject.UserID. Retries in client mode with no pinned account.
func (h *OpsHandler) RetryRequestErrorClient(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeClient, nil)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, result)
}
// RetryRequestErrorUpstreamEvent retries a specific upstream attempt using captured upstream_request_body.
// POST /api/v1/admin/ops/request-errors/:id/upstream-errors/:idx/retry
//
// :id is the error log id (positive int64); :idx is the zero-based index of
// the upstream attempt within that error log.
func (h *OpsHandler) RetryRequestErrorUpstreamEvent(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	idxStr := strings.TrimSpace(c.Param("idx"))
	idx, err := strconv.Atoi(idxStr)
	if err != nil || idx < 0 {
		response.BadRequest(c, "Invalid upstream idx")
		return
	}
	result, err := h.opsService.RetryUpstreamEvent(c.Request.Context(), subject.UserID, id, idx)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, result)
}
// ResolveRequestError toggles resolved status.
// PUT /api/v1/admin/ops/request-errors/:id/resolve
//
// Thin alias over UpdateErrorResolution; request/upstream errors share storage.
func (h *OpsHandler) ResolveRequestError(c *gin.Context) {
	h.UpdateErrorResolution(c)
}
// ListUpstreamErrors lists independent upstream errors.
// GET /api/v1/admin/ops/upstream-errors
//
// Mirrors ListRequestErrors but pins phase=upstream / owner=provider so only
// provider-side failures are returned. Pagination is capped at 500 and the
// default time range is 1h.
func (h *OpsHandler) ListUpstreamErrors(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	page, pageSize := response.ParsePagination(c)
	if pageSize > 500 {
		pageSize = 500
	}
	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}
	filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
	if !startTime.IsZero() {
		filter.StartTime = &startTime
	}
	if !endTime.IsZero() {
		filter.EndTime = &endTime
	}
	filter.View = parseOpsViewParam(c)
	// Fixed classification for this endpoint: upstream failures owned by the provider.
	filter.Phase = "upstream"
	filter.Owner = "provider"
	filter.Source = strings.TrimSpace(c.Query("error_source"))
	filter.Query = strings.TrimSpace(c.Query("q"))
	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}
	if v := strings.TrimSpace(c.Query("resolved")); v != "" {
		switch strings.ToLower(v) {
		case "1", "true", "yes":
			b := true
			filter.Resolved = &b
		case "0", "false", "no":
			b := false
			filter.Resolved = &b
		default:
			response.BadRequest(c, "Invalid resolved")
			return
		}
	}
	// status_codes: comma-separated non-negative ints; blanks skipped.
	if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
		parts := strings.Split(statusCodesStr, ",")
		out := make([]int, 0, len(parts))
		for _, part := range parts {
			p := strings.TrimSpace(part)
			if p == "" {
				continue
			}
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				response.BadRequest(c, "Invalid status_codes")
				return
			}
			out = append(out, n)
		}
		filter.StatusCodes = out
	}
	result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}
// GetUpstreamError returns upstream error detail.
// GET /api/v1/admin/ops/upstream-errors/:id
//
// Shares storage with request errors; proxies to the generic detail handler.
func (h *OpsHandler) GetUpstreamError(c *gin.Context) {
	h.GetErrorLogByID(c)
}
// RetryUpstreamError retries upstream error using the original account_id.
// POST /api/v1/admin/ops/upstream-errors/:id/retry
//
// Same flow as RetryRequestErrorClient but runs in upstream retry mode.
func (h *OpsHandler) RetryUpstreamError(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeUpstream, nil)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, result)
}
// ResolveUpstreamError toggles resolved status.
// PUT /api/v1/admin/ops/upstream-errors/:id/resolve
//
// Thin alias over UpdateErrorResolution; request/upstream errors share storage.
func (h *OpsHandler) ResolveUpstreamError(c *gin.Context) {
	h.UpdateErrorResolution(c)
}
// ==================== Existing endpoints ====================
// ListRequestDetails returns a request-level list (success + error) for drill-down.
// GET /api/v1/admin/ops/requests
func
(
h
*
OpsHandler
)
ListRequestDetails
(
c
*
gin
.
Context
)
{
...
...
@@ -242,6 +709,11 @@ func (h *OpsHandler) ListRequestDetails(c *gin.Context) {
// opsRetryRequest is the JSON body accepted by the retry endpoints.
type opsRetryRequest struct {
	Mode            string `json:"mode"`              // retry mode; defaults to client mode when empty
	PinnedAccountID *int64 `json:"pinned_account_id"` // optional account to pin the retry to
	Force           bool   `json:"force"`             // UI-level acknowledgement flag
}

// opsResolveRequest is the JSON body accepted by the resolve endpoints.
type opsResolveRequest struct {
	Resolved bool `json:"resolved"` // target resolution state
}
// RetryErrorRequest retries a failed request using stored request_body.
...
...
@@ -278,6 +750,16 @@ func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
req
.
Mode
=
service
.
OpsRetryModeClient
}
// Force flag is currently a UI-level acknowledgement. Server may still enforce safety constraints.
_
=
req
.
Force
// Legacy endpoint safety: only allow retrying the client request here.
// Upstream retries must go through the split endpoints.
if
strings
.
EqualFold
(
strings
.
TrimSpace
(
req
.
Mode
),
service
.
OpsRetryModeUpstream
)
{
response
.
BadRequest
(
c
,
"upstream retry is not supported on this endpoint"
)
return
}
result
,
err
:=
h
.
opsService
.
RetryError
(
c
.
Request
.
Context
(),
subject
.
UserID
,
id
,
req
.
Mode
,
req
.
PinnedAccountID
)
if
err
!=
nil
{
response
.
ErrorFrom
(
c
,
err
)
...
...
@@ -287,6 +769,81 @@ func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
response
.
Success
(
c
,
result
)
}
// ListRetryAttempts lists retry attempts for an error log.
// GET /api/v1/admin/ops/errors/:id/retries
//
// Optional "limit" query parameter (positive int) overrides the default of 50.
func (h *OpsHandler) ListRetryAttempts(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	limit := 50
	if v := strings.TrimSpace(c.Query("limit")); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil || n <= 0 {
			response.BadRequest(c, "Invalid limit")
			return
		}
		limit = n
	}
	items, err := h.opsService.ListRetryAttemptsByErrorID(c.Request.Context(), id, limit)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, items)
}
// UpdateErrorResolution allows manual resolve/unresolve.
// PUT /api/v1/admin/ops/errors/:id/resolve
//
// The acting admin's user id is recorded with the resolution change.
func (h *OpsHandler) UpdateErrorResolution(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}
	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}
	var req opsResolveRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	// Copy to a local so we can take its address for attribution.
	uid := subject.UserID
	if err := h.opsService.UpdateErrorResolution(c.Request.Context(), id, req.Resolved, &uid, nil); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"ok": true})
}
func
parseOpsTimeRange
(
c
*
gin
.
Context
,
defaultRange
string
)
(
time
.
Time
,
time
.
Time
,
error
)
{
startStr
:=
strings
.
TrimSpace
(
c
.
Query
(
"start_time"
))
endStr
:=
strings
.
TrimSpace
(
c
.
Query
(
"end_time"
))
...
...
@@ -358,6 +915,10 @@ func parseOpsDuration(v string) (time.Duration, bool) {
return
6
*
time
.
Hour
,
true
case
"24h"
:
return
24
*
time
.
Hour
,
true
case
"7d"
:
return
7
*
24
*
time
.
Hour
,
true
case
"30d"
:
return
30
*
24
*
time
.
Hour
,
true
default
:
return
0
,
false
}
...
...
backend/internal/handler/admin/proxy_handler.go
View file @
90bce60b
...
...
@@ -196,6 +196,28 @@ func (h *ProxyHandler) Delete(c *gin.Context) {
response
.
Success
(
c
,
gin
.
H
{
"message"
:
"Proxy deleted successfully"
})
}
// BatchDelete handles batch deleting proxies
// POST /api/v1/admin/proxies/batch-delete
//
// Body: {"ids": [...]} with at least one id (enforced by binding tags).
func (h *ProxyHandler) BatchDelete(c *gin.Context) {
	type BatchDeleteRequest struct {
		IDs []int64 `json:"ids" binding:"required,min=1"`
	}
	var req BatchDeleteRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	result, err := h.adminService.BatchDeleteProxies(c.Request.Context(), req.IDs)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, result)
}
// Test handles testing proxy connectivity
// POST /api/v1/admin/proxies/:id/test
func
(
h
*
ProxyHandler
)
Test
(
c
*
gin
.
Context
)
{
...
...
@@ -243,19 +265,17 @@ func (h *ProxyHandler) GetProxyAccounts(c *gin.Context) {
return
}
page
,
pageSize
:=
response
.
ParsePagination
(
c
)
accounts
,
total
,
err
:=
h
.
adminService
.
GetProxyAccounts
(
c
.
Request
.
Context
(),
proxyID
,
page
,
pageSize
)
accounts
,
err
:=
h
.
adminService
.
GetProxyAccounts
(
c
.
Request
.
Context
(),
proxyID
)
if
err
!=
nil
{
response
.
ErrorFrom
(
c
,
err
)
return
}
out
:=
make
([]
dto
.
Account
,
0
,
len
(
accounts
))
out
:=
make
([]
dto
.
Proxy
Account
Summary
,
0
,
len
(
accounts
))
for
i
:=
range
accounts
{
out
=
append
(
out
,
*
dto
.
AccountFromService
(
&
accounts
[
i
]))
out
=
append
(
out
,
*
dto
.
Proxy
Account
Summary
FromService
(
&
accounts
[
i
]))
}
response
.
Paginated
(
c
,
out
,
total
,
page
,
pageSize
)
response
.
Success
(
c
,
out
)
}
// BatchCreateProxyItem represents a single proxy in batch create request
...
...
backend/internal/handler/dto/mappers.go
View file @
90bce60b
...
...
@@ -125,6 +125,7 @@ func AccountFromServiceShallow(a *service.Account) *Account {
ProxyID
:
a
.
ProxyID
,
Concurrency
:
a
.
Concurrency
,
Priority
:
a
.
Priority
,
RateMultiplier
:
a
.
BillingRateMultiplier
(),
Status
:
a
.
Status
,
ErrorMessage
:
a
.
ErrorMessage
,
LastUsedAt
:
a
.
LastUsedAt
,
...
...
@@ -212,8 +213,24 @@ func ProxyWithAccountCountFromService(p *service.ProxyWithAccountCount) *ProxyWi
return
nil
}
return
&
ProxyWithAccountCount
{
Proxy
:
*
ProxyFromService
(
&
p
.
Proxy
),
AccountCount
:
p
.
AccountCount
,
Proxy
:
*
ProxyFromService
(
&
p
.
Proxy
),
AccountCount
:
p
.
AccountCount
,
LatencyMs
:
p
.
LatencyMs
,
LatencyStatus
:
p
.
LatencyStatus
,
LatencyMessage
:
p
.
LatencyMessage
,
}
}
// ProxyAccountSummaryFromService maps a service-layer proxy account summary
// to its DTO form. Returns nil for nil input.
func ProxyAccountSummaryFromService(a *service.ProxyAccountSummary) *ProxyAccountSummary {
	if a == nil {
		return nil
	}
	return &ProxyAccountSummary{
		ID:       a.ID,
		Name:     a.Name,
		Platform: a.Platform,
		Type:     a.Type,
		Notes:    a.Notes,
	}
}
...
...
@@ -279,6 +296,7 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, inclu
TotalCost
:
l
.
TotalCost
,
ActualCost
:
l
.
ActualCost
,
RateMultiplier
:
l
.
RateMultiplier
,
AccountRateMultiplier
:
l
.
AccountRateMultiplier
,
BillingType
:
l
.
BillingType
,
Stream
:
l
.
Stream
,
DurationMs
:
l
.
DurationMs
,
...
...
backend/internal/handler/dto/types.go
View file @
90bce60b
...
...
@@ -76,6 +76,7 @@ type Account struct {
ProxyID
*
int64
`json:"proxy_id"`
Concurrency
int
`json:"concurrency"`
Priority
int
`json:"priority"`
RateMultiplier
float64
`json:"rate_multiplier"`
Status
string
`json:"status"`
ErrorMessage
string
`json:"error_message"`
LastUsedAt
*
time
.
Time
`json:"last_used_at"`
...
...
@@ -129,7 +130,18 @@ type Proxy struct {
type
ProxyWithAccountCount
struct
{
Proxy
AccountCount
int64
`json:"account_count"`
AccountCount
int64
`json:"account_count"`
LatencyMs
*
int64
`json:"latency_ms,omitempty"`
LatencyStatus
string
`json:"latency_status,omitempty"`
LatencyMessage
string
`json:"latency_message,omitempty"`
}
// ProxyAccountSummary is a lightweight account view returned for a proxy's
// associated accounts.
type ProxyAccountSummary struct {
	ID       int64   `json:"id"`
	Name     string  `json:"name"`
	Platform string  `json:"platform"`
	Type     string  `json:"type"`
	Notes    *string `json:"notes,omitempty"`
}
type
RedeemCode
struct
{
...
...
@@ -169,13 +181,14 @@ type UsageLog struct {
CacheCreation5mTokens
int
`json:"cache_creation_5m_tokens"`
CacheCreation1hTokens
int
`json:"cache_creation_1h_tokens"`
InputCost
float64
`json:"input_cost"`
OutputCost
float64
`json:"output_cost"`
CacheCreationCost
float64
`json:"cache_creation_cost"`
CacheReadCost
float64
`json:"cache_read_cost"`
TotalCost
float64
`json:"total_cost"`
ActualCost
float64
`json:"actual_cost"`
RateMultiplier
float64
`json:"rate_multiplier"`
InputCost
float64
`json:"input_cost"`
OutputCost
float64
`json:"output_cost"`
CacheCreationCost
float64
`json:"cache_creation_cost"`
CacheReadCost
float64
`json:"cache_read_cost"`
TotalCost
float64
`json:"total_cost"`
ActualCost
float64
`json:"actual_cost"`
RateMultiplier
float64
`json:"rate_multiplier"`
AccountRateMultiplier
*
float64
`json:"account_rate_multiplier"`
BillingType
int8
`json:"billing_type"`
Stream
bool
`json:"stream"`
...
...
backend/internal/handler/ops_error_logger.go
View file @
90bce60b
...
...
@@ -544,6 +544,11 @@ func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
body
:=
w
.
buf
.
Bytes
()
parsed
:=
parseOpsErrorResponse
(
body
)
// Skip logging if the error should be filtered based on settings
if
shouldSkipOpsErrorLog
(
c
.
Request
.
Context
(),
ops
,
parsed
.
Message
,
string
(
body
),
c
.
Request
.
URL
.
Path
)
{
return
}
apiKey
,
_
:=
middleware2
.
GetAPIKeyFromContext
(
c
)
clientRequestID
,
_
:=
c
.
Request
.
Context
()
.
Value
(
ctxkey
.
ClientRequestID
)
.
(
string
)
...
...
@@ -832,28 +837,30 @@ func normalizeOpsErrorType(errType string, code string) string {
func
classifyOpsPhase
(
errType
,
message
,
code
string
)
string
{
msg
:=
strings
.
ToLower
(
message
)
// Standardized phases: request|auth|routing|upstream|network|internal
// Map billing/concurrency/response => request; scheduling => routing.
switch
strings
.
TrimSpace
(
code
)
{
case
"INSUFFICIENT_BALANCE"
,
"USAGE_LIMIT_EXCEEDED"
,
"SUBSCRIPTION_NOT_FOUND"
,
"SUBSCRIPTION_INVALID"
:
return
"
billing
"
return
"
request
"
}
switch
errType
{
case
"authentication_error"
:
return
"auth"
case
"billing_error"
,
"subscription_error"
:
return
"
billing
"
return
"
request
"
case
"rate_limit_error"
:
if
strings
.
Contains
(
msg
,
"concurrency"
)
||
strings
.
Contains
(
msg
,
"pending"
)
||
strings
.
Contains
(
msg
,
"queue"
)
{
return
"
concurrency
"
return
"
request
"
}
return
"upstream"
case
"invalid_request_error"
:
return
"re
sponse
"
return
"re
quest
"
case
"upstream_error"
,
"overloaded_error"
:
return
"upstream"
case
"api_error"
:
if
strings
.
Contains
(
msg
,
"no available accounts"
)
{
return
"
schedul
ing"
return
"
rout
ing"
}
return
"internal"
default
:
...
...
@@ -914,34 +921,38 @@ func classifyOpsIsBusinessLimited(errType, phase, code string, status int, messa
}
func
classifyOpsErrorOwner
(
phase
string
,
message
string
)
string
{
// Standardized owners: client|provider|platform
switch
phase
{
case
"upstream"
,
"network"
:
return
"provider"
case
"
billing"
,
"concurrency"
,
"auth"
,
"response
"
:
case
"
request"
,
"auth
"
:
return
"client"
case
"routing"
,
"internal"
:
return
"platform"
default
:
if
strings
.
Contains
(
strings
.
ToLower
(
message
),
"upstream"
)
{
return
"provider"
}
return
"
sub2api
"
return
"
platform
"
}
}
func
classifyOpsErrorSource
(
phase
string
,
message
string
)
string
{
// Standardized sources: client_request|upstream_http|gateway
switch
phase
{
case
"upstream"
:
return
"upstream_http"
case
"network"
:
return
"
upstream_network
"
case
"
billing
"
:
return
"
billing
"
case
"
concurrency
"
:
return
"
concurrenc
y"
return
"
gateway
"
case
"
request"
,
"auth
"
:
return
"
client_request
"
case
"
routing"
,
"internal
"
:
return
"
gatewa
y"
default
:
if
strings
.
Contains
(
strings
.
ToLower
(
message
),
"upstream"
)
{
return
"upstream_http"
}
return
"
internal
"
return
"
gateway
"
}
}
...
...
@@ -963,3 +974,42 @@ func truncateString(s string, max int) string {
func
strconvItoa
(
v
int
)
string
{
return
strconv
.
Itoa
(
v
)
}
// shouldSkipOpsErrorLog determines if an error should be skipped from logging based on settings.
// Returns true for errors that should be filtered according to OpsAdvancedSettings.
//
// message is the parsed error message, body the raw response body, and
// requestPath the request URL path; matching is case-insensitive substring.
func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message, body, requestPath string) bool {
	if ops == nil {
		return false
	}
	// Get advanced settings to check filter configuration
	settings, err := ops.GetOpsAdvancedSettings(ctx)
	if err != nil || settings == nil {
		// If we can't get settings, don't skip (fail open)
		return false
	}
	msgLower := strings.ToLower(message)
	bodyLower := strings.ToLower(body)
	// Check if count_tokens errors should be ignored
	if settings.IgnoreCountTokensErrors && strings.Contains(requestPath, "/count_tokens") {
		return true
	}
	// Check if context canceled errors should be ignored (client disconnects)
	if settings.IgnoreContextCanceled {
		if strings.Contains(msgLower, "context canceled") || strings.Contains(bodyLower, "context canceled") {
			return true
		}
	}
	// Check if "no available accounts" errors should be ignored
	if settings.IgnoreNoAvailableAccounts {
		if strings.Contains(msgLower, "no available accounts") || strings.Contains(bodyLower, "no available accounts") {
			return true
		}
	}
	return false
}
backend/internal/pkg/usagestats/account_stats.go
View file @
90bce60b
package
usagestats
// AccountStats aggregates per-account usage statistics.
//
// Cost: account-perspective cost (total_cost * account_rate_multiplier).
// StandardCost: standard cost (total_cost, no multiplier applied).
// UserCost: user/API-key-perspective cost (actual_cost, affected by group multiplier).
type AccountStats struct {
	Requests     int64   `json:"requests"`
	Tokens       int64   `json:"tokens"`
	Cost         float64 `json:"cost"`
	StandardCost float64 `json:"standard_cost"`
	UserCost     float64 `json:"user_cost"`
}
backend/internal/pkg/usagestats/usage_log_types.go
View file @
90bce60b
...
...
@@ -147,14 +147,15 @@ type UsageLogFilters struct {
// UsageStats represents usage statistics
type
UsageStats
struct
{
TotalRequests
int64
`json:"total_requests"`
TotalInputTokens
int64
`json:"total_input_tokens"`
TotalOutputTokens
int64
`json:"total_output_tokens"`
TotalCacheTokens
int64
`json:"total_cache_tokens"`
TotalTokens
int64
`json:"total_tokens"`
TotalCost
float64
`json:"total_cost"`
TotalActualCost
float64
`json:"total_actual_cost"`
AverageDurationMs
float64
`json:"average_duration_ms"`
TotalRequests
int64
`json:"total_requests"`
TotalInputTokens
int64
`json:"total_input_tokens"`
TotalOutputTokens
int64
`json:"total_output_tokens"`
TotalCacheTokens
int64
`json:"total_cache_tokens"`
TotalTokens
int64
`json:"total_tokens"`
TotalCost
float64
`json:"total_cost"`
TotalActualCost
float64
`json:"total_actual_cost"`
TotalAccountCost
*
float64
`json:"total_account_cost,omitempty"`
AverageDurationMs
float64
`json:"average_duration_ms"`
}
// BatchUserUsageStats represents usage stats for a single user
...
...
@@ -177,25 +178,29 @@ type AccountUsageHistory struct {
Label
string
`json:"label"`
Requests
int64
`json:"requests"`
Tokens
int64
`json:"tokens"`
Cost
float64
`json:"cost"`
ActualCost
float64
`json:"actual_cost"`
Cost
float64
`json:"cost"`
// 标准计费(total_cost)
ActualCost
float64
`json:"actual_cost"`
// 账号口径费用(total_cost * account_rate_multiplier)
UserCost
float64
`json:"user_cost"`
// 用户口径费用(actual_cost,受分组倍率影响)
}
// AccountUsageSummary represents summary statistics for an account
type
AccountUsageSummary
struct
{
Days
int
`json:"days"`
ActualDaysUsed
int
`json:"actual_days_used"`
TotalCost
float64
`json:"total_cost"`
TotalCost
float64
`json:"total_cost"`
// 账号口径费用
TotalUserCost
float64
`json:"total_user_cost"`
// 用户口径费用
TotalStandardCost
float64
`json:"total_standard_cost"`
TotalRequests
int64
`json:"total_requests"`
TotalTokens
int64
`json:"total_tokens"`
AvgDailyCost
float64
`json:"avg_daily_cost"`
AvgDailyCost
float64
`json:"avg_daily_cost"`
// 账号口径日均
AvgDailyUserCost
float64
`json:"avg_daily_user_cost"`
AvgDailyRequests
float64
`json:"avg_daily_requests"`
AvgDailyTokens
float64
`json:"avg_daily_tokens"`
AvgDurationMs
float64
`json:"avg_duration_ms"`
Today
*
struct
{
Date
string
`json:"date"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
Requests
int64
`json:"requests"`
Tokens
int64
`json:"tokens"`
}
`json:"today"`
...
...
@@ -203,6 +208,7 @@ type AccountUsageSummary struct {
Date
string
`json:"date"`
Label
string
`json:"label"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
Requests
int64
`json:"requests"`
}
`json:"highest_cost_day"`
HighestRequestDay
*
struct
{
...
...
@@ -210,6 +216,7 @@ type AccountUsageSummary struct {
Label
string
`json:"label"`
Requests
int64
`json:"requests"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
}
`json:"highest_request_day"`
}
...
...
backend/internal/repository/account_repo.go
View file @
90bce60b
...
...
@@ -80,6 +80,10 @@ func (r *accountRepository) Create(ctx context.Context, account *service.Account
SetSchedulable
(
account
.
Schedulable
)
.
SetAutoPauseOnExpired
(
account
.
AutoPauseOnExpired
)
if
account
.
RateMultiplier
!=
nil
{
builder
.
SetRateMultiplier
(
*
account
.
RateMultiplier
)
}
if
account
.
ProxyID
!=
nil
{
builder
.
SetProxyID
(
*
account
.
ProxyID
)
}
...
...
@@ -291,6 +295,10 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account
SetSchedulable
(
account
.
Schedulable
)
.
SetAutoPauseOnExpired
(
account
.
AutoPauseOnExpired
)
if
account
.
RateMultiplier
!=
nil
{
builder
.
SetRateMultiplier
(
*
account
.
RateMultiplier
)
}
if
account
.
ProxyID
!=
nil
{
builder
.
SetProxyID
(
*
account
.
ProxyID
)
}
else
{
...
...
@@ -999,6 +1007,11 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates
args
=
append
(
args
,
*
updates
.
Priority
)
idx
++
}
if
updates
.
RateMultiplier
!=
nil
{
setClauses
=
append
(
setClauses
,
"rate_multiplier = $"
+
itoa
(
idx
))
args
=
append
(
args
,
*
updates
.
RateMultiplier
)
idx
++
}
if
updates
.
Status
!=
nil
{
setClauses
=
append
(
setClauses
,
"status = $"
+
itoa
(
idx
))
args
=
append
(
args
,
*
updates
.
Status
)
...
...
@@ -1347,6 +1360,8 @@ func accountEntityToService(m *dbent.Account) *service.Account {
return
nil
}
rateMultiplier
:=
m
.
RateMultiplier
return
&
service
.
Account
{
ID
:
m
.
ID
,
Name
:
m
.
Name
,
...
...
@@ -1358,6 +1373,7 @@ func accountEntityToService(m *dbent.Account) *service.Account {
ProxyID
:
m
.
ProxyID
,
Concurrency
:
m
.
Concurrency
,
Priority
:
m
.
Priority
,
RateMultiplier
:
&
rateMultiplier
,
Status
:
m
.
Status
,
ErrorMessage
:
derefString
(
m
.
ErrorMessage
),
LastUsedAt
:
m
.
LastUsedAt
,
...
...
backend/internal/repository/dashboard_aggregation_repo.go
View file @
90bce60b
...
...
@@ -8,6 +8,7 @@ import (
"strings"
"time"
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/lib/pq"
)
...
...
@@ -41,21 +42,22 @@ func isPostgresDriver(db *sql.DB) bool {
}
func
(
r
*
dashboardAggregationRepository
)
AggregateRange
(
ctx
context
.
Context
,
start
,
end
time
.
Time
)
error
{
startUTC
:=
start
.
UTC
()
endUTC
:=
end
.
UTC
()
if
!
endUTC
.
After
(
startUTC
)
{
loc
:=
timezone
.
Location
()
startLocal
:=
start
.
In
(
loc
)
endLocal
:=
end
.
In
(
loc
)
if
!
endLocal
.
After
(
startLocal
)
{
return
nil
}
hourStart
:=
start
UTC
.
Truncate
(
time
.
Hour
)
hourEnd
:=
end
UTC
.
Truncate
(
time
.
Hour
)
if
end
UTC
.
After
(
hourEnd
)
{
hourStart
:=
start
Local
.
Truncate
(
time
.
Hour
)
hourEnd
:=
end
Local
.
Truncate
(
time
.
Hour
)
if
end
Local
.
After
(
hourEnd
)
{
hourEnd
=
hourEnd
.
Add
(
time
.
Hour
)
}
dayStart
:=
truncateToDay
UTC
(
start
UTC
)
dayEnd
:=
truncateToDay
UTC
(
end
UTC
)
if
end
UTC
.
After
(
dayEnd
)
{
dayStart
:=
truncateToDay
(
start
Local
)
dayEnd
:=
truncateToDay
(
end
Local
)
if
end
Local
.
After
(
dayEnd
)
{
dayEnd
=
dayEnd
.
Add
(
24
*
time
.
Hour
)
}
...
...
@@ -146,38 +148,41 @@ func (r *dashboardAggregationRepository) EnsureUsageLogsPartitions(ctx context.C
}
func
(
r
*
dashboardAggregationRepository
)
insertHourlyActiveUsers
(
ctx
context
.
Context
,
start
,
end
time
.
Time
)
error
{
tzName
:=
timezone
.
Name
()
query
:=
`
INSERT INTO usage_dashboard_hourly_users (bucket_start, user_id)
SELECT DISTINCT
date_trunc('hour', created_at AT TIME ZONE
'UTC'
) AT TIME ZONE
'UTC'
AS bucket_start,
date_trunc('hour', created_at AT TIME ZONE
$3
) AT TIME ZONE
$3
AS bucket_start,
user_id
FROM usage_logs
WHERE created_at >= $1 AND created_at < $2
ON CONFLICT DO NOTHING
`
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
.
UTC
(),
end
.
UTC
()
)
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
,
end
,
tzName
)
return
err
}
func
(
r
*
dashboardAggregationRepository
)
insertDailyActiveUsers
(
ctx
context
.
Context
,
start
,
end
time
.
Time
)
error
{
tzName
:=
timezone
.
Name
()
query
:=
`
INSERT INTO usage_dashboard_daily_users (bucket_date, user_id)
SELECT DISTINCT
(bucket_start AT TIME ZONE
'UTC'
)::date AS bucket_date,
(bucket_start AT TIME ZONE
$3
)::date AS bucket_date,
user_id
FROM usage_dashboard_hourly_users
WHERE bucket_start >= $1 AND bucket_start < $2
ON CONFLICT DO NOTHING
`
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
.
UTC
(),
end
.
UTC
()
)
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
,
end
,
tzName
)
return
err
}
func
(
r
*
dashboardAggregationRepository
)
upsertHourlyAggregates
(
ctx
context
.
Context
,
start
,
end
time
.
Time
)
error
{
tzName
:=
timezone
.
Name
()
query
:=
`
WITH hourly AS (
SELECT
date_trunc('hour', created_at AT TIME ZONE
'UTC'
) AT TIME ZONE
'UTC'
AS bucket_start,
date_trunc('hour', created_at AT TIME ZONE
$3
) AT TIME ZONE
$3
AS bucket_start,
COUNT(*) AS total_requests,
COALESCE(SUM(input_tokens), 0) AS input_tokens,
COALESCE(SUM(output_tokens), 0) AS output_tokens,
...
...
@@ -236,15 +241,16 @@ func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Cont
active_users = EXCLUDED.active_users,
computed_at = EXCLUDED.computed_at
`
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
.
UTC
(),
end
.
UTC
()
)
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
,
end
,
tzName
)
return
err
}
func
(
r
*
dashboardAggregationRepository
)
upsertDailyAggregates
(
ctx
context
.
Context
,
start
,
end
time
.
Time
)
error
{
tzName
:=
timezone
.
Name
()
query
:=
`
WITH daily AS (
SELECT
(bucket_start AT TIME ZONE
'UTC'
)::date AS bucket_date,
(bucket_start AT TIME ZONE
$5
)::date AS bucket_date,
COALESCE(SUM(total_requests), 0) AS total_requests,
COALESCE(SUM(input_tokens), 0) AS input_tokens,
COALESCE(SUM(output_tokens), 0) AS output_tokens,
...
...
@@ -255,7 +261,7 @@ func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Conte
COALESCE(SUM(total_duration_ms), 0) AS total_duration_ms
FROM usage_dashboard_hourly
WHERE bucket_start >= $1 AND bucket_start < $2
GROUP BY (bucket_start AT TIME ZONE
'UTC'
)::date
GROUP BY (bucket_start AT TIME ZONE
$5
)::date
),
user_counts AS (
SELECT bucket_date, COUNT(*) AS active_users
...
...
@@ -303,7 +309,7 @@ func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Conte
active_users = EXCLUDED.active_users,
computed_at = EXCLUDED.computed_at
`
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
.
UTC
(),
end
.
UTC
(),
start
.
UTC
(),
end
.
UTC
()
)
_
,
err
:=
r
.
sql
.
ExecContext
(
ctx
,
query
,
start
,
end
,
start
,
end
,
tzName
)
return
err
}
...
...
@@ -376,9 +382,8 @@ func (r *dashboardAggregationRepository) createUsageLogsPartition(ctx context.Co
return
err
}
func
truncateToDayUTC
(
t
time
.
Time
)
time
.
Time
{
t
=
t
.
UTC
()
return
time
.
Date
(
t
.
Year
(),
t
.
Month
(),
t
.
Day
(),
0
,
0
,
0
,
0
,
time
.
UTC
)
func
truncateToDay
(
t
time
.
Time
)
time
.
Time
{
return
timezone
.
StartOfDay
(
t
)
}
func
truncateToMonthUTC
(
t
time
.
Time
)
time
.
Time
{
...
...
backend/internal/repository/ops_repo.go
View file @
90bce60b
This diff is collapsed.
Click to expand it.
backend/internal/repository/ops_repo_alerts.go
View file @
90bce60b
...
...
@@ -354,7 +354,7 @@ SELECT
created_at
FROM ops_alert_events
`
+
where
+
`
ORDER BY fired_at DESC
ORDER BY fired_at DESC
, id DESC
LIMIT `
+
limitArg
rows
,
err
:=
r
.
db
.
QueryContext
(
ctx
,
q
,
args
...
)
...
...
@@ -413,6 +413,43 @@ LIMIT ` + limitArg
return
out
,
nil
}
func
(
r
*
opsRepository
)
GetAlertEventByID
(
ctx
context
.
Context
,
eventID
int64
)
(
*
service
.
OpsAlertEvent
,
error
)
{
if
r
==
nil
||
r
.
db
==
nil
{
return
nil
,
fmt
.
Errorf
(
"nil ops repository"
)
}
if
eventID
<=
0
{
return
nil
,
fmt
.
Errorf
(
"invalid event id"
)
}
q
:=
`
SELECT
id,
COALESCE(rule_id, 0),
COALESCE(severity, ''),
COALESCE(status, ''),
COALESCE(title, ''),
COALESCE(description, ''),
metric_value,
threshold_value,
dimensions,
fired_at,
resolved_at,
email_sent,
created_at
FROM ops_alert_events
WHERE id = $1`
row
:=
r
.
db
.
QueryRowContext
(
ctx
,
q
,
eventID
)
ev
,
err
:=
scanOpsAlertEvent
(
row
)
if
err
!=
nil
{
if
err
==
sql
.
ErrNoRows
{
return
nil
,
nil
}
return
nil
,
err
}
return
ev
,
nil
}
func
(
r
*
opsRepository
)
GetActiveAlertEvent
(
ctx
context
.
Context
,
ruleID
int64
)
(
*
service
.
OpsAlertEvent
,
error
)
{
if
r
==
nil
||
r
.
db
==
nil
{
return
nil
,
fmt
.
Errorf
(
"nil ops repository"
)
...
...
@@ -591,6 +628,121 @@ type opsAlertEventRow interface {
Scan
(
dest
...
any
)
error
}
func
(
r
*
opsRepository
)
CreateAlertSilence
(
ctx
context
.
Context
,
input
*
service
.
OpsAlertSilence
)
(
*
service
.
OpsAlertSilence
,
error
)
{
if
r
==
nil
||
r
.
db
==
nil
{
return
nil
,
fmt
.
Errorf
(
"nil ops repository"
)
}
if
input
==
nil
{
return
nil
,
fmt
.
Errorf
(
"nil input"
)
}
if
input
.
RuleID
<=
0
{
return
nil
,
fmt
.
Errorf
(
"invalid rule_id"
)
}
platform
:=
strings
.
TrimSpace
(
input
.
Platform
)
if
platform
==
""
{
return
nil
,
fmt
.
Errorf
(
"invalid platform"
)
}
if
input
.
Until
.
IsZero
()
{
return
nil
,
fmt
.
Errorf
(
"invalid until"
)
}
q
:=
`
INSERT INTO ops_alert_silences (
rule_id,
platform,
group_id,
region,
until,
reason,
created_by,
created_at
) VALUES (
$1,$2,$3,$4,$5,$6,$7,NOW()
)
RETURNING id, rule_id, platform, group_id, region, until, COALESCE(reason,''), created_by, created_at`
row
:=
r
.
db
.
QueryRowContext
(
ctx
,
q
,
input
.
RuleID
,
platform
,
opsNullInt64
(
input
.
GroupID
),
opsNullString
(
input
.
Region
),
input
.
Until
,
opsNullString
(
input
.
Reason
),
opsNullInt64
(
input
.
CreatedBy
),
)
var
out
service
.
OpsAlertSilence
var
groupID
sql
.
NullInt64
var
region
sql
.
NullString
var
createdBy
sql
.
NullInt64
if
err
:=
row
.
Scan
(
&
out
.
ID
,
&
out
.
RuleID
,
&
out
.
Platform
,
&
groupID
,
&
region
,
&
out
.
Until
,
&
out
.
Reason
,
&
createdBy
,
&
out
.
CreatedAt
,
);
err
!=
nil
{
return
nil
,
err
}
if
groupID
.
Valid
{
v
:=
groupID
.
Int64
out
.
GroupID
=
&
v
}
if
region
.
Valid
{
v
:=
strings
.
TrimSpace
(
region
.
String
)
if
v
!=
""
{
out
.
Region
=
&
v
}
}
if
createdBy
.
Valid
{
v
:=
createdBy
.
Int64
out
.
CreatedBy
=
&
v
}
return
&
out
,
nil
}
func
(
r
*
opsRepository
)
IsAlertSilenced
(
ctx
context
.
Context
,
ruleID
int64
,
platform
string
,
groupID
*
int64
,
region
*
string
,
now
time
.
Time
)
(
bool
,
error
)
{
if
r
==
nil
||
r
.
db
==
nil
{
return
false
,
fmt
.
Errorf
(
"nil ops repository"
)
}
if
ruleID
<=
0
{
return
false
,
fmt
.
Errorf
(
"invalid rule id"
)
}
platform
=
strings
.
TrimSpace
(
platform
)
if
platform
==
""
{
return
false
,
nil
}
if
now
.
IsZero
()
{
now
=
time
.
Now
()
.
UTC
()
}
q
:=
`
SELECT 1
FROM ops_alert_silences
WHERE rule_id = $1
AND platform = $2
AND (group_id IS NOT DISTINCT FROM $3)
AND (region IS NOT DISTINCT FROM $4)
AND until > $5
LIMIT 1`
var
dummy
int
err
:=
r
.
db
.
QueryRowContext
(
ctx
,
q
,
ruleID
,
platform
,
opsNullInt64
(
groupID
),
opsNullString
(
region
),
now
)
.
Scan
(
&
dummy
)
if
err
!=
nil
{
if
err
==
sql
.
ErrNoRows
{
return
false
,
nil
}
return
false
,
err
}
return
true
,
nil
}
func
scanOpsAlertEvent
(
row
opsAlertEventRow
)
(
*
service
.
OpsAlertEvent
,
error
)
{
var
ev
service
.
OpsAlertEvent
var
metricValue
sql
.
NullFloat64
...
...
@@ -652,6 +804,10 @@ func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []an
args
=
append
(
args
,
severity
)
clauses
=
append
(
clauses
,
"severity = $"
+
itoa
(
len
(
args
)))
}
if
filter
.
EmailSent
!=
nil
{
args
=
append
(
args
,
*
filter
.
EmailSent
)
clauses
=
append
(
clauses
,
"email_sent = $"
+
itoa
(
len
(
args
)))
}
if
filter
.
StartTime
!=
nil
&&
!
filter
.
StartTime
.
IsZero
()
{
args
=
append
(
args
,
*
filter
.
StartTime
)
clauses
=
append
(
clauses
,
"fired_at >= $"
+
itoa
(
len
(
args
)))
...
...
@@ -661,6 +817,14 @@ func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []an
clauses
=
append
(
clauses
,
"fired_at < $"
+
itoa
(
len
(
args
)))
}
// Cursor pagination (descending by fired_at, then id)
if
filter
.
BeforeFiredAt
!=
nil
&&
!
filter
.
BeforeFiredAt
.
IsZero
()
&&
filter
.
BeforeID
!=
nil
&&
*
filter
.
BeforeID
>
0
{
args
=
append
(
args
,
*
filter
.
BeforeFiredAt
)
tsArg
:=
"$"
+
itoa
(
len
(
args
))
args
=
append
(
args
,
*
filter
.
BeforeID
)
idArg
:=
"$"
+
itoa
(
len
(
args
))
clauses
=
append
(
clauses
,
fmt
.
Sprintf
(
"(fired_at < %s OR (fired_at = %s AND id < %s))"
,
tsArg
,
tsArg
,
idArg
))
}
// Dimensions are stored in JSONB. We filter best-effort without requiring GIN indexes.
if
platform
:=
strings
.
TrimSpace
(
filter
.
Platform
);
platform
!=
""
{
args
=
append
(
args
,
platform
)
...
...
backend/internal/repository/proxy_latency_cache.go
0 → 100644
View file @
90bce60b
package
repository
import
(
"context"
"encoding/json"
"fmt"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/redis/go-redis/v9"
)
const
proxyLatencyKeyPrefix
=
"proxy:latency:"
func
proxyLatencyKey
(
proxyID
int64
)
string
{
return
fmt
.
Sprintf
(
"%s%d"
,
proxyLatencyKeyPrefix
,
proxyID
)
}
type
proxyLatencyCache
struct
{
rdb
*
redis
.
Client
}
func
NewProxyLatencyCache
(
rdb
*
redis
.
Client
)
service
.
ProxyLatencyCache
{
return
&
proxyLatencyCache
{
rdb
:
rdb
}
}
func
(
c
*
proxyLatencyCache
)
GetProxyLatencies
(
ctx
context
.
Context
,
proxyIDs
[]
int64
)
(
map
[
int64
]
*
service
.
ProxyLatencyInfo
,
error
)
{
results
:=
make
(
map
[
int64
]
*
service
.
ProxyLatencyInfo
)
if
len
(
proxyIDs
)
==
0
{
return
results
,
nil
}
keys
:=
make
([]
string
,
0
,
len
(
proxyIDs
))
for
_
,
id
:=
range
proxyIDs
{
keys
=
append
(
keys
,
proxyLatencyKey
(
id
))
}
values
,
err
:=
c
.
rdb
.
MGet
(
ctx
,
keys
...
)
.
Result
()
if
err
!=
nil
{
return
results
,
err
}
for
i
,
raw
:=
range
values
{
if
raw
==
nil
{
continue
}
var
payload
[]
byte
switch
v
:=
raw
.
(
type
)
{
case
string
:
payload
=
[]
byte
(
v
)
case
[]
byte
:
payload
=
v
default
:
continue
}
var
info
service
.
ProxyLatencyInfo
if
err
:=
json
.
Unmarshal
(
payload
,
&
info
);
err
!=
nil
{
continue
}
results
[
proxyIDs
[
i
]]
=
&
info
}
return
results
,
nil
}
func
(
c
*
proxyLatencyCache
)
SetProxyLatency
(
ctx
context
.
Context
,
proxyID
int64
,
info
*
service
.
ProxyLatencyInfo
)
error
{
if
info
==
nil
{
return
nil
}
payload
,
err
:=
json
.
Marshal
(
info
)
if
err
!=
nil
{
return
err
}
return
c
.
rdb
.
Set
(
ctx
,
proxyLatencyKey
(
proxyID
),
payload
,
0
)
.
Err
()
}
backend/internal/repository/proxy_probe_service.go
View file @
90bce60b
...
...
@@ -34,7 +34,10 @@ func NewProxyExitInfoProber(cfg *config.Config) service.ProxyExitInfoProber {
}
}
const
defaultIPInfoURL
=
"https://ipinfo.io/json"
const
(
defaultIPInfoURL
=
"https://ipinfo.io/json"
defaultProxyProbeTimeout
=
30
*
time
.
Second
)
type
proxyProbeService
struct
{
ipInfoURL
string
...
...
@@ -46,7 +49,7 @@ type proxyProbeService struct {
func
(
s
*
proxyProbeService
)
ProbeProxy
(
ctx
context
.
Context
,
proxyURL
string
)
(
*
service
.
ProxyExitInfo
,
int64
,
error
)
{
client
,
err
:=
httpclient
.
GetClient
(
httpclient
.
Options
{
ProxyURL
:
proxyURL
,
Timeout
:
15
*
time
.
Second
,
Timeout
:
defaultProxyProbeTimeout
,
InsecureSkipVerify
:
s
.
insecureSkipVerify
,
ProxyStrict
:
true
,
ValidateResolvedIP
:
s
.
validateResolvedIP
,
...
...
backend/internal/repository/proxy_repo.go
View file @
90bce60b
...
...
@@ -219,12 +219,54 @@ func (r *proxyRepository) ExistsByHostPortAuth(ctx context.Context, host string,
// CountAccountsByProxyID returns the number of accounts using a specific proxy
func
(
r
*
proxyRepository
)
CountAccountsByProxyID
(
ctx
context
.
Context
,
proxyID
int64
)
(
int64
,
error
)
{
var
count
int64
if
err
:=
scanSingleRow
(
ctx
,
r
.
sql
,
"SELECT COUNT(*) FROM accounts WHERE proxy_id = $1"
,
[]
any
{
proxyID
},
&
count
);
err
!=
nil
{
if
err
:=
scanSingleRow
(
ctx
,
r
.
sql
,
"SELECT COUNT(*) FROM accounts WHERE proxy_id = $1
AND deleted_at IS NULL
"
,
[]
any
{
proxyID
},
&
count
);
err
!=
nil
{
return
0
,
err
}
return
count
,
nil
}
func
(
r
*
proxyRepository
)
ListAccountSummariesByProxyID
(
ctx
context
.
Context
,
proxyID
int64
)
([]
service
.
ProxyAccountSummary
,
error
)
{
rows
,
err
:=
r
.
sql
.
QueryContext
(
ctx
,
`
SELECT id, name, platform, type, notes
FROM accounts
WHERE proxy_id = $1 AND deleted_at IS NULL
ORDER BY id DESC
`
,
proxyID
)
if
err
!=
nil
{
return
nil
,
err
}
defer
func
()
{
_
=
rows
.
Close
()
}()
out
:=
make
([]
service
.
ProxyAccountSummary
,
0
)
for
rows
.
Next
()
{
var
(
id
int64
name
string
platform
string
accType
string
notes
sql
.
NullString
)
if
err
:=
rows
.
Scan
(
&
id
,
&
name
,
&
platform
,
&
accType
,
&
notes
);
err
!=
nil
{
return
nil
,
err
}
var
notesPtr
*
string
if
notes
.
Valid
{
notesPtr
=
&
notes
.
String
}
out
=
append
(
out
,
service
.
ProxyAccountSummary
{
ID
:
id
,
Name
:
name
,
Platform
:
platform
,
Type
:
accType
,
Notes
:
notesPtr
,
})
}
if
err
:=
rows
.
Err
();
err
!=
nil
{
return
nil
,
err
}
return
out
,
nil
}
// GetAccountCountsForProxies returns a map of proxy ID to account count for all proxies
func
(
r
*
proxyRepository
)
GetAccountCountsForProxies
(
ctx
context
.
Context
)
(
counts
map
[
int64
]
int64
,
err
error
)
{
rows
,
err
:=
r
.
sql
.
QueryContext
(
ctx
,
"SELECT proxy_id, COUNT(*) AS count FROM accounts WHERE proxy_id IS NOT NULL AND deleted_at IS NULL GROUP BY proxy_id"
)
...
...
backend/internal/repository/scheduler_snapshot_outbox_integration_test.go
View file @
90bce60b
...
...
@@ -27,7 +27,7 @@ func TestSchedulerSnapshotOutboxReplay(t *testing.T) {
RunMode
:
config
.
RunModeStandard
,
Gateway
:
config
.
GatewayConfig
{
Scheduling
:
config
.
GatewaySchedulingConfig
{
OutboxPollIntervalSeconds
:
1
,
OutboxPollIntervalSeconds
:
1
,
FullRebuildIntervalSeconds
:
0
,
DbFallbackEnabled
:
true
,
},
...
...
backend/internal/repository/usage_log_repo.go
View file @
90bce60b
...
...
@@ -22,7 +22,7 @@ import (
"github.com/lib/pq"
)
const
usageLogSelectColumns
=
"id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at"
const
usageLogSelectColumns
=
"id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier,
account_rate_multiplier,
billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at"
type
usageLogRepository
struct
{
client
*
dbent
.
Client
...
...
@@ -105,6 +105,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
total_cost,
actual_cost,
rate_multiplier,
account_rate_multiplier,
billing_type,
stream,
duration_ms,
...
...
@@ -120,7 +121,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
$8, $9, $10, $11,
$12, $13,
$14, $15, $16, $17, $18, $19,
$20, $21, $22, $23, $24, $25, $26, $27, $28, $29
$20, $21, $22, $23, $24, $25, $26, $27, $28, $29
, $30
)
ON CONFLICT (request_id, api_key_id) DO NOTHING
RETURNING id, created_at
...
...
@@ -160,6 +161,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
log
.
TotalCost
,
log
.
ActualCost
,
rateMultiplier
,
log
.
AccountRateMultiplier
,
log
.
BillingType
,
log
.
Stream
,
duration
,
...
...
@@ -270,13 +272,13 @@ type DashboardStats = usagestats.DashboardStats
func
(
r
*
usageLogRepository
)
GetDashboardStats
(
ctx
context
.
Context
)
(
*
DashboardStats
,
error
)
{
stats
:=
&
DashboardStats
{}
now
:=
time
.
Now
()
.
UTC
()
today
UTC
:=
truncate
To
D
ay
UTC
(
now
)
now
:=
time
zone
.
Now
()
today
Start
:=
timezone
.
To
d
ay
(
)
if
err
:=
r
.
fillDashboardEntityStats
(
ctx
,
stats
,
today
UTC
,
now
);
err
!=
nil
{
if
err
:=
r
.
fillDashboardEntityStats
(
ctx
,
stats
,
today
Start
,
now
);
err
!=
nil
{
return
nil
,
err
}
if
err
:=
r
.
fillDashboardUsageStatsAggregated
(
ctx
,
stats
,
today
UTC
,
now
);
err
!=
nil
{
if
err
:=
r
.
fillDashboardUsageStatsAggregated
(
ctx
,
stats
,
today
Start
,
now
);
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -298,13 +300,13 @@ func (r *usageLogRepository) GetDashboardStatsWithRange(ctx context.Context, sta
}
stats
:=
&
DashboardStats
{}
now
:=
time
.
Now
()
.
UTC
()
today
UTC
:=
truncate
To
D
ay
UTC
(
now
)
now
:=
time
zone
.
Now
()
today
Start
:=
timezone
.
To
d
ay
(
)
if
err
:=
r
.
fillDashboardEntityStats
(
ctx
,
stats
,
today
UTC
,
now
);
err
!=
nil
{
if
err
:=
r
.
fillDashboardEntityStats
(
ctx
,
stats
,
today
Start
,
now
);
err
!=
nil
{
return
nil
,
err
}
if
err
:=
r
.
fillDashboardUsageStatsFromUsageLogs
(
ctx
,
stats
,
startUTC
,
endUTC
,
today
UTC
,
now
);
err
!=
nil
{
if
err
:=
r
.
fillDashboardUsageStatsFromUsageLogs
(
ctx
,
stats
,
startUTC
,
endUTC
,
today
Start
,
now
);
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -455,7 +457,7 @@ func (r *usageLogRepository) fillDashboardUsageStatsAggregated(ctx context.Conte
FROM usage_dashboard_hourly
WHERE bucket_start = $1
`
hourStart
:=
now
.
UTC
(
)
.
Truncate
(
time
.
Hour
)
hourStart
:=
now
.
In
(
timezone
.
Location
()
)
.
Truncate
(
time
.
Hour
)
if
err
:=
scanSingleRow
(
ctx
,
r
.
sql
,
hourlyActiveQuery
,
[]
any
{
hourStart
},
&
stats
.
HourlyActiveUsers
);
err
!=
nil
{
if
err
!=
sql
.
ErrNoRows
{
return
err
...
...
@@ -835,7 +837,9 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID
SELECT
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
COALESCE(SUM(actual_cost), 0) as cost
COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
COALESCE(SUM(total_cost), 0) as standard_cost,
COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2
`
...
...
@@ -849,6 +853,8 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID
&
stats
.
Requests
,
&
stats
.
Tokens
,
&
stats
.
Cost
,
&
stats
.
StandardCost
,
&
stats
.
UserCost
,
);
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -861,7 +867,9 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI
SELECT
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
COALESCE(SUM(actual_cost), 0) as cost
COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
COALESCE(SUM(total_cost), 0) as standard_cost,
COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2
`
...
...
@@ -875,6 +883,8 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI
&
stats
.
Requests
,
&
stats
.
Tokens
,
&
stats
.
Cost
,
&
stats
.
StandardCost
,
&
stats
.
UserCost
,
);
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -1400,8 +1410,8 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe
return
result
,
nil
}
// GetUsageTrendWithFilters returns usage trend data with optional
user/api_key
filters
func
(
r
*
usageLogRepository
)
GetUsageTrendWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
granularity
string
,
userID
,
apiKeyID
int64
)
(
results
[]
TrendDataPoint
,
err
error
)
{
// GetUsageTrendWithFilters returns usage trend data with optional filters
func
(
r
*
usageLogRepository
)
GetUsageTrendWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
granularity
string
,
userID
,
apiKeyID
,
accountID
,
groupID
int64
,
model
string
,
stream
*
bool
)
(
results
[]
TrendDataPoint
,
err
error
)
{
dateFormat
:=
"YYYY-MM-DD"
if
granularity
==
"hour"
{
dateFormat
=
"YYYY-MM-DD HH24:00"
...
...
@@ -1430,6 +1440,22 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
query
+=
fmt
.
Sprintf
(
" AND api_key_id = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
apiKeyID
)
}
if
accountID
>
0
{
query
+=
fmt
.
Sprintf
(
" AND account_id = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
accountID
)
}
if
groupID
>
0
{
query
+=
fmt
.
Sprintf
(
" AND group_id = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
groupID
)
}
if
model
!=
""
{
query
+=
fmt
.
Sprintf
(
" AND model = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
model
)
}
if
stream
!=
nil
{
query
+=
fmt
.
Sprintf
(
" AND stream = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
*
stream
)
}
query
+=
" GROUP BY date ORDER BY date ASC"
rows
,
err
:=
r
.
sql
.
QueryContext
(
ctx
,
query
,
args
...
)
...
...
@@ -1452,9 +1478,15 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
return
results
,
nil
}
// GetModelStatsWithFilters returns model statistics with optional user/api_key filters
func
(
r
*
usageLogRepository
)
GetModelStatsWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
userID
,
apiKeyID
,
accountID
int64
)
(
results
[]
ModelStat
,
err
error
)
{
query
:=
`
// GetModelStatsWithFilters returns model statistics with optional filters
func
(
r
*
usageLogRepository
)
GetModelStatsWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
userID
,
apiKeyID
,
accountID
,
groupID
int64
,
stream
*
bool
)
(
results
[]
ModelStat
,
err
error
)
{
actualCostExpr
:=
"COALESCE(SUM(actual_cost), 0) as actual_cost"
// 当仅按 account_id 聚合时,实际费用使用账号倍率(total_cost * account_rate_multiplier)。
if
accountID
>
0
&&
userID
==
0
&&
apiKeyID
==
0
{
actualCostExpr
=
"COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost"
}
query
:=
fmt
.
Sprintf
(
`
SELECT
model,
COUNT(*) as requests,
...
...
@@ -1462,10 +1494,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
COALESCE(SUM(output_tokens), 0) as output_tokens,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
COALESCE(SUM(total_cost), 0) as cost,
COALESCE(SUM(actual_cost), 0) as actual_cost
%s
FROM usage_logs
WHERE created_at >= $1 AND created_at < $2
`
`
,
actualCostExpr
)
args
:=
[]
any
{
startTime
,
endTime
}
if
userID
>
0
{
...
...
@@ -1480,6 +1512,14 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
query
+=
fmt
.
Sprintf
(
" AND account_id = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
accountID
)
}
if
groupID
>
0
{
query
+=
fmt
.
Sprintf
(
" AND group_id = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
groupID
)
}
if
stream
!=
nil
{
query
+=
fmt
.
Sprintf
(
" AND stream = $%d"
,
len
(
args
)
+
1
)
args
=
append
(
args
,
*
stream
)
}
query
+=
" GROUP BY model ORDER BY total_tokens DESC"
rows
,
err
:=
r
.
sql
.
QueryContext
(
ctx
,
query
,
args
...
)
...
...
@@ -1587,12 +1627,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us
COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
COALESCE(SUM(total_cost), 0) as total_cost,
COALESCE(SUM(actual_cost), 0) as total_actual_cost,
COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as total_account_cost,
COALESCE(AVG(duration_ms), 0) as avg_duration_ms
FROM usage_logs
%s
`
,
buildWhere
(
conditions
))
stats
:=
&
UsageStats
{}
var
totalAccountCost
float64
if
err
:=
scanSingleRow
(
ctx
,
r
.
sql
,
...
...
@@ -1604,10 +1646,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us
&
stats
.
TotalCacheTokens
,
&
stats
.
TotalCost
,
&
stats
.
TotalActualCost
,
&
totalAccountCost
,
&
stats
.
AverageDurationMs
,
);
err
!=
nil
{
return
nil
,
err
}
if
filters
.
AccountID
>
0
{
stats
.
TotalAccountCost
=
&
totalAccountCost
}
stats
.
TotalTokens
=
stats
.
TotalInputTokens
+
stats
.
TotalOutputTokens
+
stats
.
TotalCacheTokens
return
stats
,
nil
}
...
...
@@ -1634,7 +1680,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
COALESCE(SUM(total_cost), 0) as cost,
COALESCE(SUM(actual_cost), 0) as actual_cost
COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost,
COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2 AND created_at < $3
GROUP BY date
...
...
@@ -1661,7 +1708,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
var
tokens
int64
var
cost
float64
var
actualCost
float64
if
err
=
rows
.
Scan
(
&
date
,
&
requests
,
&
tokens
,
&
cost
,
&
actualCost
);
err
!=
nil
{
var
userCost
float64
if
err
=
rows
.
Scan
(
&
date
,
&
requests
,
&
tokens
,
&
cost
,
&
actualCost
,
&
userCost
);
err
!=
nil
{
return
nil
,
err
}
t
,
_
:=
time
.
Parse
(
"2006-01-02"
,
date
)
...
...
@@ -1672,19 +1720,21 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Tokens
:
tokens
,
Cost
:
cost
,
ActualCost
:
actualCost
,
UserCost
:
userCost
,
})
}
if
err
=
rows
.
Err
();
err
!=
nil
{
return
nil
,
err
}
var
totalAc
tual
Cost
,
totalStandardCost
float64
var
totalAc
countCost
,
totalUser
Cost
,
totalStandardCost
float64
var
totalRequests
,
totalTokens
int64
var
highestCostDay
,
highestRequestDay
*
AccountUsageHistory
for
i
:=
range
history
{
h
:=
&
history
[
i
]
totalActualCost
+=
h
.
ActualCost
totalAccountCost
+=
h
.
ActualCost
totalUserCost
+=
h
.
UserCost
totalStandardCost
+=
h
.
Cost
totalRequests
+=
h
.
Requests
totalTokens
+=
h
.
Tokens
...
...
@@ -1711,11 +1761,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
summary
:=
AccountUsageSummary
{
Days
:
daysCount
,
ActualDaysUsed
:
actualDaysUsed
,
TotalCost
:
totalActualCost
,
TotalCost
:
totalAccountCost
,
TotalUserCost
:
totalUserCost
,
TotalStandardCost
:
totalStandardCost
,
TotalRequests
:
totalRequests
,
TotalTokens
:
totalTokens
,
AvgDailyCost
:
totalActualCost
/
float64
(
actualDaysUsed
),
AvgDailyCost
:
totalAccountCost
/
float64
(
actualDaysUsed
),
AvgDailyUserCost
:
totalUserCost
/
float64
(
actualDaysUsed
),
AvgDailyRequests
:
float64
(
totalRequests
)
/
float64
(
actualDaysUsed
),
AvgDailyTokens
:
float64
(
totalTokens
)
/
float64
(
actualDaysUsed
),
AvgDurationMs
:
avgDuration
,
...
...
@@ -1727,11 +1779,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
summary
.
Today
=
&
struct
{
Date
string
`json:"date"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
Requests
int64
`json:"requests"`
Tokens
int64
`json:"tokens"`
}{
Date
:
history
[
i
]
.
Date
,
Cost
:
history
[
i
]
.
ActualCost
,
UserCost
:
history
[
i
]
.
UserCost
,
Requests
:
history
[
i
]
.
Requests
,
Tokens
:
history
[
i
]
.
Tokens
,
}
...
...
@@ -1744,11 +1798,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Date
string
`json:"date"`
Label
string
`json:"label"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
Requests
int64
`json:"requests"`
}{
Date
:
highestCostDay
.
Date
,
Label
:
highestCostDay
.
Label
,
Cost
:
highestCostDay
.
ActualCost
,
UserCost
:
highestCostDay
.
UserCost
,
Requests
:
highestCostDay
.
Requests
,
}
}
...
...
@@ -1759,15 +1815,17 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Label
string
`json:"label"`
Requests
int64
`json:"requests"`
Cost
float64
`json:"cost"`
UserCost
float64
`json:"user_cost"`
}{
Date
:
highestRequestDay
.
Date
,
Label
:
highestRequestDay
.
Label
,
Requests
:
highestRequestDay
.
Requests
,
Cost
:
highestRequestDay
.
ActualCost
,
UserCost
:
highestRequestDay
.
UserCost
,
}
}
models
,
err
:=
r
.
GetModelStatsWithFilters
(
ctx
,
startTime
,
endTime
,
0
,
0
,
accountID
)
models
,
err
:=
r
.
GetModelStatsWithFilters
(
ctx
,
startTime
,
endTime
,
0
,
0
,
accountID
,
0
,
nil
)
if
err
!=
nil
{
models
=
[]
ModelStat
{}
}
...
...
@@ -1994,36 +2052,37 @@ func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64)
func
scanUsageLog
(
scanner
interface
{
Scan
(
...
any
)
error
})
(
*
service
.
UsageLog
,
error
)
{
var
(
id
int64
userID
int64
apiKeyID
int64
accountID
int64
requestID
sql
.
NullString
model
string
groupID
sql
.
NullInt64
subscriptionID
sql
.
NullInt64
inputTokens
int
outputTokens
int
cacheCreationTokens
int
cacheReadTokens
int
cacheCreation5m
int
cacheCreation1h
int
inputCost
float64
outputCost
float64
cacheCreationCost
float64
cacheReadCost
float64
totalCost
float64
actualCost
float64
rateMultiplier
float64
billingType
int16
stream
bool
durationMs
sql
.
NullInt64
firstTokenMs
sql
.
NullInt64
userAgent
sql
.
NullString
ipAddress
sql
.
NullString
imageCount
int
imageSize
sql
.
NullString
createdAt
time
.
Time
id
int64
userID
int64
apiKeyID
int64
accountID
int64
requestID
sql
.
NullString
model
string
groupID
sql
.
NullInt64
subscriptionID
sql
.
NullInt64
inputTokens
int
outputTokens
int
cacheCreationTokens
int
cacheReadTokens
int
cacheCreation5m
int
cacheCreation1h
int
inputCost
float64
outputCost
float64
cacheCreationCost
float64
cacheReadCost
float64
totalCost
float64
actualCost
float64
rateMultiplier
float64
accountRateMultiplier
sql
.
NullFloat64
billingType
int16
stream
bool
durationMs
sql
.
NullInt64
firstTokenMs
sql
.
NullInt64
userAgent
sql
.
NullString
ipAddress
sql
.
NullString
imageCount
int
imageSize
sql
.
NullString
createdAt
time
.
Time
)
if
err
:=
scanner
.
Scan
(
...
...
@@ -2048,6 +2107,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e
&
totalCost
,
&
actualCost
,
&
rateMultiplier
,
&
accountRateMultiplier
,
&
billingType
,
&
stream
,
&
durationMs
,
...
...
@@ -2080,6 +2140,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e
TotalCost
:
totalCost
,
ActualCost
:
actualCost
,
RateMultiplier
:
rateMultiplier
,
AccountRateMultiplier
:
nullFloat64Ptr
(
accountRateMultiplier
),
BillingType
:
int8
(
billingType
),
Stream
:
stream
,
ImageCount
:
imageCount
,
...
...
@@ -2186,6 +2247,14 @@ func nullInt(v *int) sql.NullInt64 {
return
sql
.
NullInt64
{
Int64
:
int64
(
*
v
),
Valid
:
true
}
}
func
nullFloat64Ptr
(
v
sql
.
NullFloat64
)
*
float64
{
if
!
v
.
Valid
{
return
nil
}
out
:=
v
.
Float64
return
&
out
}
func
nullString
(
v
*
string
)
sql
.
NullString
{
if
v
==
nil
||
*
v
==
""
{
return
sql
.
NullString
{}
...
...
backend/internal/repository/usage_log_repo_integration_test.go
View file @
90bce60b
...
...
@@ -11,6 +11,7 @@ import (
dbent
"github.com/Wei-Shaw/sub2api/ent"
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/stretchr/testify/suite"
...
...
@@ -36,6 +37,12 @@ func TestUsageLogRepoSuite(t *testing.T) {
suite
.
Run
(
t
,
new
(
UsageLogRepoSuite
))
}
// truncateToDayUTC 截断到 UTC 日期边界(测试辅助函数)
func
truncateToDayUTC
(
t
time
.
Time
)
time
.
Time
{
t
=
t
.
UTC
()
return
time
.
Date
(
t
.
Year
(),
t
.
Month
(),
t
.
Day
(),
0
,
0
,
0
,
0
,
time
.
UTC
)
}
func
(
s
*
UsageLogRepoSuite
)
createUsageLog
(
user
*
service
.
User
,
apiKey
*
service
.
APIKey
,
account
*
service
.
Account
,
inputTokens
,
outputTokens
int
,
cost
float64
,
createdAt
time
.
Time
)
*
service
.
UsageLog
{
log
:=
&
service
.
UsageLog
{
UserID
:
user
.
ID
,
...
...
@@ -95,6 +102,34 @@ func (s *UsageLogRepoSuite) TestGetByID_NotFound() {
s
.
Require
()
.
Error
(
err
,
"expected error for non-existent ID"
)
}
func
(
s
*
UsageLogRepoSuite
)
TestGetByID_ReturnsAccountRateMultiplier
()
{
user
:=
mustCreateUser
(
s
.
T
(),
s
.
client
,
&
service
.
User
{
Email
:
"getbyid-mult@test.com"
})
apiKey
:=
mustCreateApiKey
(
s
.
T
(),
s
.
client
,
&
service
.
APIKey
{
UserID
:
user
.
ID
,
Key
:
"sk-getbyid-mult"
,
Name
:
"k"
})
account
:=
mustCreateAccount
(
s
.
T
(),
s
.
client
,
&
service
.
Account
{
Name
:
"acc-getbyid-mult"
})
m
:=
0.5
log
:=
&
service
.
UsageLog
{
UserID
:
user
.
ID
,
APIKeyID
:
apiKey
.
ID
,
AccountID
:
account
.
ID
,
RequestID
:
uuid
.
New
()
.
String
(),
Model
:
"claude-3"
,
InputTokens
:
10
,
OutputTokens
:
20
,
TotalCost
:
1.0
,
ActualCost
:
2.0
,
AccountRateMultiplier
:
&
m
,
CreatedAt
:
timezone
.
Today
()
.
Add
(
2
*
time
.
Hour
),
}
_
,
err
:=
s
.
repo
.
Create
(
s
.
ctx
,
log
)
s
.
Require
()
.
NoError
(
err
)
got
,
err
:=
s
.
repo
.
GetByID
(
s
.
ctx
,
log
.
ID
)
s
.
Require
()
.
NoError
(
err
)
s
.
Require
()
.
NotNil
(
got
.
AccountRateMultiplier
)
s
.
Require
()
.
InEpsilon
(
0.5
,
*
got
.
AccountRateMultiplier
,
0.0001
)
}
// --- Delete ---
func
(
s
*
UsageLogRepoSuite
)
TestDelete
()
{
...
...
@@ -403,12 +438,49 @@ func (s *UsageLogRepoSuite) TestGetAccountTodayStats() {
apiKey
:=
mustCreateApiKey
(
s
.
T
(),
s
.
client
,
&
service
.
APIKey
{
UserID
:
user
.
ID
,
Key
:
"sk-acctoday"
,
Name
:
"k"
})
account
:=
mustCreateAccount
(
s
.
T
(),
s
.
client
,
&
service
.
Account
{
Name
:
"acc-today"
})
s
.
createUsageLog
(
user
,
apiKey
,
account
,
10
,
20
,
0.5
,
time
.
Now
())
createdAt
:=
timezone
.
Today
()
.
Add
(
1
*
time
.
Hour
)
m1
:=
1.5
m2
:=
0.0
_
,
err
:=
s
.
repo
.
Create
(
s
.
ctx
,
&
service
.
UsageLog
{
UserID
:
user
.
ID
,
APIKeyID
:
apiKey
.
ID
,
AccountID
:
account
.
ID
,
RequestID
:
uuid
.
New
()
.
String
(),
Model
:
"claude-3"
,
InputTokens
:
10
,
OutputTokens
:
20
,
TotalCost
:
1.0
,
ActualCost
:
2.0
,
AccountRateMultiplier
:
&
m1
,
CreatedAt
:
createdAt
,
})
s
.
Require
()
.
NoError
(
err
)
_
,
err
=
s
.
repo
.
Create
(
s
.
ctx
,
&
service
.
UsageLog
{
UserID
:
user
.
ID
,
APIKeyID
:
apiKey
.
ID
,
AccountID
:
account
.
ID
,
RequestID
:
uuid
.
New
()
.
String
(),
Model
:
"claude-3"
,
InputTokens
:
5
,
OutputTokens
:
5
,
TotalCost
:
0.5
,
ActualCost
:
1.0
,
AccountRateMultiplier
:
&
m2
,
CreatedAt
:
createdAt
,
})
s
.
Require
()
.
NoError
(
err
)
stats
,
err
:=
s
.
repo
.
GetAccountTodayStats
(
s
.
ctx
,
account
.
ID
)
s
.
Require
()
.
NoError
(
err
,
"GetAccountTodayStats"
)
s
.
Require
()
.
Equal
(
int64
(
1
),
stats
.
Requests
)
s
.
Require
()
.
Equal
(
int64
(
30
),
stats
.
Tokens
)
s
.
Require
()
.
Equal
(
int64
(
2
),
stats
.
Requests
)
s
.
Require
()
.
Equal
(
int64
(
40
),
stats
.
Tokens
)
// account cost = SUM(total_cost * account_rate_multiplier)
s
.
Require
()
.
InEpsilon
(
1.5
,
stats
.
Cost
,
0.0001
)
// standard cost = SUM(total_cost)
s
.
Require
()
.
InEpsilon
(
1.5
,
stats
.
StandardCost
,
0.0001
)
// user cost = SUM(actual_cost)
s
.
Require
()
.
InEpsilon
(
3.0
,
stats
.
UserCost
,
0.0001
)
}
func
(
s
*
UsageLogRepoSuite
)
TestDashboardAggregationConsistency
()
{
...
...
@@ -416,8 +488,8 @@ func (s *UsageLogRepoSuite) TestDashboardAggregationConsistency() {
// 使用固定的时间偏移确保 hour1 和 hour2 在同一天且都在过去
// 选择当天 02:00 和 03:00 作为测试时间点(基于 now 的日期)
dayStart
:=
truncateToDayUTC
(
now
)
hour1
:=
dayStart
.
Add
(
2
*
time
.
Hour
)
// 当天 02:00
hour2
:=
dayStart
.
Add
(
3
*
time
.
Hour
)
// 当天 03:00
hour1
:=
dayStart
.
Add
(
2
*
time
.
Hour
)
// 当天 02:00
hour2
:=
dayStart
.
Add
(
3
*
time
.
Hour
)
// 当天 03:00
// 如果当前时间早于 hour2,则使用昨天的时间
if
now
.
Before
(
hour2
.
Add
(
time
.
Hour
))
{
dayStart
=
dayStart
.
Add
(
-
24
*
time
.
Hour
)
...
...
@@ -872,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() {
endTime
:=
base
.
Add
(
48
*
time
.
Hour
)
// Test with user filter
trend
,
err
:=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
user
.
ID
,
0
)
trend
,
err
:=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
user
.
ID
,
0
,
0
,
0
,
""
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetUsageTrendWithFilters user filter"
)
s
.
Require
()
.
Len
(
trend
,
2
)
// Test with apiKey filter
trend
,
err
=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
0
,
apiKey
.
ID
)
trend
,
err
=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
0
,
apiKey
.
ID
,
0
,
0
,
""
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetUsageTrendWithFilters apiKey filter"
)
s
.
Require
()
.
Len
(
trend
,
2
)
// Test with both filters
trend
,
err
=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
user
.
ID
,
apiKey
.
ID
)
trend
,
err
=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"day"
,
user
.
ID
,
apiKey
.
ID
,
0
,
0
,
""
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetUsageTrendWithFilters both filters"
)
s
.
Require
()
.
Len
(
trend
,
2
)
}
...
...
@@ -899,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() {
startTime
:=
base
.
Add
(
-
1
*
time
.
Hour
)
endTime
:=
base
.
Add
(
3
*
time
.
Hour
)
trend
,
err
:=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"hour"
,
user
.
ID
,
0
)
trend
,
err
:=
s
.
repo
.
GetUsageTrendWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
"hour"
,
user
.
ID
,
0
,
0
,
0
,
""
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetUsageTrendWithFilters hourly"
)
s
.
Require
()
.
Len
(
trend
,
2
)
}
...
...
@@ -945,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() {
endTime
:=
base
.
Add
(
2
*
time
.
Hour
)
// Test with user filter
stats
,
err
:=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
user
.
ID
,
0
,
0
)
stats
,
err
:=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
user
.
ID
,
0
,
0
,
0
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetModelStatsWithFilters user filter"
)
s
.
Require
()
.
Len
(
stats
,
2
)
// Test with apiKey filter
stats
,
err
=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
0
,
apiKey
.
ID
,
0
)
stats
,
err
=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
0
,
apiKey
.
ID
,
0
,
0
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetModelStatsWithFilters apiKey filter"
)
s
.
Require
()
.
Len
(
stats
,
2
)
// Test with account filter
stats
,
err
=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
0
,
0
,
account
.
ID
)
stats
,
err
=
s
.
repo
.
GetModelStatsWithFilters
(
s
.
ctx
,
startTime
,
endTime
,
0
,
0
,
account
.
ID
,
0
,
nil
)
s
.
Require
()
.
NoError
(
err
,
"GetModelStatsWithFilters account filter"
)
s
.
Require
()
.
Len
(
stats
,
2
)
}
...
...
backend/internal/repository/wire.go
View file @
90bce60b
...
...
@@ -69,6 +69,7 @@ var ProviderSet = wire.NewSet(
NewGeminiTokenCache
,
NewSchedulerCache
,
NewSchedulerOutboxRepository
,
NewProxyLatencyCache
,
// HTTP service ports (DI Strategy A: return interface directly)
NewTurnstileVerifier
,
...
...
backend/internal/server/api_contract_test.go
View file @
90bce60b
...
...
@@ -239,9 +239,10 @@ func TestAPIContracts(t *testing.T) {
"cache_creation_cost": 0,
"cache_read_cost": 0,
"total_cost": 0.5,
"actual_cost": 0.5,
"rate_multiplier": 1,
"billing_type": 0,
"actual_cost": 0.5,
"rate_multiplier": 1,
"account_rate_multiplier": null,
"billing_type": 0,
"stream": true,
"duration_ms": 100,
"first_token_ms": 50,
...
...
@@ -262,11 +263,11 @@ func TestAPIContracts(t *testing.T) {
name
:
"GET /api/v1/admin/settings"
,
setup
:
func
(
t
*
testing
.
T
,
deps
*
contractDeps
)
{
t
.
Helper
()
deps
.
settingRepo
.
SetAll
(
map
[
string
]
string
{
service
.
SettingKeyRegistrationEnabled
:
"true"
,
service
.
SettingKeyEmailVerifyEnabled
:
"false"
,
deps
.
settingRepo
.
SetAll
(
map
[
string
]
string
{
service
.
SettingKeyRegistrationEnabled
:
"true"
,
service
.
SettingKeyEmailVerifyEnabled
:
"false"
,
service
.
SettingKeySMTPHost
:
"smtp.example.com"
,
service
.
SettingKeySMTPHost
:
"smtp.example.com"
,
service
.
SettingKeySMTPPort
:
"587"
,
service
.
SettingKeySMTPUsername
:
"user"
,
service
.
SettingKeySMTPPassword
:
"secret"
,
...
...
@@ -285,15 +286,15 @@ func TestAPIContracts(t *testing.T) {
service
.
SettingKeyContactInfo
:
"support"
,
service
.
SettingKeyDocURL
:
"https://docs.example.com"
,
service
.
SettingKeyDefaultConcurrency
:
"5"
,
service
.
SettingKeyDefaultBalance
:
"1.25"
,
service
.
SettingKeyDefaultConcurrency
:
"5"
,
service
.
SettingKeyDefaultBalance
:
"1.25"
,
service
.
SettingKeyOpsMonitoringEnabled
:
"false"
,
service
.
SettingKeyOpsRealtimeMonitoringEnabled
:
"true"
,
service
.
SettingKeyOpsQueryModeDefault
:
"auto"
,
service
.
SettingKeyOpsMetricsIntervalSeconds
:
"60"
,
})
},
service
.
SettingKeyOpsMonitoringEnabled
:
"false"
,
service
.
SettingKeyOpsRealtimeMonitoringEnabled
:
"true"
,
service
.
SettingKeyOpsQueryModeDefault
:
"auto"
,
service
.
SettingKeyOpsMetricsIntervalSeconds
:
"60"
,
})
},
method
:
http
.
MethodGet
,
path
:
"/api/v1/admin/settings"
,
wantStatus
:
http
.
StatusOK
,
...
...
@@ -435,7 +436,7 @@ func newContractDeps(t *testing.T) *contractDeps {
settingRepo
:=
newStubSettingRepo
()
settingService
:=
service
.
NewSettingService
(
settingRepo
,
cfg
)
adminService
:=
service
.
NewAdminService
(
userRepo
,
groupRepo
,
&
accountRepo
,
proxyRepo
,
apiKeyRepo
,
redeemRepo
,
nil
,
nil
,
nil
)
adminService
:=
service
.
NewAdminService
(
userRepo
,
groupRepo
,
&
accountRepo
,
proxyRepo
,
apiKeyRepo
,
redeemRepo
,
nil
,
nil
,
nil
,
nil
)
authHandler
:=
handler
.
NewAuthHandler
(
cfg
,
nil
,
userService
,
settingService
,
nil
)
apiKeyHandler
:=
handler
.
NewAPIKeyHandler
(
apiKeyService
)
usageHandler
:=
handler
.
NewUsageHandler
(
usageService
,
apiKeyService
)
...
...
@@ -858,6 +859,10 @@ func (stubProxyRepo) CountAccountsByProxyID(ctx context.Context, proxyID int64)
return
0
,
errors
.
New
(
"not implemented"
)
}
func
(
stubProxyRepo
)
ListAccountSummariesByProxyID
(
ctx
context
.
Context
,
proxyID
int64
)
([]
service
.
ProxyAccountSummary
,
error
)
{
return
nil
,
errors
.
New
(
"not implemented"
)
}
type
stubRedeemCodeRepo
struct
{}
func
(
stubRedeemCodeRepo
)
Create
(
ctx
context
.
Context
,
code
*
service
.
RedeemCode
)
error
{
...
...
@@ -1229,11 +1234,11 @@ func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D
return
nil
,
errors
.
New
(
"not implemented"
)
}
func
(
r
*
stubUsageLogRepo
)
GetUsageTrendWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
granularity
string
,
userID
,
apiKeyID
int64
)
([]
usagestats
.
TrendDataPoint
,
error
)
{
func
(
r
*
stubUsageLogRepo
)
GetUsageTrendWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
granularity
string
,
userID
,
apiKeyID
,
accountID
,
groupID
int64
,
model
string
,
stream
*
bool
)
([]
usagestats
.
TrendDataPoint
,
error
)
{
return
nil
,
errors
.
New
(
"not implemented"
)
}
func
(
r
*
stubUsageLogRepo
)
GetModelStatsWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
userID
,
apiKeyID
,
accountID
int64
)
([]
usagestats
.
ModelStat
,
error
)
{
func
(
r
*
stubUsageLogRepo
)
GetModelStatsWithFilters
(
ctx
context
.
Context
,
startTime
,
endTime
time
.
Time
,
userID
,
apiKeyID
,
accountID
,
groupID
int64
,
stream
*
bool
)
([]
usagestats
.
ModelStat
,
error
)
{
return
nil
,
errors
.
New
(
"not implemented"
)
}
...
...
backend/internal/server/routes/admin.go
View file @
90bce60b
...
...
@@ -81,6 +81,9 @@ func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
ops
.
PUT
(
"/alert-rules/:id"
,
h
.
Admin
.
Ops
.
UpdateAlertRule
)
ops
.
DELETE
(
"/alert-rules/:id"
,
h
.
Admin
.
Ops
.
DeleteAlertRule
)
ops
.
GET
(
"/alert-events"
,
h
.
Admin
.
Ops
.
ListAlertEvents
)
ops
.
GET
(
"/alert-events/:id"
,
h
.
Admin
.
Ops
.
GetAlertEvent
)
ops
.
PUT
(
"/alert-events/:id/status"
,
h
.
Admin
.
Ops
.
UpdateAlertEventStatus
)
ops
.
POST
(
"/alert-silences"
,
h
.
Admin
.
Ops
.
CreateAlertSilence
)
// Email notification config (DB-backed)
ops
.
GET
(
"/email-notification/config"
,
h
.
Admin
.
Ops
.
GetEmailNotificationConfig
)
...
...
@@ -110,10 +113,26 @@ func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
ws
.
GET
(
"/qps"
,
h
.
Admin
.
Ops
.
QPSWSHandler
)
}
// Error logs (
MVP-1
)
// Error logs (
legacy
)
ops
.
GET
(
"/errors"
,
h
.
Admin
.
Ops
.
GetErrorLogs
)
ops
.
GET
(
"/errors/:id"
,
h
.
Admin
.
Ops
.
GetErrorLogByID
)
ops
.
GET
(
"/errors/:id/retries"
,
h
.
Admin
.
Ops
.
ListRetryAttempts
)
ops
.
POST
(
"/errors/:id/retry"
,
h
.
Admin
.
Ops
.
RetryErrorRequest
)
ops
.
PUT
(
"/errors/:id/resolve"
,
h
.
Admin
.
Ops
.
UpdateErrorResolution
)
// Request errors (client-visible failures)
ops
.
GET
(
"/request-errors"
,
h
.
Admin
.
Ops
.
ListRequestErrors
)
ops
.
GET
(
"/request-errors/:id"
,
h
.
Admin
.
Ops
.
GetRequestError
)
ops
.
GET
(
"/request-errors/:id/upstream-errors"
,
h
.
Admin
.
Ops
.
ListRequestErrorUpstreamErrors
)
ops
.
POST
(
"/request-errors/:id/retry-client"
,
h
.
Admin
.
Ops
.
RetryRequestErrorClient
)
ops
.
POST
(
"/request-errors/:id/upstream-errors/:idx/retry"
,
h
.
Admin
.
Ops
.
RetryRequestErrorUpstreamEvent
)
ops
.
PUT
(
"/request-errors/:id/resolve"
,
h
.
Admin
.
Ops
.
ResolveRequestError
)
// Upstream errors (independent upstream failures)
ops
.
GET
(
"/upstream-errors"
,
h
.
Admin
.
Ops
.
ListUpstreamErrors
)
ops
.
GET
(
"/upstream-errors/:id"
,
h
.
Admin
.
Ops
.
GetUpstreamError
)
ops
.
POST
(
"/upstream-errors/:id/retry"
,
h
.
Admin
.
Ops
.
RetryUpstreamError
)
ops
.
PUT
(
"/upstream-errors/:id/resolve"
,
h
.
Admin
.
Ops
.
ResolveUpstreamError
)
// Request drilldown (success + error)
ops
.
GET
(
"/requests"
,
h
.
Admin
.
Ops
.
ListRequestDetails
)
...
...
@@ -250,6 +269,7 @@ func registerProxyRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
proxies
.
POST
(
"/:id/test"
,
h
.
Admin
.
Proxy
.
Test
)
proxies
.
GET
(
"/:id/stats"
,
h
.
Admin
.
Proxy
.
GetStats
)
proxies
.
GET
(
"/:id/accounts"
,
h
.
Admin
.
Proxy
.
GetProxyAccounts
)
proxies
.
POST
(
"/batch-delete"
,
h
.
Admin
.
Proxy
.
BatchDelete
)
proxies
.
POST
(
"/batch"
,
h
.
Admin
.
Proxy
.
BatchCreate
)
}
}
...
...
Prev
1
2
3
4
5
6
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment