"vscode:/vscode.git/clone" did not exist on "28dc34b6a38b670920dc9c02819e0fd95ee33037"
Commit 8cf83c98 authored by erio's avatar erio
Browse files

feat(channel-monitor): aggregate history to daily rollups + soft delete

明细只保留 1 天,超过 1 天聚合到新表 channel_monitor_daily_rollups(按
monitor_id/model/bucket_date 维度),聚合保留 30 天。两张表都用 SoftDeleteMixin
软删除(DELETE 自动改为 UPDATE deleted_at = NOW())。

聚合 + 清理任务由 OpsCleanupService 的 cron 统一调度,与运维监控的清理共享
schedule(默认 0 2 * * *)和 leader lock。ChannelMonitorRunner 的 cleanupLoop
被移除,只保留 dueCheckLoop。

读取路径 ComputeAvailability* 改为 UNION 明细(今天 deleted_at IS NULL)+
聚合(过去 windowDays 天 deleted_at IS NULL),SUM(ok)/SUM(total) 自然加权
计算可用率,AVG latency 用 SUM(sum_latency_ms)/SUM(count_latency)。

watermark 表 channel_monitor_aggregation_watermark 单行(id=1),记录
last_aggregated_date,重启后从该日期 +1 继续聚合,首次为 nil 则从
today - 30d 开始回填,单次最多 35 天上限避免长事务。

raw SQL 的 ListLatestPerModel / ListLatestForMonitorIDs / ListRecentHistoryForMonitors
都补上 deleted_at IS NULL 过滤(SoftDeleteMixin interceptor 只对 ent query 生效)。

bump version to 0.1.114.28

GroupBadge 在 MonitorKeyPickerDialog 中复用平台主题色 + 倍率/专属倍率
(顺手优化)。
parent ba98243c
...@@ -23,6 +23,7 @@ import ( ...@@ -23,6 +23,7 @@ import (
"github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentity"
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
"github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/group"
...@@ -72,6 +73,8 @@ type Client struct { ...@@ -72,6 +73,8 @@ type Client struct {
AuthIdentityChannel *AuthIdentityChannelClient AuthIdentityChannel *AuthIdentityChannelClient
// ChannelMonitor is the client for interacting with the ChannelMonitor builders. // ChannelMonitor is the client for interacting with the ChannelMonitor builders.
ChannelMonitor *ChannelMonitorClient ChannelMonitor *ChannelMonitorClient
// ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders.
ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient
// ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders.
ChannelMonitorHistory *ChannelMonitorHistoryClient ChannelMonitorHistory *ChannelMonitorHistoryClient
// ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders.
...@@ -139,6 +142,7 @@ func (c *Client) init() { ...@@ -139,6 +142,7 @@ func (c *Client) init() {
c.AuthIdentity = NewAuthIdentityClient(c.config) c.AuthIdentity = NewAuthIdentityClient(c.config)
c.AuthIdentityChannel = NewAuthIdentityChannelClient(c.config) c.AuthIdentityChannel = NewAuthIdentityChannelClient(c.config)
c.ChannelMonitor = NewChannelMonitorClient(c.config) c.ChannelMonitor = NewChannelMonitorClient(c.config)
c.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(c.config)
c.ChannelMonitorHistory = NewChannelMonitorHistoryClient(c.config) c.ChannelMonitorHistory = NewChannelMonitorHistoryClient(c.config)
c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config) c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config)
c.Group = NewGroupClient(c.config) c.Group = NewGroupClient(c.config)
...@@ -253,40 +257,41 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { ...@@ -253,40 +257,41 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
cfg := c.config cfg := c.config
cfg.driver = tx cfg.driver = tx
return &Tx{ return &Tx{
ctx: ctx, ctx: ctx,
config: cfg, config: cfg,
APIKey: NewAPIKeyClient(cfg), APIKey: NewAPIKeyClient(cfg),
Account: NewAccountClient(cfg), Account: NewAccountClient(cfg),
AccountGroup: NewAccountGroupClient(cfg), AccountGroup: NewAccountGroupClient(cfg),
Announcement: NewAnnouncementClient(cfg), Announcement: NewAnnouncementClient(cfg),
AnnouncementRead: NewAnnouncementReadClient(cfg), AnnouncementRead: NewAnnouncementReadClient(cfg),
AuthIdentity: NewAuthIdentityClient(cfg), AuthIdentity: NewAuthIdentityClient(cfg),
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
ChannelMonitor: NewChannelMonitorClient(cfg), ChannelMonitor: NewChannelMonitorClient(cfg),
ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg),
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg),
Group: NewGroupClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
IdempotencyRecord: NewIdempotencyRecordClient(cfg), Group: NewGroupClient(cfg),
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), IdempotencyRecord: NewIdempotencyRecordClient(cfg),
PaymentAuditLog: NewPaymentAuditLogClient(cfg), IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
PaymentOrder: NewPaymentOrderClient(cfg), PaymentAuditLog: NewPaymentAuditLogClient(cfg),
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), PaymentOrder: NewPaymentOrderClient(cfg),
PendingAuthSession: NewPendingAuthSessionClient(cfg), PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
PromoCode: NewPromoCodeClient(cfg), PendingAuthSession: NewPendingAuthSessionClient(cfg),
PromoCodeUsage: NewPromoCodeUsageClient(cfg), PromoCode: NewPromoCodeClient(cfg),
Proxy: NewProxyClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg),
RedeemCode: NewRedeemCodeClient(cfg), Proxy: NewProxyClient(cfg),
SecuritySecret: NewSecuritySecretClient(cfg), RedeemCode: NewRedeemCodeClient(cfg),
Setting: NewSettingClient(cfg), SecuritySecret: NewSecuritySecretClient(cfg),
SubscriptionPlan: NewSubscriptionPlanClient(cfg), Setting: NewSettingClient(cfg),
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), SubscriptionPlan: NewSubscriptionPlanClient(cfg),
UsageCleanupTask: NewUsageCleanupTaskClient(cfg), TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
UsageLog: NewUsageLogClient(cfg), UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
User: NewUserClient(cfg), UsageLog: NewUsageLogClient(cfg),
UserAllowedGroup: NewUserAllowedGroupClient(cfg), User: NewUserClient(cfg),
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg),
UserAttributeValue: NewUserAttributeValueClient(cfg), UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
UserSubscription: NewUserSubscriptionClient(cfg), UserAttributeValue: NewUserAttributeValueClient(cfg),
UserSubscription: NewUserSubscriptionClient(cfg),
}, nil }, nil
} }
...@@ -304,40 +309,41 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) ...@@ -304,40 +309,41 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
cfg := c.config cfg := c.config
cfg.driver = &txDriver{tx: tx, drv: c.driver} cfg.driver = &txDriver{tx: tx, drv: c.driver}
return &Tx{ return &Tx{
ctx: ctx, ctx: ctx,
config: cfg, config: cfg,
APIKey: NewAPIKeyClient(cfg), APIKey: NewAPIKeyClient(cfg),
Account: NewAccountClient(cfg), Account: NewAccountClient(cfg),
AccountGroup: NewAccountGroupClient(cfg), AccountGroup: NewAccountGroupClient(cfg),
Announcement: NewAnnouncementClient(cfg), Announcement: NewAnnouncementClient(cfg),
AnnouncementRead: NewAnnouncementReadClient(cfg), AnnouncementRead: NewAnnouncementReadClient(cfg),
AuthIdentity: NewAuthIdentityClient(cfg), AuthIdentity: NewAuthIdentityClient(cfg),
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
ChannelMonitor: NewChannelMonitorClient(cfg), ChannelMonitor: NewChannelMonitorClient(cfg),
ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg),
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg),
Group: NewGroupClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
IdempotencyRecord: NewIdempotencyRecordClient(cfg), Group: NewGroupClient(cfg),
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), IdempotencyRecord: NewIdempotencyRecordClient(cfg),
PaymentAuditLog: NewPaymentAuditLogClient(cfg), IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
PaymentOrder: NewPaymentOrderClient(cfg), PaymentAuditLog: NewPaymentAuditLogClient(cfg),
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), PaymentOrder: NewPaymentOrderClient(cfg),
PendingAuthSession: NewPendingAuthSessionClient(cfg), PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
PromoCode: NewPromoCodeClient(cfg), PendingAuthSession: NewPendingAuthSessionClient(cfg),
PromoCodeUsage: NewPromoCodeUsageClient(cfg), PromoCode: NewPromoCodeClient(cfg),
Proxy: NewProxyClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg),
RedeemCode: NewRedeemCodeClient(cfg), Proxy: NewProxyClient(cfg),
SecuritySecret: NewSecuritySecretClient(cfg), RedeemCode: NewRedeemCodeClient(cfg),
Setting: NewSettingClient(cfg), SecuritySecret: NewSecuritySecretClient(cfg),
SubscriptionPlan: NewSubscriptionPlanClient(cfg), Setting: NewSettingClient(cfg),
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), SubscriptionPlan: NewSubscriptionPlanClient(cfg),
UsageCleanupTask: NewUsageCleanupTaskClient(cfg), TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
UsageLog: NewUsageLogClient(cfg), UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
User: NewUserClient(cfg), UsageLog: NewUsageLogClient(cfg),
UserAllowedGroup: NewUserAllowedGroupClient(cfg), User: NewUserClient(cfg),
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg),
UserAttributeValue: NewUserAttributeValueClient(cfg), UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
UserSubscription: NewUserSubscriptionClient(cfg), UserAttributeValue: NewUserAttributeValueClient(cfg),
UserSubscription: NewUserSubscriptionClient(cfg),
}, nil }, nil
} }
...@@ -369,12 +375,12 @@ func (c *Client) Use(hooks ...Hook) { ...@@ -369,12 +375,12 @@ func (c *Client) Use(hooks ...Hook) {
for _, n := range []interface{ Use(...Hook) }{ for _, n := range []interface{ Use(...Hook) }{
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor,
c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory, c.ErrorPassthroughRule,
c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, c.Group, c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog,
c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode,
c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, c.SubscriptionPlan, c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog,
c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue,
c.UserSubscription, c.UserSubscription,
} { } {
n.Use(hooks...) n.Use(hooks...)
...@@ -387,12 +393,12 @@ func (c *Client) Intercept(interceptors ...Interceptor) { ...@@ -387,12 +393,12 @@ func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{ for _, n := range []interface{ Intercept(...Interceptor) }{
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor,
c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory, c.ErrorPassthroughRule,
c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, c.Group, c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog,
c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode,
c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, c.SubscriptionPlan, c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog,
c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue,
c.UserSubscription, c.UserSubscription,
} { } {
n.Intercept(interceptors...) n.Intercept(interceptors...)
...@@ -418,6 +424,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { ...@@ -418,6 +424,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.AuthIdentityChannel.mutate(ctx, m) return c.AuthIdentityChannel.mutate(ctx, m)
case *ChannelMonitorMutation: case *ChannelMonitorMutation:
return c.ChannelMonitor.mutate(ctx, m) return c.ChannelMonitor.mutate(ctx, m)
case *ChannelMonitorDailyRollupMutation:
return c.ChannelMonitorDailyRollup.mutate(ctx, m)
case *ChannelMonitorHistoryMutation: case *ChannelMonitorHistoryMutation:
return c.ChannelMonitorHistory.mutate(ctx, m) return c.ChannelMonitorHistory.mutate(ctx, m)
case *ErrorPassthroughRuleMutation: case *ErrorPassthroughRuleMutation:
...@@ -1737,6 +1745,22 @@ func (c *ChannelMonitorClient) QueryHistory(_m *ChannelMonitor) *ChannelMonitorH ...@@ -1737,6 +1745,22 @@ func (c *ChannelMonitorClient) QueryHistory(_m *ChannelMonitor) *ChannelMonitorH
return query return query
} }
// QueryDailyRollups queries the daily_rollups edge of a ChannelMonitor.
// It returns a rollup query builder whose path is resolved lazily: the
// SQL selector is only built when the returned query actually executes.
func (c *ChannelMonitorClient) QueryDailyRollups(_m *ChannelMonitor) *ChannelMonitorDailyRollupQuery {
query := (&ChannelMonitorDailyRollupClient{config: c.config}).Query()
// Defer selector construction so interceptors/modifiers added later
// still apply when the query runs.
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := _m.ID
step := sqlgraph.NewStep(
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id),
sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
// O2M, non-inverse: one monitor owns many daily rollup rows.
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
)
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks. // Hooks returns the client hooks.
func (c *ChannelMonitorClient) Hooks() []Hook { func (c *ChannelMonitorClient) Hooks() []Hook {
return c.hooks.ChannelMonitor return c.hooks.ChannelMonitor
...@@ -1762,6 +1786,157 @@ func (c *ChannelMonitorClient) mutate(ctx context.Context, m *ChannelMonitorMuta ...@@ -1762,6 +1786,157 @@ func (c *ChannelMonitorClient) mutate(ctx context.Context, m *ChannelMonitorMuta
} }
} }
// ChannelMonitorDailyRollupClient is a client for the ChannelMonitorDailyRollup schema.
// It embeds the shared config (driver, hooks, interceptors) used by every builder it creates.
type ChannelMonitorDailyRollupClient struct {
config
}
// NewChannelMonitorDailyRollupClient returns a client for the ChannelMonitorDailyRollup from the given config.
func NewChannelMonitorDailyRollupClient(c config) *ChannelMonitorDailyRollupClient {
	client := ChannelMonitorDailyRollupClient{config: c}
	return &client
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `channelmonitordailyrollup.Hooks(f(g(h())))`.
func (c *ChannelMonitorDailyRollupClient) Use(hooks ...Hook) {
	stack := c.hooks.ChannelMonitorDailyRollup
	stack = append(stack, hooks...)
	c.hooks.ChannelMonitorDailyRollup = stack
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `channelmonitordailyrollup.Intercept(f(g(h())))`.
func (c *ChannelMonitorDailyRollupClient) Intercept(interceptors ...Interceptor) {
	stack := c.inters.ChannelMonitorDailyRollup
	stack = append(stack, interceptors...)
	c.inters.ChannelMonitorDailyRollup = stack
}
// Create returns a builder for creating a ChannelMonitorDailyRollup entity.
func (c *ChannelMonitorDailyRollupClient) Create() *ChannelMonitorDailyRollupCreate {
	// Snapshot the hook chain now so later Use() calls don't affect this builder.
	m := newChannelMonitorDailyRollupMutation(c.config, OpCreate)
	return &ChannelMonitorDailyRollupCreate{config: c.config, hooks: c.Hooks(), mutation: m}
}
// CreateBulk returns a builder for creating a bulk of ChannelMonitorDailyRollup entities.
func (c *ChannelMonitorDailyRollupClient) CreateBulk(builders ...*ChannelMonitorDailyRollupCreate) *ChannelMonitorDailyRollupCreateBulk {
	bulk := &ChannelMonitorDailyRollupCreateBulk{config: c.config}
	bulk.builders = builders
	return bulk
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *ChannelMonitorDailyRollupClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorDailyRollupCreate, int)) *ChannelMonitorDailyRollupCreateBulk {
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		// Non-slice input: surface the error through the bulk builder
		// instead of panicking, matching ent's deferred-error convention.
		err := fmt.Errorf("calling to ChannelMonitorDailyRollupClient.MapCreateBulk with wrong type %T, need slice", slice)
		return &ChannelMonitorDailyRollupCreateBulk{err: err}
	}
	builders := make([]*ChannelMonitorDailyRollupCreate, v.Len())
	for i := range builders {
		b := c.Create()
		setFunc(b, i)
		builders[i] = b
	}
	return &ChannelMonitorDailyRollupCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for ChannelMonitorDailyRollup.
func (c *ChannelMonitorDailyRollupClient) Update() *ChannelMonitorDailyRollupUpdate {
	m := newChannelMonitorDailyRollupMutation(c.config, OpUpdate)
	return &ChannelMonitorDailyRollupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}
}
// UpdateOne returns an update builder for the given entity.
func (c *ChannelMonitorDailyRollupClient) UpdateOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne {
	// Seed the mutation with the existing entity so unchanged fields are known.
	m := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollup(_m))
	return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}
}
// UpdateOneID returns an update builder for the given id.
func (c *ChannelMonitorDailyRollupClient) UpdateOneID(id int64) *ChannelMonitorDailyRollupUpdateOne {
	m := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollupID(id))
	return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}
}
// Delete returns a delete builder for ChannelMonitorDailyRollup.
// NOTE(review): per the commit message this schema uses SoftDeleteMixin,
// so DELETE is presumably rewritten to an UPDATE of deleted_at by a hook —
// confirm against the mixin definition.
func (c *ChannelMonitorDailyRollupClient) Delete() *ChannelMonitorDailyRollupDelete {
	m := newChannelMonitorDailyRollupMutation(c.config, OpDelete)
	return &ChannelMonitorDailyRollupDelete{config: c.config, hooks: c.Hooks(), mutation: m}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *ChannelMonitorDailyRollupClient) DeleteOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne {
	id := _m.ID
	return c.DeleteOneID(id)
}
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *ChannelMonitorDailyRollupClient) DeleteOneID(id int64) *ChannelMonitorDailyRollupDeleteOne {
	d := c.Delete().Where(channelmonitordailyrollup.ID(id))
	// Record the concrete id and narrow the op so hooks see OpDeleteOne.
	d.mutation.id = &id
	d.mutation.op = OpDeleteOne
	return &ChannelMonitorDailyRollupDeleteOne{d}
}
// Query returns a query builder for ChannelMonitorDailyRollup.
func (c *ChannelMonitorDailyRollupClient) Query() *ChannelMonitorDailyRollupQuery {
	q := &ChannelMonitorDailyRollupQuery{config: c.config}
	q.ctx = &QueryContext{Type: TypeChannelMonitorDailyRollup}
	// Snapshot interceptors (incl. schema-level ones) at build time.
	q.inters = c.Interceptors()
	return q
}
// Get returns a ChannelMonitorDailyRollup entity by its id.
func (c *ChannelMonitorDailyRollupClient) Get(ctx context.Context, id int64) (*ChannelMonitorDailyRollup, error) {
	q := c.Query()
	q = q.Where(channelmonitordailyrollup.ID(id))
	return q.Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *ChannelMonitorDailyRollupClient) GetX(ctx context.Context, id int64) *ChannelMonitorDailyRollup {
	rollup, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return rollup
}
// QueryMonitor queries the monitor edge of a ChannelMonitorDailyRollup.
// Inverse (M2O) of ChannelMonitor.daily_rollups: resolves the single
// owning monitor of this rollup row. The selector is built lazily.
func (c *ChannelMonitorDailyRollupClient) QueryMonitor(_m *ChannelMonitorDailyRollup) *ChannelMonitorQuery {
query := (&ChannelMonitorClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := _m.ID
step := sqlgraph.NewStep(
sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, id),
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
// M2O, inverse=true: many rollups point to one monitor.
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn),
)
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks: runtime-registered hooks followed by
// the schema-declared ones (e.g. the soft-delete hook).
func (c *ChannelMonitorDailyRollupClient) Hooks() []Hook {
	registered := c.hooks.ChannelMonitorDailyRollup
	// Full slice expression caps capacity so append below cannot
	// overwrite the shared backing array of the registered stack.
	out := registered[:len(registered):len(registered)]
	return append(out, channelmonitordailyrollup.Hooks[:]...)
}
// Interceptors returns the client interceptors: runtime-registered
// interceptors followed by the schema-declared ones.
func (c *ChannelMonitorDailyRollupClient) Interceptors() []Interceptor {
	registered := c.inters.ChannelMonitorDailyRollup
	// Capacity-capped slice prevents append from mutating the shared stack.
	out := registered[:len(registered):len(registered)]
	return append(out, channelmonitordailyrollup.Interceptors[:]...)
}
// mutate dispatches a ChannelMonitorDailyRollup mutation to the builder
// matching its operation and executes it.
func (c *ChannelMonitorDailyRollupClient) mutate(ctx context.Context, m *ChannelMonitorDailyRollupMutation) (Value, error) {
	hooks := c.Hooks()
	switch op := m.Op(); op {
	case OpCreate:
		b := &ChannelMonitorDailyRollupCreate{config: c.config, hooks: hooks, mutation: m}
		return b.Save(ctx)
	case OpUpdate:
		b := &ChannelMonitorDailyRollupUpdate{config: c.config, hooks: hooks, mutation: m}
		return b.Save(ctx)
	case OpUpdateOne:
		b := &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: hooks, mutation: m}
		return b.Save(ctx)
	case OpDelete, OpDeleteOne:
		b := &ChannelMonitorDailyRollupDelete{config: c.config, hooks: hooks, mutation: m}
		return b.Exec(ctx)
	default:
		return nil, fmt.Errorf("ent: unknown ChannelMonitorDailyRollup mutation op: %q", op)
	}
}
// ChannelMonitorHistoryClient is a client for the ChannelMonitorHistory schema. // ChannelMonitorHistoryClient is a client for the ChannelMonitorHistory schema.
type ChannelMonitorHistoryClient struct { type ChannelMonitorHistoryClient struct {
config config
...@@ -1888,12 +2063,14 @@ func (c *ChannelMonitorHistoryClient) QueryMonitor(_m *ChannelMonitorHistory) *C ...@@ -1888,12 +2063,14 @@ func (c *ChannelMonitorHistoryClient) QueryMonitor(_m *ChannelMonitorHistory) *C
// Hooks returns the client hooks. // Hooks returns the client hooks.
func (c *ChannelMonitorHistoryClient) Hooks() []Hook { func (c *ChannelMonitorHistoryClient) Hooks() []Hook {
return c.hooks.ChannelMonitorHistory hooks := c.hooks.ChannelMonitorHistory
return append(hooks[:len(hooks):len(hooks)], channelmonitorhistory.Hooks[:]...)
} }
// Interceptors returns the client interceptors. // Interceptors returns the client interceptors.
func (c *ChannelMonitorHistoryClient) Interceptors() []Interceptor { func (c *ChannelMonitorHistoryClient) Interceptors() []Interceptor {
return c.inters.ChannelMonitorHistory inters := c.inters.ChannelMonitorHistory
return append(inters[:len(inters):len(inters)], channelmonitorhistory.Interceptors[:]...)
} }
func (c *ChannelMonitorHistoryClient) mutate(ctx context.Context, m *ChannelMonitorHistoryMutation) (Value, error) { func (c *ChannelMonitorHistoryClient) mutate(ctx context.Context, m *ChannelMonitorHistoryMutation) (Value, error) {
...@@ -5671,23 +5848,23 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription ...@@ -5671,23 +5848,23 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription
type ( type (
hooks struct { hooks struct {
APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity,
AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup,
ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, ChannelMonitorHistory, ErrorPassthroughRule, Group, IdempotencyRecord,
PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder,
PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy,
SubscriptionPlan, TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile,
UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
UserSubscription []ent.Hook UserAttributeValue, UserSubscription []ent.Hook
} }
inters struct { inters struct {
APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity,
AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup,
ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, ChannelMonitorHistory, ErrorPassthroughRule, Group, IdempotencyRecord,
PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder,
PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy,
SubscriptionPlan, TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile,
UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
UserSubscription []ent.Interceptor UserAttributeValue, UserSubscription []ent.Interceptor
} }
) )
......
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentity"
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
"github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/group"
...@@ -104,38 +105,39 @@ var ( ...@@ -104,38 +105,39 @@ var (
func checkColumn(t, c string) error { func checkColumn(t, c string) error {
initCheck.Do(func() { initCheck.Do(func() {
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
apikey.Table: apikey.ValidColumn, apikey.Table: apikey.ValidColumn,
account.Table: account.ValidColumn, account.Table: account.ValidColumn,
accountgroup.Table: accountgroup.ValidColumn, accountgroup.Table: accountgroup.ValidColumn,
announcement.Table: announcement.ValidColumn, announcement.Table: announcement.ValidColumn,
announcementread.Table: announcementread.ValidColumn, announcementread.Table: announcementread.ValidColumn,
authidentity.Table: authidentity.ValidColumn, authidentity.Table: authidentity.ValidColumn,
authidentitychannel.Table: authidentitychannel.ValidColumn, authidentitychannel.Table: authidentitychannel.ValidColumn,
channelmonitor.Table: channelmonitor.ValidColumn, channelmonitor.Table: channelmonitor.ValidColumn,
channelmonitorhistory.Table: channelmonitorhistory.ValidColumn, channelmonitordailyrollup.Table: channelmonitordailyrollup.ValidColumn,
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, channelmonitorhistory.Table: channelmonitorhistory.ValidColumn,
group.Table: group.ValidColumn, errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
idempotencyrecord.Table: idempotencyrecord.ValidColumn, group.Table: group.ValidColumn,
identityadoptiondecision.Table: identityadoptiondecision.ValidColumn, idempotencyrecord.Table: idempotencyrecord.ValidColumn,
paymentauditlog.Table: paymentauditlog.ValidColumn, identityadoptiondecision.Table: identityadoptiondecision.ValidColumn,
paymentorder.Table: paymentorder.ValidColumn, paymentauditlog.Table: paymentauditlog.ValidColumn,
paymentproviderinstance.Table: paymentproviderinstance.ValidColumn, paymentorder.Table: paymentorder.ValidColumn,
pendingauthsession.Table: pendingauthsession.ValidColumn, paymentproviderinstance.Table: paymentproviderinstance.ValidColumn,
promocode.Table: promocode.ValidColumn, pendingauthsession.Table: pendingauthsession.ValidColumn,
promocodeusage.Table: promocodeusage.ValidColumn, promocode.Table: promocode.ValidColumn,
proxy.Table: proxy.ValidColumn, promocodeusage.Table: promocodeusage.ValidColumn,
redeemcode.Table: redeemcode.ValidColumn, proxy.Table: proxy.ValidColumn,
securitysecret.Table: securitysecret.ValidColumn, redeemcode.Table: redeemcode.ValidColumn,
setting.Table: setting.ValidColumn, securitysecret.Table: securitysecret.ValidColumn,
subscriptionplan.Table: subscriptionplan.ValidColumn, setting.Table: setting.ValidColumn,
tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn, subscriptionplan.Table: subscriptionplan.ValidColumn,
usagecleanuptask.Table: usagecleanuptask.ValidColumn, tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn,
usagelog.Table: usagelog.ValidColumn, usagecleanuptask.Table: usagecleanuptask.ValidColumn,
user.Table: user.ValidColumn, usagelog.Table: usagelog.ValidColumn,
userallowedgroup.Table: userallowedgroup.ValidColumn, user.Table: user.ValidColumn,
userattributedefinition.Table: userattributedefinition.ValidColumn, userallowedgroup.Table: userallowedgroup.ValidColumn,
userattributevalue.Table: userattributevalue.ValidColumn, userattributedefinition.Table: userattributedefinition.ValidColumn,
usersubscription.Table: usersubscription.ValidColumn, userattributevalue.Table: userattributevalue.ValidColumn,
usersubscription.Table: usersubscription.ValidColumn,
}) })
}) })
return columnCheck(t, c) return columnCheck(t, c)
......
...@@ -105,6 +105,18 @@ func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Val ...@@ -105,6 +105,18 @@ func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Val
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m) return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m)
} }
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary
// function as ChannelMonitorDailyRollup mutator.
// It satisfies the ent.Mutator interface via the Mutate method defined below.
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f ChannelMonitorDailyRollupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	mv, ok := m.(*ent.ChannelMonitorDailyRollupMutation)
	if !ok {
		// Guard: this adapter only accepts its own mutation type.
		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorDailyRollupMutation", m)
	}
	return f(ctx, mv)
}
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary // The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary
// function as ChannelMonitorHistory mutator. // function as ChannelMonitorHistory mutator.
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error) type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error)
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentity"
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
"github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/group"
...@@ -315,6 +316,33 @@ func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error ...@@ -315,6 +316,33 @@ func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q) return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q)
} }
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary function as a Querier.
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupQuery) (ent.Value, error)
// Query calls f(ctx, q).
func (f ChannelMonitorDailyRollupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
return f(ctx, q)
}
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
}
// The TraverseChannelMonitorDailyRollup type is an adapter to allow the use of ordinary function as Traverser.
type TraverseChannelMonitorDailyRollup func(context.Context, *ent.ChannelMonitorDailyRollupQuery) error
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
func (f TraverseChannelMonitorDailyRollup) Intercept(next ent.Querier) ent.Querier {
return next
}
// Traverse calls f(ctx, q).
func (f TraverseChannelMonitorDailyRollup) Traverse(ctx context.Context, q ent.Query) error {
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
return f(ctx, q)
}
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
}
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier. // The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier.
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error) type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error)
...@@ -982,6 +1010,8 @@ func NewQuery(q ent.Query) (Query, error) { ...@@ -982,6 +1010,8 @@ func NewQuery(q ent.Query) (Query, error) {
return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil
case *ent.ChannelMonitorQuery: case *ent.ChannelMonitorQuery:
return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil
case *ent.ChannelMonitorDailyRollupQuery:
return &query[*ent.ChannelMonitorDailyRollupQuery, predicate.ChannelMonitorDailyRollup, channelmonitordailyrollup.OrderOption]{typ: ent.TypeChannelMonitorDailyRollup, tq: q}, nil
case *ent.ChannelMonitorHistoryQuery: case *ent.ChannelMonitorHistoryQuery:
return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil
case *ent.ErrorPassthroughRuleQuery: case *ent.ErrorPassthroughRuleQuery:
......
...@@ -461,9 +461,55 @@ var ( ...@@ -461,9 +461,55 @@ var (
}, },
}, },
} }
// ChannelMonitorDailyRollupsColumns holds the columns for the "channel_monitor_daily_rollups" table.
ChannelMonitorDailyRollupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt64, Increment: true},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
{Name: "model", Type: field.TypeString, Size: 200},
{Name: "bucket_date", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "date"}},
{Name: "total_checks", Type: field.TypeInt, Default: 0},
{Name: "ok_count", Type: field.TypeInt, Default: 0},
{Name: "operational_count", Type: field.TypeInt, Default: 0},
{Name: "degraded_count", Type: field.TypeInt, Default: 0},
{Name: "failed_count", Type: field.TypeInt, Default: 0},
{Name: "error_count", Type: field.TypeInt, Default: 0},
{Name: "sum_latency_ms", Type: field.TypeInt64, Default: 0},
{Name: "count_latency", Type: field.TypeInt, Default: 0},
{Name: "sum_ping_latency_ms", Type: field.TypeInt64, Default: 0},
{Name: "count_ping_latency", Type: field.TypeInt, Default: 0},
{Name: "computed_at", Type: field.TypeTime},
{Name: "monitor_id", Type: field.TypeInt64},
}
// ChannelMonitorDailyRollupsTable holds the schema information for the "channel_monitor_daily_rollups" table.
ChannelMonitorDailyRollupsTable = &schema.Table{
Name: "channel_monitor_daily_rollups",
Columns: ChannelMonitorDailyRollupsColumns,
PrimaryKey: []*schema.Column{ChannelMonitorDailyRollupsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "channel_monitor_daily_rollups_channel_monitors_daily_rollups",
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[15]},
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
OnDelete: schema.Cascade,
},
},
Indexes: []*schema.Index{
{
Name: "channelmonitordailyrollup_monitor_id_model_bucket_date",
Unique: true,
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[15], ChannelMonitorDailyRollupsColumns[2], ChannelMonitorDailyRollupsColumns[3]},
},
{
Name: "channelmonitordailyrollup_bucket_date",
Unique: false,
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[3]},
},
},
}
// ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table. // ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table.
ChannelMonitorHistoriesColumns = []*schema.Column{ ChannelMonitorHistoriesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt64, Increment: true}, {Name: "id", Type: field.TypeInt64, Increment: true},
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
{Name: "model", Type: field.TypeString, Size: 200}, {Name: "model", Type: field.TypeString, Size: 200},
{Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}}, {Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}},
{Name: "latency_ms", Type: field.TypeInt, Nullable: true}, {Name: "latency_ms", Type: field.TypeInt, Nullable: true},
...@@ -480,7 +526,7 @@ var ( ...@@ -480,7 +526,7 @@ var (
ForeignKeys: []*schema.ForeignKey{ ForeignKeys: []*schema.ForeignKey{
{ {
Symbol: "channel_monitor_histories_channel_monitors_history", Symbol: "channel_monitor_histories_channel_monitors_history",
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]}, Columns: []*schema.Column{ChannelMonitorHistoriesColumns[8]},
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]}, RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
OnDelete: schema.Cascade, OnDelete: schema.Cascade,
}, },
...@@ -489,12 +535,12 @@ var ( ...@@ -489,12 +535,12 @@ var (
{ {
Name: "channelmonitorhistory_monitor_id_model_checked_at", Name: "channelmonitorhistory_monitor_id_model_checked_at",
Unique: false, Unique: false,
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7], ChannelMonitorHistoriesColumns[1], ChannelMonitorHistoriesColumns[6]}, Columns: []*schema.Column{ChannelMonitorHistoriesColumns[8], ChannelMonitorHistoriesColumns[2], ChannelMonitorHistoriesColumns[7]},
}, },
{ {
Name: "channelmonitorhistory_checked_at", Name: "channelmonitorhistory_checked_at",
Unique: false, Unique: false,
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[6]}, Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]},
}, },
}, },
} }
...@@ -1598,6 +1644,7 @@ var ( ...@@ -1598,6 +1644,7 @@ var (
AuthIdentitiesTable, AuthIdentitiesTable,
AuthIdentityChannelsTable, AuthIdentityChannelsTable,
ChannelMonitorsTable, ChannelMonitorsTable,
ChannelMonitorDailyRollupsTable,
ChannelMonitorHistoriesTable, ChannelMonitorHistoriesTable,
ErrorPassthroughRulesTable, ErrorPassthroughRulesTable,
GroupsTable, GroupsTable,
...@@ -1659,6 +1706,10 @@ func init() { ...@@ -1659,6 +1706,10 @@ func init() {
ChannelMonitorsTable.Annotation = &entsql.Annotation{ ChannelMonitorsTable.Annotation = &entsql.Annotation{
Table: "channel_monitors", Table: "channel_monitors",
} }
ChannelMonitorDailyRollupsTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
ChannelMonitorDailyRollupsTable.Annotation = &entsql.Annotation{
Table: "channel_monitor_daily_rollups",
}
ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{ ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{
Table: "channel_monitor_histories", Table: "channel_monitor_histories",
......
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentity"
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
"github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/group"
...@@ -57,38 +58,39 @@ const ( ...@@ -57,38 +58,39 @@ const (
OpUpdateOne = ent.OpUpdateOne OpUpdateOne = ent.OpUpdateOne
   
// Node types. // Node types.
TypeAPIKey = "APIKey" TypeAPIKey = "APIKey"
TypeAccount = "Account" TypeAccount = "Account"
TypeAccountGroup = "AccountGroup" TypeAccountGroup = "AccountGroup"
TypeAnnouncement = "Announcement" TypeAnnouncement = "Announcement"
TypeAnnouncementRead = "AnnouncementRead" TypeAnnouncementRead = "AnnouncementRead"
TypeAuthIdentity = "AuthIdentity" TypeAuthIdentity = "AuthIdentity"
TypeAuthIdentityChannel = "AuthIdentityChannel" TypeAuthIdentityChannel = "AuthIdentityChannel"
TypeChannelMonitor = "ChannelMonitor" TypeChannelMonitor = "ChannelMonitor"
TypeChannelMonitorHistory = "ChannelMonitorHistory" TypeChannelMonitorDailyRollup = "ChannelMonitorDailyRollup"
TypeErrorPassthroughRule = "ErrorPassthroughRule" TypeChannelMonitorHistory = "ChannelMonitorHistory"
TypeGroup = "Group" TypeErrorPassthroughRule = "ErrorPassthroughRule"
TypeIdempotencyRecord = "IdempotencyRecord" TypeGroup = "Group"
TypeIdentityAdoptionDecision = "IdentityAdoptionDecision" TypeIdempotencyRecord = "IdempotencyRecord"
TypePaymentAuditLog = "PaymentAuditLog" TypeIdentityAdoptionDecision = "IdentityAdoptionDecision"
TypePaymentOrder = "PaymentOrder" TypePaymentAuditLog = "PaymentAuditLog"
TypePaymentProviderInstance = "PaymentProviderInstance" TypePaymentOrder = "PaymentOrder"
TypePendingAuthSession = "PendingAuthSession" TypePaymentProviderInstance = "PaymentProviderInstance"
TypePromoCode = "PromoCode" TypePendingAuthSession = "PendingAuthSession"
TypePromoCodeUsage = "PromoCodeUsage" TypePromoCode = "PromoCode"
TypeProxy = "Proxy" TypePromoCodeUsage = "PromoCodeUsage"
TypeRedeemCode = "RedeemCode" TypeProxy = "Proxy"
TypeSecuritySecret = "SecuritySecret" TypeRedeemCode = "RedeemCode"
TypeSetting = "Setting" TypeSecuritySecret = "SecuritySecret"
TypeSubscriptionPlan = "SubscriptionPlan" TypeSetting = "Setting"
TypeTLSFingerprintProfile = "TLSFingerprintProfile" TypeSubscriptionPlan = "SubscriptionPlan"
TypeUsageCleanupTask = "UsageCleanupTask" TypeTLSFingerprintProfile = "TLSFingerprintProfile"
TypeUsageLog = "UsageLog" TypeUsageCleanupTask = "UsageCleanupTask"
TypeUser = "User" TypeUsageLog = "UsageLog"
TypeUserAllowedGroup = "UserAllowedGroup" TypeUser = "User"
TypeUserAttributeDefinition = "UserAttributeDefinition" TypeUserAllowedGroup = "UserAllowedGroup"
TypeUserAttributeValue = "UserAttributeValue" TypeUserAttributeDefinition = "UserAttributeDefinition"
TypeUserSubscription = "UserSubscription" TypeUserAttributeValue = "UserAttributeValue"
TypeUserSubscription = "UserSubscription"
) )
   
// APIKeyMutation represents an operation that mutates the APIKey nodes in the graph. // APIKeyMutation represents an operation that mutates the APIKey nodes in the graph.
...@@ -8741,32 +8743,35 @@ func (m *AuthIdentityChannelMutation) ResetEdge(name string) error { ...@@ -8741,32 +8743,35 @@ func (m *AuthIdentityChannelMutation) ResetEdge(name string) error {
// ChannelMonitorMutation represents an operation that mutates the ChannelMonitor nodes in the graph. // ChannelMonitorMutation represents an operation that mutates the ChannelMonitor nodes in the graph.
type ChannelMonitorMutation struct { type ChannelMonitorMutation struct {
config config
op Op op Op
typ string typ string
id *int64 id *int64
created_at *time.Time created_at *time.Time
updated_at *time.Time updated_at *time.Time
name *string name *string
provider *channelmonitor.Provider provider *channelmonitor.Provider
endpoint *string endpoint *string
api_key_encrypted *string api_key_encrypted *string
primary_model *string primary_model *string
extra_models *[]string extra_models *[]string
appendextra_models []string appendextra_models []string
group_name *string group_name *string
enabled *bool enabled *bool
interval_seconds *int interval_seconds *int
addinterval_seconds *int addinterval_seconds *int
last_checked_at *time.Time last_checked_at *time.Time
created_by *int64 created_by *int64
addcreated_by *int64 addcreated_by *int64
clearedFields map[string]struct{} clearedFields map[string]struct{}
history map[int64]struct{} history map[int64]struct{}
removedhistory map[int64]struct{} removedhistory map[int64]struct{}
clearedhistory bool clearedhistory bool
done bool daily_rollups map[int64]struct{}
oldValue func(context.Context) (*ChannelMonitor, error) removeddaily_rollups map[int64]struct{}
predicates []predicate.ChannelMonitor cleareddaily_rollups bool
done bool
oldValue func(context.Context) (*ChannelMonitor, error)
predicates []predicate.ChannelMonitor
} }
   
var _ ent.Mutation = (*ChannelMonitorMutation)(nil) var _ ent.Mutation = (*ChannelMonitorMutation)(nil)
...@@ -9470,6 +9475,60 @@ func (m *ChannelMonitorMutation) ResetHistory() { ...@@ -9470,6 +9475,60 @@ func (m *ChannelMonitorMutation) ResetHistory() {
m.removedhistory = nil m.removedhistory = nil
} }
   
// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by ids.
func (m *ChannelMonitorMutation) AddDailyRollupIDs(ids ...int64) {
if m.daily_rollups == nil {
m.daily_rollups = make(map[int64]struct{})
}
for i := range ids {
m.daily_rollups[ids[i]] = struct{}{}
}
}
// ClearDailyRollups clears the "daily_rollups" edge to the ChannelMonitorDailyRollup entity.
func (m *ChannelMonitorMutation) ClearDailyRollups() {
m.cleareddaily_rollups = true
}
// DailyRollupsCleared reports if the "daily_rollups" edge to the ChannelMonitorDailyRollup entity was cleared.
func (m *ChannelMonitorMutation) DailyRollupsCleared() bool {
return m.cleareddaily_rollups
}
// RemoveDailyRollupIDs removes the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs.
func (m *ChannelMonitorMutation) RemoveDailyRollupIDs(ids ...int64) {
if m.removeddaily_rollups == nil {
m.removeddaily_rollups = make(map[int64]struct{})
}
for i := range ids {
delete(m.daily_rollups, ids[i])
m.removeddaily_rollups[ids[i]] = struct{}{}
}
}
// RemovedDailyRollups returns the removed IDs of the "daily_rollups" edge to the ChannelMonitorDailyRollup entity.
func (m *ChannelMonitorMutation) RemovedDailyRollupsIDs() (ids []int64) {
for id := range m.removeddaily_rollups {
ids = append(ids, id)
}
return
}
// DailyRollupsIDs returns the "daily_rollups" edge IDs in the mutation.
func (m *ChannelMonitorMutation) DailyRollupsIDs() (ids []int64) {
for id := range m.daily_rollups {
ids = append(ids, id)
}
return
}
// ResetDailyRollups resets all changes to the "daily_rollups" edge.
func (m *ChannelMonitorMutation) ResetDailyRollups() {
m.daily_rollups = nil
m.cleareddaily_rollups = false
m.removeddaily_rollups = nil
}
// Where appends a list predicates to the ChannelMonitorMutation builder. // Where appends a list predicates to the ChannelMonitorMutation builder.
func (m *ChannelMonitorMutation) Where(ps ...predicate.ChannelMonitor) { func (m *ChannelMonitorMutation) Where(ps ...predicate.ChannelMonitor) {
m.predicates = append(m.predicates, ps...) m.predicates = append(m.predicates, ps...)
...@@ -9849,10 +9908,13 @@ func (m *ChannelMonitorMutation) ResetField(name string) error { ...@@ -9849,10 +9908,13 @@ func (m *ChannelMonitorMutation) ResetField(name string) error {
   
// AddedEdges returns all edge names that were set/added in this mutation. // AddedEdges returns all edge names that were set/added in this mutation.
func (m *ChannelMonitorMutation) AddedEdges() []string { func (m *ChannelMonitorMutation) AddedEdges() []string {
edges := make([]string, 0, 1) edges := make([]string, 0, 2)
if m.history != nil { if m.history != nil {
edges = append(edges, channelmonitor.EdgeHistory) edges = append(edges, channelmonitor.EdgeHistory)
} }
if m.daily_rollups != nil {
edges = append(edges, channelmonitor.EdgeDailyRollups)
}
return edges return edges
} }
   
...@@ -9866,16 +9928,25 @@ func (m *ChannelMonitorMutation) AddedIDs(name string) []ent.Value { ...@@ -9866,16 +9928,25 @@ func (m *ChannelMonitorMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id) ids = append(ids, id)
} }
return ids return ids
case channelmonitor.EdgeDailyRollups:
ids := make([]ent.Value, 0, len(m.daily_rollups))
for id := range m.daily_rollups {
ids = append(ids, id)
}
return ids
} }
return nil return nil
} }
   
// RemovedEdges returns all edge names that were removed in this mutation. // RemovedEdges returns all edge names that were removed in this mutation.
func (m *ChannelMonitorMutation) RemovedEdges() []string { func (m *ChannelMonitorMutation) RemovedEdges() []string {
edges := make([]string, 0, 1) edges := make([]string, 0, 2)
if m.removedhistory != nil { if m.removedhistory != nil {
edges = append(edges, channelmonitor.EdgeHistory) edges = append(edges, channelmonitor.EdgeHistory)
} }
if m.removeddaily_rollups != nil {
edges = append(edges, channelmonitor.EdgeDailyRollups)
}
return edges return edges
} }
   
...@@ -9889,16 +9960,25 @@ func (m *ChannelMonitorMutation) RemovedIDs(name string) []ent.Value { ...@@ -9889,16 +9960,25 @@ func (m *ChannelMonitorMutation) RemovedIDs(name string) []ent.Value {
ids = append(ids, id) ids = append(ids, id)
} }
return ids return ids
case channelmonitor.EdgeDailyRollups:
ids := make([]ent.Value, 0, len(m.removeddaily_rollups))
for id := range m.removeddaily_rollups {
ids = append(ids, id)
}
return ids
} }
return nil return nil
} }
   
// ClearedEdges returns all edge names that were cleared in this mutation. // ClearedEdges returns all edge names that were cleared in this mutation.
func (m *ChannelMonitorMutation) ClearedEdges() []string { func (m *ChannelMonitorMutation) ClearedEdges() []string {
edges := make([]string, 0, 1) edges := make([]string, 0, 2)
if m.clearedhistory { if m.clearedhistory {
edges = append(edges, channelmonitor.EdgeHistory) edges = append(edges, channelmonitor.EdgeHistory)
} }
if m.cleareddaily_rollups {
edges = append(edges, channelmonitor.EdgeDailyRollups)
}
return edges return edges
} }
   
...@@ -9908,6 +9988,8 @@ func (m *ChannelMonitorMutation) EdgeCleared(name string) bool { ...@@ -9908,6 +9988,8 @@ func (m *ChannelMonitorMutation) EdgeCleared(name string) bool {
switch name { switch name {
case channelmonitor.EdgeHistory: case channelmonitor.EdgeHistory:
return m.clearedhistory return m.clearedhistory
case channelmonitor.EdgeDailyRollups:
return m.cleareddaily_rollups
} }
return false return false
} }
...@@ -9927,43 +10009,62 @@ func (m *ChannelMonitorMutation) ResetEdge(name string) error { ...@@ -9927,43 +10009,62 @@ func (m *ChannelMonitorMutation) ResetEdge(name string) error {
case channelmonitor.EdgeHistory: case channelmonitor.EdgeHistory:
m.ResetHistory() m.ResetHistory()
return nil return nil
case channelmonitor.EdgeDailyRollups:
m.ResetDailyRollups()
return nil
} }
return fmt.Errorf("unknown ChannelMonitor edge %s", name) return fmt.Errorf("unknown ChannelMonitor edge %s", name)
} }
   
// ChannelMonitorHistoryMutation represents an operation that mutates the ChannelMonitorHistory nodes in the graph. // ChannelMonitorDailyRollupMutation represents an operation that mutates the ChannelMonitorDailyRollup nodes in the graph.
type ChannelMonitorHistoryMutation struct { type ChannelMonitorDailyRollupMutation struct {
config config
op Op op Op
typ string typ string
id *int64 id *int64
model *string deleted_at *time.Time
status *channelmonitorhistory.Status model *string
latency_ms *int bucket_date *time.Time
addlatency_ms *int total_checks *int
ping_latency_ms *int addtotal_checks *int
addping_latency_ms *int ok_count *int
message *string addok_count *int
checked_at *time.Time operational_count *int
clearedFields map[string]struct{} addoperational_count *int
monitor *int64 degraded_count *int
clearedmonitor bool adddegraded_count *int
done bool failed_count *int
oldValue func(context.Context) (*ChannelMonitorHistory, error) addfailed_count *int
predicates []predicate.ChannelMonitorHistory error_count *int
} adderror_count *int
sum_latency_ms *int64
var _ ent.Mutation = (*ChannelMonitorHistoryMutation)(nil) addsum_latency_ms *int64
count_latency *int
// channelmonitorhistoryOption allows management of the mutation configuration using functional options. addcount_latency *int
type channelmonitorhistoryOption func(*ChannelMonitorHistoryMutation) sum_ping_latency_ms *int64
addsum_ping_latency_ms *int64
// newChannelMonitorHistoryMutation creates new mutation for the ChannelMonitorHistory entity. count_ping_latency *int
func newChannelMonitorHistoryMutation(c config, op Op, opts ...channelmonitorhistoryOption) *ChannelMonitorHistoryMutation { addcount_ping_latency *int
m := &ChannelMonitorHistoryMutation{ computed_at *time.Time
clearedFields map[string]struct{}
monitor *int64
clearedmonitor bool
done bool
oldValue func(context.Context) (*ChannelMonitorDailyRollup, error)
predicates []predicate.ChannelMonitorDailyRollup
}
var _ ent.Mutation = (*ChannelMonitorDailyRollupMutation)(nil)
// channelmonitordailyrollupOption allows management of the mutation configuration using functional options.
type channelmonitordailyrollupOption func(*ChannelMonitorDailyRollupMutation)
// newChannelMonitorDailyRollupMutation creates new mutation for the ChannelMonitorDailyRollup entity.
func newChannelMonitorDailyRollupMutation(c config, op Op, opts ...channelmonitordailyrollupOption) *ChannelMonitorDailyRollupMutation {
m := &ChannelMonitorDailyRollupMutation{
config: c, config: c,
op: op, op: op,
typ: TypeChannelMonitorHistory, typ: TypeChannelMonitorDailyRollup,
clearedFields: make(map[string]struct{}), clearedFields: make(map[string]struct{}),
} }
for _, opt := range opts { for _, opt := range opts {
...@@ -9972,20 +10073,20 @@ func newChannelMonitorHistoryMutation(c config, op Op, opts ...channelmonitorhis ...@@ -9972,20 +10073,20 @@ func newChannelMonitorHistoryMutation(c config, op Op, opts ...channelmonitorhis
return m return m
} }
   
// withChannelMonitorHistoryID sets the ID field of the mutation. // withChannelMonitorDailyRollupID sets the ID field of the mutation.
func withChannelMonitorHistoryID(id int64) channelmonitorhistoryOption { func withChannelMonitorDailyRollupID(id int64) channelmonitordailyrollupOption {
return func(m *ChannelMonitorHistoryMutation) { return func(m *ChannelMonitorDailyRollupMutation) {
var ( var (
err error err error
once sync.Once once sync.Once
value *ChannelMonitorHistory value *ChannelMonitorDailyRollup
) )
m.oldValue = func(ctx context.Context) (*ChannelMonitorHistory, error) { m.oldValue = func(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
once.Do(func() { once.Do(func() {
if m.done { if m.done {
err = errors.New("querying old values post mutation is not allowed") err = errors.New("querying old values post mutation is not allowed")
} else { } else {
value, err = m.Client().ChannelMonitorHistory.Get(ctx, id) value, err = m.Client().ChannelMonitorDailyRollup.Get(ctx, id)
} }
}) })
return value, err return value, err
...@@ -9994,10 +10095,10 @@ func withChannelMonitorHistoryID(id int64) channelmonitorhistoryOption { ...@@ -9994,10 +10095,10 @@ func withChannelMonitorHistoryID(id int64) channelmonitorhistoryOption {
} }
} }
   
// withChannelMonitorHistory sets the old ChannelMonitorHistory of the mutation. // withChannelMonitorDailyRollup sets the old ChannelMonitorDailyRollup of the mutation.
func withChannelMonitorHistory(node *ChannelMonitorHistory) channelmonitorhistoryOption { func withChannelMonitorDailyRollup(node *ChannelMonitorDailyRollup) channelmonitordailyrollupOption {
return func(m *ChannelMonitorHistoryMutation) { return func(m *ChannelMonitorDailyRollupMutation) {
m.oldValue = func(context.Context) (*ChannelMonitorHistory, error) { m.oldValue = func(context.Context) (*ChannelMonitorDailyRollup, error) {
return node, nil return node, nil
} }
m.id = &node.ID m.id = &node.ID
...@@ -10006,7 +10107,7 @@ func withChannelMonitorHistory(node *ChannelMonitorHistory) channelmonitorhistor ...@@ -10006,7 +10107,7 @@ func withChannelMonitorHistory(node *ChannelMonitorHistory) channelmonitorhistor
   
// Client returns a new `ent.Client` from the mutation. If the mutation was // Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned. // executed in a transaction (ent.Tx), a transactional client is returned.
func (m ChannelMonitorHistoryMutation) Client() *Client { func (m ChannelMonitorDailyRollupMutation) Client() *Client {
client := &Client{config: m.config} client := &Client{config: m.config}
client.init() client.init()
return client return client
...@@ -10014,7 +10115,7 @@ func (m ChannelMonitorHistoryMutation) Client() *Client { ...@@ -10014,7 +10115,7 @@ func (m ChannelMonitorHistoryMutation) Client() *Client {
   
// Tx returns an `ent.Tx` for mutations that were executed in transactions; // Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise. // it returns an error otherwise.
func (m ChannelMonitorHistoryMutation) Tx() (*Tx, error) { func (m ChannelMonitorDailyRollupMutation) Tx() (*Tx, error) {
if _, ok := m.driver.(*txDriver); !ok { if _, ok := m.driver.(*txDriver); !ok {
return nil, errors.New("ent: mutation is not running in a transaction") return nil, errors.New("ent: mutation is not running in a transaction")
} }
...@@ -10025,7 +10126,7 @@ func (m ChannelMonitorHistoryMutation) Tx() (*Tx, error) { ...@@ -10025,7 +10126,7 @@ func (m ChannelMonitorHistoryMutation) Tx() (*Tx, error) {
   
// ID returns the ID value in the mutation. Note that the ID is only available // ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database. // if it was provided to the builder or after it was returned from the database.
func (m *ChannelMonitorHistoryMutation) ID() (id int64, exists bool) { func (m *ChannelMonitorDailyRollupMutation) ID() (id int64, exists bool) {
if m.id == nil { if m.id == nil {
return return
} }
...@@ -10036,7 +10137,7 @@ func (m *ChannelMonitorHistoryMutation) ID() (id int64, exists bool) { ...@@ -10036,7 +10137,7 @@ func (m *ChannelMonitorHistoryMutation) ID() (id int64, exists bool) {
// That means, if the mutation is applied within a transaction with an isolation level such // That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation. // or updated by the mutation.
func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error) { func (m *ChannelMonitorDailyRollupMutation) IDs(ctx context.Context) ([]int64, error) {
switch { switch {
case m.op.Is(OpUpdateOne | OpDeleteOne): case m.op.Is(OpUpdateOne | OpDeleteOne):
id, exists := m.ID() id, exists := m.ID()
...@@ -10045,19 +10146,68 @@ func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error ...@@ -10045,19 +10146,68 @@ func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error
} }
fallthrough fallthrough
case m.op.Is(OpUpdate | OpDelete): case m.op.Is(OpUpdate | OpDelete):
return m.Client().ChannelMonitorHistory.Query().Where(m.predicates...).IDs(ctx) return m.Client().ChannelMonitorDailyRollup.Query().Where(m.predicates...).IDs(ctx)
default: default:
return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
} }
} }
   
// SetDeletedAt sets the "deleted_at" field.
func (m *ChannelMonitorDailyRollupMutation) SetDeletedAt(t time.Time) {
m.deleted_at = &t
}
// DeletedAt returns the value of the "deleted_at" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) DeletedAt() (r time.Time, exists bool) {
v := m.deleted_at
if v == nil {
return
}
return *v, true
}
// OldDeletedAt returns the old "deleted_at" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldDeletedAt requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err)
}
return oldValue.DeletedAt, nil
}
// ClearDeletedAt clears the value of the "deleted_at" field.
func (m *ChannelMonitorDailyRollupMutation) ClearDeletedAt() {
m.deleted_at = nil
m.clearedFields[channelmonitordailyrollup.FieldDeletedAt] = struct{}{}
}
// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation.
func (m *ChannelMonitorDailyRollupMutation) DeletedAtCleared() bool {
_, ok := m.clearedFields[channelmonitordailyrollup.FieldDeletedAt]
return ok
}
// ResetDeletedAt resets all changes to the "deleted_at" field.
func (m *ChannelMonitorDailyRollupMutation) ResetDeletedAt() {
m.deleted_at = nil
delete(m.clearedFields, channelmonitordailyrollup.FieldDeletedAt)
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (m *ChannelMonitorHistoryMutation) SetMonitorID(i int64) { func (m *ChannelMonitorDailyRollupMutation) SetMonitorID(i int64) {
m.monitor = &i m.monitor = &i
} }
   
// MonitorID returns the value of the "monitor_id" field in the mutation. // MonitorID returns the value of the "monitor_id" field in the mutation.
func (m *ChannelMonitorHistoryMutation) MonitorID() (r int64, exists bool) { func (m *ChannelMonitorDailyRollupMutation) MonitorID() (r int64, exists bool) {
v := m.monitor v := m.monitor
if v == nil { if v == nil {
return return
...@@ -10065,10 +10215,10 @@ func (m *ChannelMonitorHistoryMutation) MonitorID() (r int64, exists bool) { ...@@ -10065,10 +10215,10 @@ func (m *ChannelMonitorHistoryMutation) MonitorID() (r int64, exists bool) {
return *v, true return *v, true
} }
   
// OldMonitorID returns the old "monitor_id" field's value of the ChannelMonitorHistory entity. // OldMonitorID returns the old "monitor_id" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldMonitorID(ctx context.Context) (v int64, err error) { func (m *ChannelMonitorDailyRollupMutation) OldMonitorID(ctx context.Context) (v int64, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldMonitorID is only allowed on UpdateOne operations") return v, errors.New("OldMonitorID is only allowed on UpdateOne operations")
} }
...@@ -10083,17 +10233,17 @@ func (m *ChannelMonitorHistoryMutation) OldMonitorID(ctx context.Context) (v int ...@@ -10083,17 +10233,17 @@ func (m *ChannelMonitorHistoryMutation) OldMonitorID(ctx context.Context) (v int
} }
   
// ResetMonitorID resets all changes to the "monitor_id" field. // ResetMonitorID resets all changes to the "monitor_id" field.
func (m *ChannelMonitorHistoryMutation) ResetMonitorID() { func (m *ChannelMonitorDailyRollupMutation) ResetMonitorID() {
m.monitor = nil m.monitor = nil
} }
   
// SetModel sets the "model" field. // SetModel sets the "model" field.
func (m *ChannelMonitorHistoryMutation) SetModel(s string) { func (m *ChannelMonitorDailyRollupMutation) SetModel(s string) {
m.model = &s m.model = &s
} }
   
// Model returns the value of the "model" field in the mutation. // Model returns the value of the "model" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Model() (r string, exists bool) { func (m *ChannelMonitorDailyRollupMutation) Model() (r string, exists bool) {
v := m.model v := m.model
if v == nil { if v == nil {
return return
...@@ -10101,10 +10251,10 @@ func (m *ChannelMonitorHistoryMutation) Model() (r string, exists bool) { ...@@ -10101,10 +10251,10 @@ func (m *ChannelMonitorHistoryMutation) Model() (r string, exists bool) {
return *v, true return *v, true
} }
   
// OldModel returns the old "model" field's value of the ChannelMonitorHistory entity. // OldModel returns the old "model" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldModel(ctx context.Context) (v string, err error) { func (m *ChannelMonitorDailyRollupMutation) OldModel(ctx context.Context) (v string, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldModel is only allowed on UpdateOne operations") return v, errors.New("OldModel is only allowed on UpdateOne operations")
} }
...@@ -10119,206 +10269,1682 @@ func (m *ChannelMonitorHistoryMutation) OldModel(ctx context.Context) (v string, ...@@ -10119,206 +10269,1682 @@ func (m *ChannelMonitorHistoryMutation) OldModel(ctx context.Context) (v string,
} }
   
// ResetModel resets all changes to the "model" field. // ResetModel resets all changes to the "model" field.
func (m *ChannelMonitorHistoryMutation) ResetModel() { func (m *ChannelMonitorDailyRollupMutation) ResetModel() {
m.model = nil m.model = nil
} }
   
// SetStatus sets the "status" field. // SetBucketDate sets the "bucket_date" field.
func (m *ChannelMonitorHistoryMutation) SetStatus(c channelmonitorhistory.Status) { func (m *ChannelMonitorDailyRollupMutation) SetBucketDate(t time.Time) {
m.status = &c m.bucket_date = &t
} }
   
// Status returns the value of the "status" field in the mutation. // BucketDate returns the value of the "bucket_date" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Status() (r channelmonitorhistory.Status, exists bool) { func (m *ChannelMonitorDailyRollupMutation) BucketDate() (r time.Time, exists bool) {
v := m.status v := m.bucket_date
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// OldStatus returns the old "status" field's value of the ChannelMonitorHistory entity. // OldBucketDate returns the old "bucket_date" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldStatus(ctx context.Context) (v channelmonitorhistory.Status, err error) { func (m *ChannelMonitorDailyRollupMutation) OldBucketDate(ctx context.Context) (v time.Time, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldStatus is only allowed on UpdateOne operations") return v, errors.New("OldBucketDate is only allowed on UpdateOne operations")
} }
if m.id == nil || m.oldValue == nil { if m.id == nil || m.oldValue == nil {
return v, errors.New("OldStatus requires an ID field in the mutation") return v, errors.New("OldBucketDate requires an ID field in the mutation")
} }
oldValue, err := m.oldValue(ctx) oldValue, err := m.oldValue(ctx)
if err != nil { if err != nil {
return v, fmt.Errorf("querying old value for OldStatus: %w", err) return v, fmt.Errorf("querying old value for OldBucketDate: %w", err)
} }
return oldValue.Status, nil return oldValue.BucketDate, nil
} }
   
// ResetStatus resets all changes to the "status" field. // ResetBucketDate resets all changes to the "bucket_date" field.
func (m *ChannelMonitorHistoryMutation) ResetStatus() { func (m *ChannelMonitorDailyRollupMutation) ResetBucketDate() {
m.status = nil m.bucket_date = nil
} }
   
// SetLatencyMs sets the "latency_ms" field. // SetTotalChecks sets the "total_checks" field.
func (m *ChannelMonitorHistoryMutation) SetLatencyMs(i int) { func (m *ChannelMonitorDailyRollupMutation) SetTotalChecks(i int) {
m.latency_ms = &i m.total_checks = &i
m.addlatency_ms = nil m.addtotal_checks = nil
} }
   
// LatencyMs returns the value of the "latency_ms" field in the mutation. // TotalChecks returns the value of the "total_checks" field in the mutation.
func (m *ChannelMonitorHistoryMutation) LatencyMs() (r int, exists bool) { func (m *ChannelMonitorDailyRollupMutation) TotalChecks() (r int, exists bool) {
v := m.latency_ms v := m.total_checks
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// OldLatencyMs returns the old "latency_ms" field's value of the ChannelMonitorHistory entity. // OldTotalChecks returns the old "total_checks" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldLatencyMs(ctx context.Context) (v *int, err error) { func (m *ChannelMonitorDailyRollupMutation) OldTotalChecks(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldLatencyMs is only allowed on UpdateOne operations") return v, errors.New("OldTotalChecks is only allowed on UpdateOne operations")
} }
if m.id == nil || m.oldValue == nil { if m.id == nil || m.oldValue == nil {
return v, errors.New("OldLatencyMs requires an ID field in the mutation") return v, errors.New("OldTotalChecks requires an ID field in the mutation")
} }
oldValue, err := m.oldValue(ctx) oldValue, err := m.oldValue(ctx)
if err != nil { if err != nil {
return v, fmt.Errorf("querying old value for OldLatencyMs: %w", err) return v, fmt.Errorf("querying old value for OldTotalChecks: %w", err)
} }
return oldValue.LatencyMs, nil return oldValue.TotalChecks, nil
} }
   
// AddLatencyMs adds i to the "latency_ms" field. // AddTotalChecks adds i to the "total_checks" field.
func (m *ChannelMonitorHistoryMutation) AddLatencyMs(i int) { func (m *ChannelMonitorDailyRollupMutation) AddTotalChecks(i int) {
if m.addlatency_ms != nil { if m.addtotal_checks != nil {
*m.addlatency_ms += i *m.addtotal_checks += i
} else { } else {
m.addlatency_ms = &i m.addtotal_checks = &i
} }
} }
   
// AddedLatencyMs returns the value that was added to the "latency_ms" field in this mutation. // AddedTotalChecks returns the value that was added to the "total_checks" field in this mutation.
func (m *ChannelMonitorHistoryMutation) AddedLatencyMs() (r int, exists bool) { func (m *ChannelMonitorDailyRollupMutation) AddedTotalChecks() (r int, exists bool) {
v := m.addlatency_ms v := m.addtotal_checks
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// ClearLatencyMs clears the value of the "latency_ms" field. // ResetTotalChecks resets all changes to the "total_checks" field.
func (m *ChannelMonitorHistoryMutation) ClearLatencyMs() { func (m *ChannelMonitorDailyRollupMutation) ResetTotalChecks() {
m.latency_ms = nil m.total_checks = nil
m.addlatency_ms = nil m.addtotal_checks = nil
m.clearedFields[channelmonitorhistory.FieldLatencyMs] = struct{}{}
}
// LatencyMsCleared returns if the "latency_ms" field was cleared in this mutation.
func (m *ChannelMonitorHistoryMutation) LatencyMsCleared() bool {
_, ok := m.clearedFields[channelmonitorhistory.FieldLatencyMs]
return ok
}
// ResetLatencyMs resets all changes to the "latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ResetLatencyMs() {
m.latency_ms = nil
m.addlatency_ms = nil
delete(m.clearedFields, channelmonitorhistory.FieldLatencyMs)
} }
   
// SetPingLatencyMs sets the "ping_latency_ms" field. // SetOkCount sets the "ok_count" field.
func (m *ChannelMonitorHistoryMutation) SetPingLatencyMs(i int) { func (m *ChannelMonitorDailyRollupMutation) SetOkCount(i int) {
m.ping_latency_ms = &i m.ok_count = &i
m.addping_latency_ms = nil m.addok_count = nil
} }
   
// PingLatencyMs returns the value of the "ping_latency_ms" field in the mutation. // OkCount returns the value of the "ok_count" field in the mutation.
func (m *ChannelMonitorHistoryMutation) PingLatencyMs() (r int, exists bool) { func (m *ChannelMonitorDailyRollupMutation) OkCount() (r int, exists bool) {
v := m.ping_latency_ms v := m.ok_count
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// OldPingLatencyMs returns the old "ping_latency_ms" field's value of the ChannelMonitorHistory entity. // OldOkCount returns the old "ok_count" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldPingLatencyMs(ctx context.Context) (v *int, err error) { func (m *ChannelMonitorDailyRollupMutation) OldOkCount(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldPingLatencyMs is only allowed on UpdateOne operations") return v, errors.New("OldOkCount is only allowed on UpdateOne operations")
} }
if m.id == nil || m.oldValue == nil { if m.id == nil || m.oldValue == nil {
return v, errors.New("OldPingLatencyMs requires an ID field in the mutation") return v, errors.New("OldOkCount requires an ID field in the mutation")
} }
oldValue, err := m.oldValue(ctx) oldValue, err := m.oldValue(ctx)
if err != nil { if err != nil {
return v, fmt.Errorf("querying old value for OldPingLatencyMs: %w", err) return v, fmt.Errorf("querying old value for OldOkCount: %w", err)
} }
return oldValue.PingLatencyMs, nil return oldValue.OkCount, nil
} }
   
// AddPingLatencyMs adds i to the "ping_latency_ms" field. // AddOkCount adds i to the "ok_count" field.
func (m *ChannelMonitorHistoryMutation) AddPingLatencyMs(i int) { func (m *ChannelMonitorDailyRollupMutation) AddOkCount(i int) {
if m.addping_latency_ms != nil { if m.addok_count != nil {
*m.addping_latency_ms += i *m.addok_count += i
} else { } else {
m.addping_latency_ms = &i m.addok_count = &i
} }
} }
   
// AddedPingLatencyMs returns the value that was added to the "ping_latency_ms" field in this mutation. // AddedOkCount returns the value that was added to the "ok_count" field in this mutation.
func (m *ChannelMonitorHistoryMutation) AddedPingLatencyMs() (r int, exists bool) { func (m *ChannelMonitorDailyRollupMutation) AddedOkCount() (r int, exists bool) {
v := m.addping_latency_ms v := m.addok_count
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. // ResetOkCount resets all changes to the "ok_count" field.
func (m *ChannelMonitorHistoryMutation) ClearPingLatencyMs() { func (m *ChannelMonitorDailyRollupMutation) ResetOkCount() {
m.ping_latency_ms = nil m.ok_count = nil
m.addping_latency_ms = nil m.addok_count = nil
m.clearedFields[channelmonitorhistory.FieldPingLatencyMs] = struct{}{}
}
// PingLatencyMsCleared returns if the "ping_latency_ms" field was cleared in this mutation.
func (m *ChannelMonitorHistoryMutation) PingLatencyMsCleared() bool {
_, ok := m.clearedFields[channelmonitorhistory.FieldPingLatencyMs]
return ok
}
// ResetPingLatencyMs resets all changes to the "ping_latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ResetPingLatencyMs() {
m.ping_latency_ms = nil
m.addping_latency_ms = nil
delete(m.clearedFields, channelmonitorhistory.FieldPingLatencyMs)
} }
   
// SetMessage sets the "message" field. // SetOperationalCount sets the "operational_count" field.
func (m *ChannelMonitorHistoryMutation) SetMessage(s string) { func (m *ChannelMonitorDailyRollupMutation) SetOperationalCount(i int) {
m.message = &s m.operational_count = &i
m.addoperational_count = nil
} }
   
// Message returns the value of the "message" field in the mutation. // OperationalCount returns the value of the "operational_count" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Message() (r string, exists bool) { func (m *ChannelMonitorDailyRollupMutation) OperationalCount() (r int, exists bool) {
v := m.message v := m.operational_count
if v == nil { if v == nil {
return return
} }
return *v, true return *v, true
} }
   
// OldMessage returns the old "message" field's value of the ChannelMonitorHistory entity. // OldOperationalCount returns the old "operational_count" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. // If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. // An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldMessage(ctx context.Context) (v string, err error) { func (m *ChannelMonitorDailyRollupMutation) OldOperationalCount(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) { if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldMessage is only allowed on UpdateOne operations") return v, errors.New("OldOperationalCount is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldOperationalCount requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldOperationalCount: %w", err)
}
return oldValue.OperationalCount, nil
}
// AddOperationalCount adds i to the "operational_count" field.
func (m *ChannelMonitorDailyRollupMutation) AddOperationalCount(i int) {
if m.addoperational_count != nil {
*m.addoperational_count += i
} else {
m.addoperational_count = &i
}
}
// AddedOperationalCount returns the value that was added to the "operational_count" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedOperationalCount() (r int, exists bool) {
v := m.addoperational_count
if v == nil {
return
}
return *v, true
}
// ResetOperationalCount resets all changes to the "operational_count" field.
func (m *ChannelMonitorDailyRollupMutation) ResetOperationalCount() {
m.operational_count = nil
m.addoperational_count = nil
}
// SetDegradedCount sets the "degraded_count" field.
func (m *ChannelMonitorDailyRollupMutation) SetDegradedCount(i int) {
m.degraded_count = &i
m.adddegraded_count = nil
}
// DegradedCount returns the value of the "degraded_count" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) DegradedCount() (r int, exists bool) {
v := m.degraded_count
if v == nil {
return
}
return *v, true
}
// OldDegradedCount returns the old "degraded_count" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldDegradedCount(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldDegradedCount is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldDegradedCount requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldDegradedCount: %w", err)
}
return oldValue.DegradedCount, nil
}
// AddDegradedCount adds i to the "degraded_count" field.
func (m *ChannelMonitorDailyRollupMutation) AddDegradedCount(i int) {
if m.adddegraded_count != nil {
*m.adddegraded_count += i
} else {
m.adddegraded_count = &i
}
}
// AddedDegradedCount returns the value that was added to the "degraded_count" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedDegradedCount() (r int, exists bool) {
v := m.adddegraded_count
if v == nil {
return
}
return *v, true
}
// ResetDegradedCount resets all changes to the "degraded_count" field.
func (m *ChannelMonitorDailyRollupMutation) ResetDegradedCount() {
m.degraded_count = nil
m.adddegraded_count = nil
}
// SetFailedCount sets the "failed_count" field.
func (m *ChannelMonitorDailyRollupMutation) SetFailedCount(i int) {
m.failed_count = &i
m.addfailed_count = nil
}
// FailedCount returns the value of the "failed_count" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) FailedCount() (r int, exists bool) {
v := m.failed_count
if v == nil {
return
}
return *v, true
}
// OldFailedCount returns the old "failed_count" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldFailedCount(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldFailedCount is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldFailedCount requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldFailedCount: %w", err)
}
return oldValue.FailedCount, nil
}
// AddFailedCount adds i to the "failed_count" field.
func (m *ChannelMonitorDailyRollupMutation) AddFailedCount(i int) {
if m.addfailed_count != nil {
*m.addfailed_count += i
} else {
m.addfailed_count = &i
}
}
// AddedFailedCount returns the value that was added to the "failed_count" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedFailedCount() (r int, exists bool) {
v := m.addfailed_count
if v == nil {
return
}
return *v, true
}
// ResetFailedCount resets all changes to the "failed_count" field.
func (m *ChannelMonitorDailyRollupMutation) ResetFailedCount() {
m.failed_count = nil
m.addfailed_count = nil
}
// SetErrorCount sets the "error_count" field.
func (m *ChannelMonitorDailyRollupMutation) SetErrorCount(i int) {
m.error_count = &i
m.adderror_count = nil
}
// ErrorCount returns the value of the "error_count" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) ErrorCount() (r int, exists bool) {
v := m.error_count
if v == nil {
return
}
return *v, true
}
// OldErrorCount returns the old "error_count" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldErrorCount(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldErrorCount is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldErrorCount requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldErrorCount: %w", err)
}
return oldValue.ErrorCount, nil
}
// AddErrorCount adds i to the "error_count" field.
func (m *ChannelMonitorDailyRollupMutation) AddErrorCount(i int) {
if m.adderror_count != nil {
*m.adderror_count += i
} else {
m.adderror_count = &i
}
}
// AddedErrorCount returns the value that was added to the "error_count" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedErrorCount() (r int, exists bool) {
v := m.adderror_count
if v == nil {
return
}
return *v, true
}
// ResetErrorCount resets all changes to the "error_count" field.
func (m *ChannelMonitorDailyRollupMutation) ResetErrorCount() {
m.error_count = nil
m.adderror_count = nil
}
// SetSumLatencyMs sets the "sum_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) SetSumLatencyMs(i int64) {
m.sum_latency_ms = &i
m.addsum_latency_ms = nil
}
// SumLatencyMs returns the value of the "sum_latency_ms" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) SumLatencyMs() (r int64, exists bool) {
v := m.sum_latency_ms
if v == nil {
return
}
return *v, true
}
// OldSumLatencyMs returns the old "sum_latency_ms" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldSumLatencyMs(ctx context.Context) (v int64, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldSumLatencyMs is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldSumLatencyMs requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldSumLatencyMs: %w", err)
}
return oldValue.SumLatencyMs, nil
}
// AddSumLatencyMs adds i to the "sum_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) AddSumLatencyMs(i int64) {
if m.addsum_latency_ms != nil {
*m.addsum_latency_ms += i
} else {
m.addsum_latency_ms = &i
}
}
// AddedSumLatencyMs returns the value that was added to the "sum_latency_ms" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedSumLatencyMs() (r int64, exists bool) {
v := m.addsum_latency_ms
if v == nil {
return
}
return *v, true
}
// ResetSumLatencyMs resets all changes to the "sum_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) ResetSumLatencyMs() {
m.sum_latency_ms = nil
m.addsum_latency_ms = nil
}
// SetCountLatency sets the "count_latency" field.
func (m *ChannelMonitorDailyRollupMutation) SetCountLatency(i int) {
m.count_latency = &i
m.addcount_latency = nil
}
// CountLatency returns the value of the "count_latency" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) CountLatency() (r int, exists bool) {
v := m.count_latency
if v == nil {
return
}
return *v, true
}
// OldCountLatency returns the old "count_latency" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldCountLatency(ctx context.Context) (v int, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldCountLatency is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldCountLatency requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldCountLatency: %w", err)
}
return oldValue.CountLatency, nil
}
// AddCountLatency adds i to the "count_latency" field.
func (m *ChannelMonitorDailyRollupMutation) AddCountLatency(i int) {
if m.addcount_latency != nil {
*m.addcount_latency += i
} else {
m.addcount_latency = &i
}
}
// AddedCountLatency returns the value that was added to the "count_latency" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedCountLatency() (r int, exists bool) {
v := m.addcount_latency
if v == nil {
return
}
return *v, true
}
// ResetCountLatency resets all changes to the "count_latency" field.
func (m *ChannelMonitorDailyRollupMutation) ResetCountLatency() {
m.count_latency = nil
m.addcount_latency = nil
}
// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) SetSumPingLatencyMs(i int64) {
m.sum_ping_latency_ms = &i
m.addsum_ping_latency_ms = nil
}
// SumPingLatencyMs returns the value of the "sum_ping_latency_ms" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) SumPingLatencyMs() (r int64, exists bool) {
v := m.sum_ping_latency_ms
if v == nil {
return
}
return *v, true
}
// OldSumPingLatencyMs returns the old "sum_ping_latency_ms" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldSumPingLatencyMs(ctx context.Context) (v int64, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldSumPingLatencyMs is only allowed on UpdateOne operations")
}
if m.id == nil || m.oldValue == nil {
return v, errors.New("OldSumPingLatencyMs requires an ID field in the mutation")
}
oldValue, err := m.oldValue(ctx)
if err != nil {
return v, fmt.Errorf("querying old value for OldSumPingLatencyMs: %w", err)
}
return oldValue.SumPingLatencyMs, nil
}
// AddSumPingLatencyMs adds i to the "sum_ping_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) AddSumPingLatencyMs(i int64) {
if m.addsum_ping_latency_ms != nil {
*m.addsum_ping_latency_ms += i
} else {
m.addsum_ping_latency_ms = &i
}
}
// AddedSumPingLatencyMs returns the value that was added to the "sum_ping_latency_ms" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedSumPingLatencyMs() (r int64, exists bool) {
v := m.addsum_ping_latency_ms
if v == nil {
return
}
return *v, true
}
// ResetSumPingLatencyMs resets all changes to the "sum_ping_latency_ms" field.
func (m *ChannelMonitorDailyRollupMutation) ResetSumPingLatencyMs() {
m.sum_ping_latency_ms = nil
m.addsum_ping_latency_ms = nil
}
// SetCountPingLatency sets the "count_ping_latency" field.
func (m *ChannelMonitorDailyRollupMutation) SetCountPingLatency(i int) {
	m.count_ping_latency = &i
	// A direct set supersedes any pending increment.
	m.addcount_ping_latency = nil
}

// CountPingLatency returns the value of the "count_ping_latency" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) CountPingLatency() (r int, exists bool) {
	if val := m.count_ping_latency; val != nil {
		return *val, true
	}
	return
}

// OldCountPingLatency returns the old "count_ping_latency" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldCountPingLatency(ctx context.Context) (v int, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldCountPingLatency is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldCountPingLatency requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldCountPingLatency: %w", err)
	}
	return prev.CountPingLatency, nil
}

// AddCountPingLatency adds i to the "count_ping_latency" field.
func (m *ChannelMonitorDailyRollupMutation) AddCountPingLatency(i int) {
	if m.addcount_ping_latency == nil {
		m.addcount_ping_latency = &i
		return
	}
	*m.addcount_ping_latency += i
}

// AddedCountPingLatency returns the value that was added to the "count_ping_latency" field in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedCountPingLatency() (r int, exists bool) {
	if val := m.addcount_ping_latency; val != nil {
		return *val, true
	}
	return
}

// ResetCountPingLatency resets all changes to the "count_ping_latency" field.
func (m *ChannelMonitorDailyRollupMutation) ResetCountPingLatency() {
	m.addcount_ping_latency = nil
	m.count_ping_latency = nil
}
// SetComputedAt sets the "computed_at" field.
func (m *ChannelMonitorDailyRollupMutation) SetComputedAt(t time.Time) {
	m.computed_at = &t
}

// ComputedAt returns the value of the "computed_at" field in the mutation.
func (m *ChannelMonitorDailyRollupMutation) ComputedAt() (r time.Time, exists bool) {
	if val := m.computed_at; val != nil {
		return *val, true
	}
	return
}

// OldComputedAt returns the old "computed_at" field's value of the ChannelMonitorDailyRollup entity.
// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorDailyRollupMutation) OldComputedAt(ctx context.Context) (v time.Time, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldComputedAt is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldComputedAt requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldComputedAt: %w", err)
	}
	return prev.ComputedAt, nil
}

// ResetComputedAt resets all changes to the "computed_at" field.
func (m *ChannelMonitorDailyRollupMutation) ResetComputedAt() {
	m.computed_at = nil
}
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
func (m *ChannelMonitorDailyRollupMutation) ClearMonitor() {
	m.clearedmonitor = true
	// Clearing the edge also marks the backing FK field as cleared.
	m.clearedFields[channelmonitordailyrollup.FieldMonitorID] = struct{}{}
}

// MonitorCleared reports if the "monitor" edge to the ChannelMonitor entity was cleared.
func (m *ChannelMonitorDailyRollupMutation) MonitorCleared() bool {
	return m.clearedmonitor
}

// MonitorIDs returns the "monitor" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// MonitorID instead. It exists only for internal usage by the builders.
func (m *ChannelMonitorDailyRollupMutation) MonitorIDs() (ids []int64) {
	if m.monitor != nil {
		ids = []int64{*m.monitor}
	}
	return
}

// ResetMonitor resets all changes to the "monitor" edge.
func (m *ChannelMonitorDailyRollupMutation) ResetMonitor() {
	m.monitor = nil
	m.clearedmonitor = false
}
// Where appends a list of predicates to the ChannelMonitorDailyRollupMutation builder.
func (m *ChannelMonitorDailyRollupMutation) Where(ps ...predicate.ChannelMonitorDailyRollup) {
	m.predicates = append(m.predicates, ps...)
}

// WhereP appends storage-level predicates to the ChannelMonitorDailyRollupMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *ChannelMonitorDailyRollupMutation) WhereP(ps ...func(*sql.Selector)) {
	// Wrap each raw selector function in the typed predicate before appending.
	wrapped := make([]predicate.ChannelMonitorDailyRollup, len(ps))
	for i, p := range ps {
		wrapped[i] = p
	}
	m.Where(wrapped...)
}
// Op returns the operation name.
// This is the Op value the mutation was constructed with (create/update/delete
// and their *One variants).
func (m *ChannelMonitorDailyRollupMutation) Op() Op {
	return m.op
}
// SetOp allows setting the mutation operation.
// Used internally by builders/hooks that need to re-tag the mutation.
func (m *ChannelMonitorDailyRollupMutation) SetOp(op Op) {
	m.op = op
}
// Type returns the node type of this mutation (ChannelMonitorDailyRollup).
func (m *ChannelMonitorDailyRollupMutation) Type() string {
	return m.typ
}
// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *ChannelMonitorDailyRollupMutation) Fields() []string {
	// A field counts as "changed" when its staged-value pointer is non-nil.
	changed := make([]string, 0, 15)
	if m.deleted_at != nil {
		changed = append(changed, channelmonitordailyrollup.FieldDeletedAt)
	}
	if m.monitor != nil {
		changed = append(changed, channelmonitordailyrollup.FieldMonitorID)
	}
	if m.model != nil {
		changed = append(changed, channelmonitordailyrollup.FieldModel)
	}
	if m.bucket_date != nil {
		changed = append(changed, channelmonitordailyrollup.FieldBucketDate)
	}
	if m.total_checks != nil {
		changed = append(changed, channelmonitordailyrollup.FieldTotalChecks)
	}
	if m.ok_count != nil {
		changed = append(changed, channelmonitordailyrollup.FieldOkCount)
	}
	if m.operational_count != nil {
		changed = append(changed, channelmonitordailyrollup.FieldOperationalCount)
	}
	if m.degraded_count != nil {
		changed = append(changed, channelmonitordailyrollup.FieldDegradedCount)
	}
	if m.failed_count != nil {
		changed = append(changed, channelmonitordailyrollup.FieldFailedCount)
	}
	if m.error_count != nil {
		changed = append(changed, channelmonitordailyrollup.FieldErrorCount)
	}
	if m.sum_latency_ms != nil {
		changed = append(changed, channelmonitordailyrollup.FieldSumLatencyMs)
	}
	if m.count_latency != nil {
		changed = append(changed, channelmonitordailyrollup.FieldCountLatency)
	}
	if m.sum_ping_latency_ms != nil {
		changed = append(changed, channelmonitordailyrollup.FieldSumPingLatencyMs)
	}
	if m.count_ping_latency != nil {
		changed = append(changed, channelmonitordailyrollup.FieldCountPingLatency)
	}
	if m.computed_at != nil {
		changed = append(changed, channelmonitordailyrollup.FieldComputedAt)
	}
	return changed
}
// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
// Only values staged via Set* are reported here; pending Add* increments are
// exposed separately through AddedField.
func (m *ChannelMonitorDailyRollupMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case channelmonitordailyrollup.FieldDeletedAt:
		return m.DeletedAt()
	case channelmonitordailyrollup.FieldMonitorID:
		return m.MonitorID()
	case channelmonitordailyrollup.FieldModel:
		return m.Model()
	case channelmonitordailyrollup.FieldBucketDate:
		return m.BucketDate()
	case channelmonitordailyrollup.FieldTotalChecks:
		return m.TotalChecks()
	case channelmonitordailyrollup.FieldOkCount:
		return m.OkCount()
	case channelmonitordailyrollup.FieldOperationalCount:
		return m.OperationalCount()
	case channelmonitordailyrollup.FieldDegradedCount:
		return m.DegradedCount()
	case channelmonitordailyrollup.FieldFailedCount:
		return m.FailedCount()
	case channelmonitordailyrollup.FieldErrorCount:
		return m.ErrorCount()
	case channelmonitordailyrollup.FieldSumLatencyMs:
		return m.SumLatencyMs()
	case channelmonitordailyrollup.FieldCountLatency:
		return m.CountLatency()
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		return m.SumPingLatencyMs()
	case channelmonitordailyrollup.FieldCountPingLatency:
		return m.CountPingLatency()
	case channelmonitordailyrollup.FieldComputedAt:
		return m.ComputedAt()
	}
	return nil, false
}
// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
// Each case delegates to the matching Old* getter, which is only valid for
// UpdateOne operations (see the per-field Old* implementations above).
func (m *ChannelMonitorDailyRollupMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case channelmonitordailyrollup.FieldDeletedAt:
		return m.OldDeletedAt(ctx)
	case channelmonitordailyrollup.FieldMonitorID:
		return m.OldMonitorID(ctx)
	case channelmonitordailyrollup.FieldModel:
		return m.OldModel(ctx)
	case channelmonitordailyrollup.FieldBucketDate:
		return m.OldBucketDate(ctx)
	case channelmonitordailyrollup.FieldTotalChecks:
		return m.OldTotalChecks(ctx)
	case channelmonitordailyrollup.FieldOkCount:
		return m.OldOkCount(ctx)
	case channelmonitordailyrollup.FieldOperationalCount:
		return m.OldOperationalCount(ctx)
	case channelmonitordailyrollup.FieldDegradedCount:
		return m.OldDegradedCount(ctx)
	case channelmonitordailyrollup.FieldFailedCount:
		return m.OldFailedCount(ctx)
	case channelmonitordailyrollup.FieldErrorCount:
		return m.OldErrorCount(ctx)
	case channelmonitordailyrollup.FieldSumLatencyMs:
		return m.OldSumLatencyMs(ctx)
	case channelmonitordailyrollup.FieldCountLatency:
		return m.OldCountLatency(ctx)
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		return m.OldSumPingLatencyMs(ctx)
	case channelmonitordailyrollup.FieldCountPingLatency:
		return m.OldCountPingLatency(ctx)
	case channelmonitordailyrollup.FieldComputedAt:
		return m.OldComputedAt(ctx)
	}
	return nil, fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name)
}
// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *ChannelMonitorDailyRollupMutation) SetField(name string, value ent.Value) error {
	switch name {
	case channelmonitordailyrollup.FieldDeletedAt:
		val, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDeletedAt(val)
		return nil
	case channelmonitordailyrollup.FieldMonitorID:
		val, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetMonitorID(val)
		return nil
	case channelmonitordailyrollup.FieldModel:
		val, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetModel(val)
		return nil
	case channelmonitordailyrollup.FieldBucketDate:
		val, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetBucketDate(val)
		return nil
	case channelmonitordailyrollup.FieldTotalChecks:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetTotalChecks(val)
		return nil
	case channelmonitordailyrollup.FieldOkCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetOkCount(val)
		return nil
	case channelmonitordailyrollup.FieldOperationalCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetOperationalCount(val)
		return nil
	case channelmonitordailyrollup.FieldDegradedCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDegradedCount(val)
		return nil
	case channelmonitordailyrollup.FieldFailedCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetFailedCount(val)
		return nil
	case channelmonitordailyrollup.FieldErrorCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetErrorCount(val)
		return nil
	case channelmonitordailyrollup.FieldSumLatencyMs:
		val, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetSumLatencyMs(val)
		return nil
	case channelmonitordailyrollup.FieldCountLatency:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCountLatency(val)
		return nil
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		val, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetSumPingLatencyMs(val)
		return nil
	case channelmonitordailyrollup.FieldCountPingLatency:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCountPingLatency(val)
		return nil
	case channelmonitordailyrollup.FieldComputedAt:
		val, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetComputedAt(val)
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name)
}
// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedFields() []string {
	// A field has a pending increment when its add* pointer is non-nil.
	var added []string
	if m.addtotal_checks != nil {
		added = append(added, channelmonitordailyrollup.FieldTotalChecks)
	}
	if m.addok_count != nil {
		added = append(added, channelmonitordailyrollup.FieldOkCount)
	}
	if m.addoperational_count != nil {
		added = append(added, channelmonitordailyrollup.FieldOperationalCount)
	}
	if m.adddegraded_count != nil {
		added = append(added, channelmonitordailyrollup.FieldDegradedCount)
	}
	if m.addfailed_count != nil {
		added = append(added, channelmonitordailyrollup.FieldFailedCount)
	}
	if m.adderror_count != nil {
		added = append(added, channelmonitordailyrollup.FieldErrorCount)
	}
	if m.addsum_latency_ms != nil {
		added = append(added, channelmonitordailyrollup.FieldSumLatencyMs)
	}
	if m.addcount_latency != nil {
		added = append(added, channelmonitordailyrollup.FieldCountLatency)
	}
	if m.addsum_ping_latency_ms != nil {
		added = append(added, channelmonitordailyrollup.FieldSumPingLatencyMs)
	}
	if m.addcount_ping_latency != nil {
		added = append(added, channelmonitordailyrollup.FieldCountPingLatency)
	}
	return added
}
// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
// Non-numeric fields (deleted_at, model, bucket_date, computed_at) have no
// Added* accessor and fall through to (nil, false).
func (m *ChannelMonitorDailyRollupMutation) AddedField(name string) (ent.Value, bool) {
	switch name {
	case channelmonitordailyrollup.FieldTotalChecks:
		return m.AddedTotalChecks()
	case channelmonitordailyrollup.FieldOkCount:
		return m.AddedOkCount()
	case channelmonitordailyrollup.FieldOperationalCount:
		return m.AddedOperationalCount()
	case channelmonitordailyrollup.FieldDegradedCount:
		return m.AddedDegradedCount()
	case channelmonitordailyrollup.FieldFailedCount:
		return m.AddedFailedCount()
	case channelmonitordailyrollup.FieldErrorCount:
		return m.AddedErrorCount()
	case channelmonitordailyrollup.FieldSumLatencyMs:
		return m.AddedSumLatencyMs()
	case channelmonitordailyrollup.FieldCountLatency:
		return m.AddedCountLatency()
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		return m.AddedSumPingLatencyMs()
	case channelmonitordailyrollup.FieldCountPingLatency:
		return m.AddedCountPingLatency()
	}
	return nil, false
}
// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *ChannelMonitorDailyRollupMutation) AddField(name string, value ent.Value) error {
	switch name {
	case channelmonitordailyrollup.FieldTotalChecks:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddTotalChecks(val)
		return nil
	case channelmonitordailyrollup.FieldOkCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddOkCount(val)
		return nil
	case channelmonitordailyrollup.FieldOperationalCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddOperationalCount(val)
		return nil
	case channelmonitordailyrollup.FieldDegradedCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddDegradedCount(val)
		return nil
	case channelmonitordailyrollup.FieldFailedCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddFailedCount(val)
		return nil
	case channelmonitordailyrollup.FieldErrorCount:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddErrorCount(val)
		return nil
	case channelmonitordailyrollup.FieldSumLatencyMs:
		val, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddSumLatencyMs(val)
		return nil
	case channelmonitordailyrollup.FieldCountLatency:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddCountLatency(val)
		return nil
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		val, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddSumPingLatencyMs(val)
		return nil
	case channelmonitordailyrollup.FieldCountPingLatency:
		val, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddCountPingLatency(val)
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup numeric field %s", name)
}
// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *ChannelMonitorDailyRollupMutation) ClearedFields() []string {
	// deleted_at is the only nullable field on this node.
	var cleared []string
	if m.FieldCleared(channelmonitordailyrollup.FieldDeletedAt) {
		cleared = append(cleared, channelmonitordailyrollup.FieldDeletedAt)
	}
	return cleared
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *ChannelMonitorDailyRollupMutation) FieldCleared(name string) bool {
	_, cleared := m.clearedFields[name]
	return cleared
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *ChannelMonitorDailyRollupMutation) ClearField(name string) error {
	if name == channelmonitordailyrollup.FieldDeletedAt {
		m.ClearDeletedAt()
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup nullable field %s", name)
}
// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
// Delegates to the per-field Reset* methods, which drop staged values and any
// pending increments.
func (m *ChannelMonitorDailyRollupMutation) ResetField(name string) error {
	switch name {
	case channelmonitordailyrollup.FieldDeletedAt:
		m.ResetDeletedAt()
		return nil
	case channelmonitordailyrollup.FieldMonitorID:
		m.ResetMonitorID()
		return nil
	case channelmonitordailyrollup.FieldModel:
		m.ResetModel()
		return nil
	case channelmonitordailyrollup.FieldBucketDate:
		m.ResetBucketDate()
		return nil
	case channelmonitordailyrollup.FieldTotalChecks:
		m.ResetTotalChecks()
		return nil
	case channelmonitordailyrollup.FieldOkCount:
		m.ResetOkCount()
		return nil
	case channelmonitordailyrollup.FieldOperationalCount:
		m.ResetOperationalCount()
		return nil
	case channelmonitordailyrollup.FieldDegradedCount:
		m.ResetDegradedCount()
		return nil
	case channelmonitordailyrollup.FieldFailedCount:
		m.ResetFailedCount()
		return nil
	case channelmonitordailyrollup.FieldErrorCount:
		m.ResetErrorCount()
		return nil
	case channelmonitordailyrollup.FieldSumLatencyMs:
		m.ResetSumLatencyMs()
		return nil
	case channelmonitordailyrollup.FieldCountLatency:
		m.ResetCountLatency()
		return nil
	case channelmonitordailyrollup.FieldSumPingLatencyMs:
		m.ResetSumPingLatencyMs()
		return nil
	case channelmonitordailyrollup.FieldCountPingLatency:
		m.ResetCountPingLatency()
		return nil
	case channelmonitordailyrollup.FieldComputedAt:
		m.ResetComputedAt()
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name)
}
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedEdges() []string {
	added := make([]string, 0, 1)
	if m.monitor != nil {
		added = append(added, channelmonitordailyrollup.EdgeMonitor)
	}
	return added
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *ChannelMonitorDailyRollupMutation) AddedIDs(name string) []ent.Value {
	if name == channelmonitordailyrollup.EdgeMonitor && m.monitor != nil {
		return []ent.Value{*m.monitor}
	}
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
func (m *ChannelMonitorDailyRollupMutation) RemovedEdges() []string {
	// The only edge ("monitor") is unique; removal is modeled as clearing,
	// so this list is always empty for ChannelMonitorDailyRollup.
	return make([]string, 0, 1)
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
func (m *ChannelMonitorDailyRollupMutation) RemovedIDs(name string) []ent.Value {
	return nil
}
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *ChannelMonitorDailyRollupMutation) ClearedEdges() []string {
	cleared := make([]string, 0, 1)
	if m.clearedmonitor {
		cleared = append(cleared, channelmonitordailyrollup.EdgeMonitor)
	}
	return cleared
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *ChannelMonitorDailyRollupMutation) EdgeCleared(name string) bool {
	if name == channelmonitordailyrollup.EdgeMonitor {
		return m.clearedmonitor
	}
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *ChannelMonitorDailyRollupMutation) ClearEdge(name string) error {
	if name == channelmonitordailyrollup.EdgeMonitor {
		m.ClearMonitor()
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *ChannelMonitorDailyRollupMutation) ResetEdge(name string) error {
	if name == channelmonitordailyrollup.EdgeMonitor {
		m.ResetMonitor()
		return nil
	}
	return fmt.Errorf("unknown ChannelMonitorDailyRollup edge %s", name)
}
// ChannelMonitorHistoryMutation represents an operation that mutates the ChannelMonitorHistory nodes in the graph.
type ChannelMonitorHistoryMutation struct {
	config // embedded client configuration (driver etc.)
	op     Op     // mutation operation (create/update/delete variants)
	typ    string // node type name, reported by Type()
	id     *int64 // row ID when known to the builder
	deleted_at *time.Time // staged "deleted_at" value (soft-delete timestamp)
	model      *string    // staged "model" value
	status     *channelmonitorhistory.Status // staged "status" value
	latency_ms      *int // staged "latency_ms" value
	addlatency_ms   *int // pending increment for "latency_ms"
	ping_latency_ms    *int // staged "ping_latency_ms" value
	addping_latency_ms *int // pending increment for "ping_latency_ms"
	message    *string    // staged "message" value (accessors not in this chunk)
	checked_at *time.Time // staged "checked_at" value (accessors not in this chunk)
	clearedFields map[string]struct{} // names of fields explicitly cleared
	monitor        *int64 // staged "monitor_id" FK / "monitor" edge target
	clearedmonitor bool   // whether the "monitor" edge was cleared
	done           bool   // set once the mutation executed; blocks Old* queries
	oldValue       func(context.Context) (*ChannelMonitorHistory, error) // lazy pre-mutation row loader
	predicates     []predicate.ChannelMonitorHistory // predicates for Update/Delete scoping
}
// Compile-time check that the mutation satisfies the ent.Mutation interface.
var _ ent.Mutation = (*ChannelMonitorHistoryMutation)(nil)

// channelmonitorhistoryOption allows management of the mutation configuration using functional options.
type channelmonitorhistoryOption func(*ChannelMonitorHistoryMutation)

// newChannelMonitorHistoryMutation creates new mutation for the ChannelMonitorHistory entity.
func newChannelMonitorHistoryMutation(c config, op Op, opts ...channelmonitorhistoryOption) *ChannelMonitorHistoryMutation {
	mut := &ChannelMonitorHistoryMutation{
		config:        c,
		op:            op,
		typ:           TypeChannelMonitorHistory,
		clearedFields: make(map[string]struct{}),
	}
	for _, apply := range opts {
		apply(mut)
	}
	return mut
}
// withChannelMonitorHistoryID sets the ID field of the mutation.
func withChannelMonitorHistoryID(id int64) channelmonitorhistoryOption {
	return func(m *ChannelMonitorHistoryMutation) {
		var (
			err   error
			once  sync.Once
			value *ChannelMonitorHistory
		)
		// oldValue fetches the pre-mutation row at most once (sync.Once) and
		// memoizes both the result and any error for subsequent calls.
		m.oldValue = func(ctx context.Context) (*ChannelMonitorHistory, error) {
			once.Do(func() {
				// m.done is set after the mutation has executed; old values
				// may no longer be queried at that point.
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().ChannelMonitorHistory.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}
// withChannelMonitorHistory sets the old ChannelMonitorHistory of the mutation.
func withChannelMonitorHistory(node *ChannelMonitorHistory) channelmonitorhistoryOption {
	return func(m *ChannelMonitorHistoryMutation) {
		// The node is already in hand, so no database fetch is needed.
		m.oldValue = func(context.Context) (*ChannelMonitorHistory, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}
// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m ChannelMonitorHistoryMutation) Client() *Client {
	c := &Client{config: m.config}
	c.init()
	return c
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m ChannelMonitorHistoryMutation) Tx() (*Tx, error) {
	if _, inTx := m.driver.(*txDriver); !inTx {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *ChannelMonitorHistoryMutation) ID() (id int64, exists bool) {
	if m.id != nil {
		return *m.id, true
	}
	return
}
// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation.
func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []int64{id}, nil
		}
		// ID unknown for a *One op: fall through to a predicate query below.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().ChannelMonitorHistory.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}
// SetDeletedAt sets the "deleted_at" field.
func (m *ChannelMonitorHistoryMutation) SetDeletedAt(t time.Time) {
	m.deleted_at = &t
}

// DeletedAt returns the value of the "deleted_at" field in the mutation.
func (m *ChannelMonitorHistoryMutation) DeletedAt() (r time.Time, exists bool) {
	if val := m.deleted_at; val != nil {
		return *val, true
	}
	return
}

// OldDeletedAt returns the old "deleted_at" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldDeletedAt requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err)
	}
	return prev.DeletedAt, nil
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (m *ChannelMonitorHistoryMutation) ClearDeletedAt() {
	m.deleted_at = nil
	m.clearedFields[channelmonitorhistory.FieldDeletedAt] = struct{}{}
}

// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation.
func (m *ChannelMonitorHistoryMutation) DeletedAtCleared() bool {
	_, cleared := m.clearedFields[channelmonitorhistory.FieldDeletedAt]
	return cleared
}

// ResetDeletedAt resets all changes to the "deleted_at" field.
func (m *ChannelMonitorHistoryMutation) ResetDeletedAt() {
	m.deleted_at = nil
	delete(m.clearedFields, channelmonitorhistory.FieldDeletedAt)
}
// SetMonitorID sets the "monitor_id" field.
func (m *ChannelMonitorHistoryMutation) SetMonitorID(i int64) {
	// The "monitor_id" field and the "monitor" edge share the same storage.
	m.monitor = &i
}

// MonitorID returns the value of the "monitor_id" field in the mutation.
func (m *ChannelMonitorHistoryMutation) MonitorID() (r int64, exists bool) {
	if val := m.monitor; val != nil {
		return *val, true
	}
	return
}

// OldMonitorID returns the old "monitor_id" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldMonitorID(ctx context.Context) (v int64, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldMonitorID is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldMonitorID requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldMonitorID: %w", err)
	}
	return prev.MonitorID, nil
}

// ResetMonitorID resets all changes to the "monitor_id" field.
func (m *ChannelMonitorHistoryMutation) ResetMonitorID() {
	m.monitor = nil
}
// SetModel sets the "model" field.
func (m *ChannelMonitorHistoryMutation) SetModel(s string) {
	m.model = &s
}

// Model returns the value of the "model" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Model() (r string, exists bool) {
	if val := m.model; val != nil {
		return *val, true
	}
	return
}

// OldModel returns the old "model" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldModel(ctx context.Context) (v string, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldModel is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldModel requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldModel: %w", err)
	}
	return prev.Model, nil
}

// ResetModel resets all changes to the "model" field.
func (m *ChannelMonitorHistoryMutation) ResetModel() {
	m.model = nil
}
// SetStatus sets the "status" field.
func (m *ChannelMonitorHistoryMutation) SetStatus(c channelmonitorhistory.Status) {
	m.status = &c
}

// Status returns the value of the "status" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Status() (r channelmonitorhistory.Status, exists bool) {
	if val := m.status; val != nil {
		return *val, true
	}
	return
}

// OldStatus returns the old "status" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldStatus(ctx context.Context) (v channelmonitorhistory.Status, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldStatus is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldStatus requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldStatus: %w", err)
	}
	return prev.Status, nil
}

// ResetStatus resets all changes to the "status" field.
func (m *ChannelMonitorHistoryMutation) ResetStatus() {
	m.status = nil
}
// SetLatencyMs sets the "latency_ms" field.
func (m *ChannelMonitorHistoryMutation) SetLatencyMs(i int) {
	m.latency_ms = &i
	// A direct set supersedes any pending increment.
	m.addlatency_ms = nil
}

// LatencyMs returns the value of the "latency_ms" field in the mutation.
func (m *ChannelMonitorHistoryMutation) LatencyMs() (r int, exists bool) {
	if val := m.latency_ms; val != nil {
		return *val, true
	}
	return
}

// OldLatencyMs returns the old "latency_ms" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldLatencyMs(ctx context.Context) (v *int, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldLatencyMs is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldLatencyMs requires an ID field in the mutation")
	}
	prev, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldLatencyMs: %w", err)
	}
	return prev.LatencyMs, nil
}

// AddLatencyMs adds i to the "latency_ms" field.
func (m *ChannelMonitorHistoryMutation) AddLatencyMs(i int) {
	if m.addlatency_ms == nil {
		m.addlatency_ms = &i
		return
	}
	*m.addlatency_ms += i
}

// AddedLatencyMs returns the value that was added to the "latency_ms" field in this mutation.
func (m *ChannelMonitorHistoryMutation) AddedLatencyMs() (r int, exists bool) {
	if val := m.addlatency_ms; val != nil {
		return *val, true
	}
	return
}

// ClearLatencyMs clears the value of the "latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ClearLatencyMs() {
	m.latency_ms = nil
	m.addlatency_ms = nil
	m.clearedFields[channelmonitorhistory.FieldLatencyMs] = struct{}{}
}

// LatencyMsCleared returns if the "latency_ms" field was cleared in this mutation.
func (m *ChannelMonitorHistoryMutation) LatencyMsCleared() bool {
	_, cleared := m.clearedFields[channelmonitorhistory.FieldLatencyMs]
	return cleared
}

// ResetLatencyMs resets all changes to the "latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ResetLatencyMs() {
	m.latency_ms = nil
	m.addlatency_ms = nil
	delete(m.clearedFields, channelmonitorhistory.FieldLatencyMs)
}
// SetPingLatencyMs sets the "ping_latency_ms" field.
func (m *ChannelMonitorHistoryMutation) SetPingLatencyMs(i int) {
m.ping_latency_ms = &i
m.addping_latency_ms = nil
}
// PingLatencyMs returns the value of the "ping_latency_ms" field in the mutation.
func (m *ChannelMonitorHistoryMutation) PingLatencyMs() (r int, exists bool) {
	if m.ping_latency_ms == nil {
		// Field was never set in this mutation.
		return
	}
	return *m.ping_latency_ms, true
}
// OldPingLatencyMs returns the old "ping_latency_ms" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldPingLatencyMs(ctx context.Context) (v *int, err error) {
	switch {
	case !m.op.Is(OpUpdateOne):
		return v, errors.New("OldPingLatencyMs is only allowed on UpdateOne operations")
	case m.id == nil || m.oldValue == nil:
		return v, errors.New("OldPingLatencyMs requires an ID field in the mutation")
	}
	prev, qErr := m.oldValue(ctx)
	if qErr != nil {
		return v, fmt.Errorf("querying old value for OldPingLatencyMs: %w", qErr)
	}
	return prev.PingLatencyMs, nil
}
// AddPingLatencyMs adds i to the "ping_latency_ms" field.
func (m *ChannelMonitorHistoryMutation) AddPingLatencyMs(i int) {
	if m.addping_latency_ms == nil {
		// First increment recorded by this mutation.
		m.addping_latency_ms = &i
		return
	}
	*m.addping_latency_ms += i
}
// AddedPingLatencyMs returns the value that was added to the "ping_latency_ms" field in this mutation.
func (m *ChannelMonitorHistoryMutation) AddedPingLatencyMs() (r int, exists bool) {
	if m.addping_latency_ms == nil {
		// No AddPingLatencyMs call recorded; zero value, exists=false.
		return
	}
	return *m.addping_latency_ms, true
}
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ClearPingLatencyMs() {
	// Record the clear marker and drop any pending set/add for the field.
	m.clearedFields[channelmonitorhistory.FieldPingLatencyMs] = struct{}{}
	m.ping_latency_ms = nil
	m.addping_latency_ms = nil
}
// PingLatencyMsCleared returns if the "ping_latency_ms" field was cleared in this mutation.
func (m *ChannelMonitorHistoryMutation) PingLatencyMsCleared() bool {
	_, cleared := m.clearedFields[channelmonitorhistory.FieldPingLatencyMs]
	return cleared
}
// ResetPingLatencyMs resets all changes to the "ping_latency_ms" field.
func (m *ChannelMonitorHistoryMutation) ResetPingLatencyMs() {
	// Forget the clear marker as well as any pending set/add.
	delete(m.clearedFields, channelmonitorhistory.FieldPingLatencyMs)
	m.ping_latency_ms = nil
	m.addping_latency_ms = nil
}
// SetMessage sets the "message" field.
func (m *ChannelMonitorHistoryMutation) SetMessage(s string) {
	// s is already a copy (Go passes strings by value), so its address is safe to keep.
	msg := s
	m.message = &msg
}
// Message returns the value of the "message" field in the mutation.
func (m *ChannelMonitorHistoryMutation) Message() (r string, exists bool) {
	if m.message == nil {
		// Field was never set in this mutation.
		return
	}
	return *m.message, true
}
// OldMessage returns the old "message" field's value of the ChannelMonitorHistory entity.
// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ChannelMonitorHistoryMutation) OldMessage(ctx context.Context) (v string, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldMessage is only allowed on UpdateOne operations")
} }
if m.id == nil || m.oldValue == nil { if m.id == nil || m.oldValue == nil {
return v, errors.New("OldMessage requires an ID field in the mutation") return v, errors.New("OldMessage requires an ID field in the mutation")
...@@ -10445,7 +12071,10 @@ func (m *ChannelMonitorHistoryMutation) Type() string { ...@@ -10445,7 +12071,10 @@ func (m *ChannelMonitorHistoryMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call // order to get all numeric fields that were incremented/decremented, call
// AddedFields(). // AddedFields().
func (m *ChannelMonitorHistoryMutation) Fields() []string { func (m *ChannelMonitorHistoryMutation) Fields() []string {
fields := make([]string, 0, 7) fields := make([]string, 0, 8)
if m.deleted_at != nil {
fields = append(fields, channelmonitorhistory.FieldDeletedAt)
}
if m.monitor != nil { if m.monitor != nil {
fields = append(fields, channelmonitorhistory.FieldMonitorID) fields = append(fields, channelmonitorhistory.FieldMonitorID)
} }
...@@ -10475,6 +12104,8 @@ func (m *ChannelMonitorHistoryMutation) Fields() []string { ...@@ -10475,6 +12104,8 @@ func (m *ChannelMonitorHistoryMutation) Fields() []string {
// schema. // schema.
func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) { func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) {
switch name { switch name {
case channelmonitorhistory.FieldDeletedAt:
return m.DeletedAt()
case channelmonitorhistory.FieldMonitorID: case channelmonitorhistory.FieldMonitorID:
return m.MonitorID() return m.MonitorID()
case channelmonitorhistory.FieldModel: case channelmonitorhistory.FieldModel:
...@@ -10498,6 +12129,8 @@ func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) { ...@@ -10498,6 +12129,8 @@ func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) {
// database failed. // database failed.
func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) { func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
switch name { switch name {
case channelmonitorhistory.FieldDeletedAt:
return m.OldDeletedAt(ctx)
case channelmonitorhistory.FieldMonitorID: case channelmonitorhistory.FieldMonitorID:
return m.OldMonitorID(ctx) return m.OldMonitorID(ctx)
case channelmonitorhistory.FieldModel: case channelmonitorhistory.FieldModel:
...@@ -10521,6 +12154,13 @@ func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name strin ...@@ -10521,6 +12154,13 @@ func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name strin
// type. // type.
func (m *ChannelMonitorHistoryMutation) SetField(name string, value ent.Value) error { func (m *ChannelMonitorHistoryMutation) SetField(name string, value ent.Value) error {
switch name { switch name {
case channelmonitorhistory.FieldDeletedAt:
v, ok := value.(time.Time)
if !ok {
return fmt.Errorf("unexpected type %T for field %s", value, name)
}
m.SetDeletedAt(v)
return nil
case channelmonitorhistory.FieldMonitorID: case channelmonitorhistory.FieldMonitorID:
v, ok := value.(int64) v, ok := value.(int64)
if !ok { if !ok {
...@@ -10627,6 +12267,9 @@ func (m *ChannelMonitorHistoryMutation) AddField(name string, value ent.Value) e ...@@ -10627,6 +12267,9 @@ func (m *ChannelMonitorHistoryMutation) AddField(name string, value ent.Value) e
// mutation. // mutation.
func (m *ChannelMonitorHistoryMutation) ClearedFields() []string { func (m *ChannelMonitorHistoryMutation) ClearedFields() []string {
var fields []string var fields []string
if m.FieldCleared(channelmonitorhistory.FieldDeletedAt) {
fields = append(fields, channelmonitorhistory.FieldDeletedAt)
}
if m.FieldCleared(channelmonitorhistory.FieldLatencyMs) { if m.FieldCleared(channelmonitorhistory.FieldLatencyMs) {
fields = append(fields, channelmonitorhistory.FieldLatencyMs) fields = append(fields, channelmonitorhistory.FieldLatencyMs)
} }
...@@ -10650,6 +12293,9 @@ func (m *ChannelMonitorHistoryMutation) FieldCleared(name string) bool { ...@@ -10650,6 +12293,9 @@ func (m *ChannelMonitorHistoryMutation) FieldCleared(name string) bool {
// error if the field is not defined in the schema. // error if the field is not defined in the schema.
func (m *ChannelMonitorHistoryMutation) ClearField(name string) error { func (m *ChannelMonitorHistoryMutation) ClearField(name string) error {
switch name { switch name {
case channelmonitorhistory.FieldDeletedAt:
m.ClearDeletedAt()
return nil
case channelmonitorhistory.FieldLatencyMs: case channelmonitorhistory.FieldLatencyMs:
m.ClearLatencyMs() m.ClearLatencyMs()
return nil return nil
...@@ -10667,6 +12313,9 @@ func (m *ChannelMonitorHistoryMutation) ClearField(name string) error { ...@@ -10667,6 +12313,9 @@ func (m *ChannelMonitorHistoryMutation) ClearField(name string) error {
// It returns an error if the field is not defined in the schema. // It returns an error if the field is not defined in the schema.
func (m *ChannelMonitorHistoryMutation) ResetField(name string) error { func (m *ChannelMonitorHistoryMutation) ResetField(name string) error {
switch name { switch name {
case channelmonitorhistory.FieldDeletedAt:
m.ResetDeletedAt()
return nil
case channelmonitorhistory.FieldMonitorID: case channelmonitorhistory.FieldMonitorID:
m.ResetMonitorID() m.ResetMonitorID()
return nil return nil
...@@ -30,6 +30,9 @@ type AuthIdentityChannel func(*sql.Selector) ...@@ -30,6 +30,9 @@ type AuthIdentityChannel func(*sql.Selector)
// ChannelMonitor is the predicate function for channelmonitor builders. // ChannelMonitor is the predicate function for channelmonitor builders.
type ChannelMonitor func(*sql.Selector) type ChannelMonitor func(*sql.Selector)
// ChannelMonitorDailyRollup is the predicate function for channelmonitordailyrollup builders.
type ChannelMonitorDailyRollup func(*sql.Selector)
// ChannelMonitorHistory is the predicate function for channelmonitorhistory builders. // ChannelMonitorHistory is the predicate function for channelmonitorhistory builders.
type ChannelMonitorHistory func(*sql.Selector) type ChannelMonitorHistory func(*sql.Selector)
......
...@@ -13,6 +13,7 @@ import ( ...@@ -13,6 +13,7 @@ import (
"github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentity"
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
"github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/group"
...@@ -520,6 +521,82 @@ func init() { ...@@ -520,6 +521,82 @@ func init() {
channelmonitorDescIntervalSeconds := channelmonitorFields[8].Descriptor() channelmonitorDescIntervalSeconds := channelmonitorFields[8].Descriptor()
// channelmonitor.IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save. // channelmonitor.IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save.
channelmonitor.IntervalSecondsValidator = channelmonitorDescIntervalSeconds.Validators[0].(func(int) error) channelmonitor.IntervalSecondsValidator = channelmonitorDescIntervalSeconds.Validators[0].(func(int) error)
	// ---- ChannelMonitorDailyRollup: wire the soft-delete mixin's hook and
	// interceptor, then bind field validators/defaults from the schema.
	// NOTE: fields are addressed positionally (Fields()[1], [3], ...); these
	// indices must match the declaration order in
	// schema.ChannelMonitorDailyRollup.Fields.
	channelmonitordailyrollupMixin := schema.ChannelMonitorDailyRollup{}.Mixin()
	channelmonitordailyrollupMixinHooks0 := channelmonitordailyrollupMixin[0].Hooks()
	channelmonitordailyrollup.Hooks[0] = channelmonitordailyrollupMixinHooks0[0]
	channelmonitordailyrollupMixinInters0 := channelmonitordailyrollupMixin[0].Interceptors()
	channelmonitordailyrollup.Interceptors[0] = channelmonitordailyrollupMixinInters0[0]
	channelmonitordailyrollupFields := schema.ChannelMonitorDailyRollup{}.Fields()
	_ = channelmonitordailyrollupFields
	// channelmonitordailyrollupDescModel is the schema descriptor for model field.
	channelmonitordailyrollupDescModel := channelmonitordailyrollupFields[1].Descriptor()
	// channelmonitordailyrollup.ModelValidator is a validator for the "model" field. It is called by the builders before save.
	// The closure chains the schema's two "model" validators in declaration order.
	channelmonitordailyrollup.ModelValidator = func() func(string) error {
		validators := channelmonitordailyrollupDescModel.Validators
		fns := [...]func(string) error{
			validators[0].(func(string) error),
			validators[1].(func(string) error),
		}
		return func(model string) error {
			for _, fn := range fns {
				if err := fn(model); err != nil {
					return err
				}
			}
			return nil
		}
	}()
	// Counter fields below all default to 0 on creation.
	// channelmonitordailyrollupDescTotalChecks is the schema descriptor for total_checks field.
	channelmonitordailyrollupDescTotalChecks := channelmonitordailyrollupFields[3].Descriptor()
	// channelmonitordailyrollup.DefaultTotalChecks holds the default value on creation for the total_checks field.
	channelmonitordailyrollup.DefaultTotalChecks = channelmonitordailyrollupDescTotalChecks.Default.(int)
	// channelmonitordailyrollupDescOkCount is the schema descriptor for ok_count field.
	channelmonitordailyrollupDescOkCount := channelmonitordailyrollupFields[4].Descriptor()
	// channelmonitordailyrollup.DefaultOkCount holds the default value on creation for the ok_count field.
	channelmonitordailyrollup.DefaultOkCount = channelmonitordailyrollupDescOkCount.Default.(int)
	// channelmonitordailyrollupDescOperationalCount is the schema descriptor for operational_count field.
	channelmonitordailyrollupDescOperationalCount := channelmonitordailyrollupFields[5].Descriptor()
	// channelmonitordailyrollup.DefaultOperationalCount holds the default value on creation for the operational_count field.
	channelmonitordailyrollup.DefaultOperationalCount = channelmonitordailyrollupDescOperationalCount.Default.(int)
	// channelmonitordailyrollupDescDegradedCount is the schema descriptor for degraded_count field.
	channelmonitordailyrollupDescDegradedCount := channelmonitordailyrollupFields[6].Descriptor()
	// channelmonitordailyrollup.DefaultDegradedCount holds the default value on creation for the degraded_count field.
	channelmonitordailyrollup.DefaultDegradedCount = channelmonitordailyrollupDescDegradedCount.Default.(int)
	// channelmonitordailyrollupDescFailedCount is the schema descriptor for failed_count field.
	channelmonitordailyrollupDescFailedCount := channelmonitordailyrollupFields[7].Descriptor()
	// channelmonitordailyrollup.DefaultFailedCount holds the default value on creation for the failed_count field.
	channelmonitordailyrollup.DefaultFailedCount = channelmonitordailyrollupDescFailedCount.Default.(int)
	// channelmonitordailyrollupDescErrorCount is the schema descriptor for error_count field.
	channelmonitordailyrollupDescErrorCount := channelmonitordailyrollupFields[8].Descriptor()
	// channelmonitordailyrollup.DefaultErrorCount holds the default value on creation for the error_count field.
	channelmonitordailyrollup.DefaultErrorCount = channelmonitordailyrollupDescErrorCount.Default.(int)
	// channelmonitordailyrollupDescSumLatencyMs is the schema descriptor for sum_latency_ms field.
	channelmonitordailyrollupDescSumLatencyMs := channelmonitordailyrollupFields[9].Descriptor()
	// channelmonitordailyrollup.DefaultSumLatencyMs holds the default value on creation for the sum_latency_ms field.
	channelmonitordailyrollup.DefaultSumLatencyMs = channelmonitordailyrollupDescSumLatencyMs.Default.(int64)
	// channelmonitordailyrollupDescCountLatency is the schema descriptor for count_latency field.
	channelmonitordailyrollupDescCountLatency := channelmonitordailyrollupFields[10].Descriptor()
	// channelmonitordailyrollup.DefaultCountLatency holds the default value on creation for the count_latency field.
	channelmonitordailyrollup.DefaultCountLatency = channelmonitordailyrollupDescCountLatency.Default.(int)
	// channelmonitordailyrollupDescSumPingLatencyMs is the schema descriptor for sum_ping_latency_ms field.
	channelmonitordailyrollupDescSumPingLatencyMs := channelmonitordailyrollupFields[11].Descriptor()
	// channelmonitordailyrollup.DefaultSumPingLatencyMs holds the default value on creation for the sum_ping_latency_ms field.
	channelmonitordailyrollup.DefaultSumPingLatencyMs = channelmonitordailyrollupDescSumPingLatencyMs.Default.(int64)
	// channelmonitordailyrollupDescCountPingLatency is the schema descriptor for count_ping_latency field.
	channelmonitordailyrollupDescCountPingLatency := channelmonitordailyrollupFields[12].Descriptor()
	// channelmonitordailyrollup.DefaultCountPingLatency holds the default value on creation for the count_ping_latency field.
	channelmonitordailyrollup.DefaultCountPingLatency = channelmonitordailyrollupDescCountPingLatency.Default.(int)
	// channelmonitordailyrollupDescComputedAt is the schema descriptor for computed_at field.
	channelmonitordailyrollupDescComputedAt := channelmonitordailyrollupFields[13].Descriptor()
	// channelmonitordailyrollup.DefaultComputedAt holds the default value on creation for the computed_at field.
	channelmonitordailyrollup.DefaultComputedAt = channelmonitordailyrollupDescComputedAt.Default.(func() time.Time)
	// channelmonitordailyrollup.UpdateDefaultComputedAt holds the default value on update for the computed_at field.
	channelmonitordailyrollup.UpdateDefaultComputedAt = channelmonitordailyrollupDescComputedAt.UpdateDefault.(func() time.Time)
	// ---- ChannelMonitorHistory: wire the soft-delete mixin's hook and interceptor.
	channelmonitorhistoryMixin := schema.ChannelMonitorHistory{}.Mixin()
	channelmonitorhistoryMixinHooks0 := channelmonitorhistoryMixin[0].Hooks()
	channelmonitorhistory.Hooks[0] = channelmonitorhistoryMixinHooks0[0]
	channelmonitorhistoryMixinInters0 := channelmonitorhistoryMixin[0].Interceptors()
	channelmonitorhistory.Interceptors[0] = channelmonitorhistoryMixinInters0[0]
channelmonitorhistoryFields := schema.ChannelMonitorHistory{}.Fields() channelmonitorhistoryFields := schema.ChannelMonitorHistory{}.Fields()
_ = channelmonitorhistoryFields _ = channelmonitorhistoryFields
// channelmonitorhistoryDescModel is the schema descriptor for model field. // channelmonitorhistoryDescModel is the schema descriptor for model field.
......
...@@ -69,6 +69,8 @@ func (ChannelMonitor) Edges() []ent.Edge { ...@@ -69,6 +69,8 @@ func (ChannelMonitor) Edges() []ent.Edge {
return []ent.Edge{ return []ent.Edge{
edge.To("history", ChannelMonitorHistory.Type). edge.To("history", ChannelMonitorHistory.Type).
Annotations(entsql.OnDelete(entsql.Cascade)), Annotations(entsql.OnDelete(entsql.Cascade)),
edge.To("daily_rollups", ChannelMonitorDailyRollup.Type).
Annotations(entsql.OnDelete(entsql.Cascade)),
} }
} }
......
package schema
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
)
// ChannelMonitorDailyRollup is the per-day aggregate of channel-monitor checks,
// keyed by (monitor_id, model, bucket_date). Each day's raw history rows are
// collapsed into one row keeping the status distribution plus latency sums, so
// 7d/15d/30d availability windows can be computed by weighted sums:
// avg_latency = sum_latency_ms / count_latency; availability = ok_count / total_checks.
type ChannelMonitorDailyRollup struct {
	ent.Schema
}
// Annotations pins the backing table name for the rollup entity.
func (ChannelMonitorDailyRollup) Annotations() []schema.Annotation {
	tableName := entsql.Annotation{Table: "channel_monitor_daily_rollups"}
	return []schema.Annotation{tableName}
}
// Mixin attaches the project's soft-delete mixin (adds deleted_at handling).
func (ChannelMonitorDailyRollup) Mixin() []ent.Mixin {
	softDelete := mixins.SoftDeleteMixin{}
	return []ent.Mixin{softDelete}
}
// Fields declares the rollup columns.
//
// NOTE(review): the positional order below is load-bearing — the generated
// runtime wiring reads descriptors by index (Fields()[1] for "model",
// Fields()[3] for "total_checks", ..., Fields()[13] for "computed_at").
// Do not reorder without regenerating ent code.
func (ChannelMonitorDailyRollup) Fields() []ent.Field {
	return []ent.Field{
		field.Int64("monitor_id"),
		// Model name; non-empty, at most 200 chars.
		field.String("model").
			NotEmpty().
			MaxLen(200),
		// Aggregation bucket; stored as a plain SQL DATE on Postgres.
		field.Time("bucket_date").
			SchemaType(map[string]string{dialect.Postgres: "date"}),
		// Status-distribution counters for the day.
		field.Int("total_checks").Default(0),
		field.Int("ok_count").Default(0),
		field.Int("operational_count").Default(0),
		field.Int("degraded_count").Default(0),
		field.Int("failed_count").Default(0),
		field.Int("error_count").Default(0),
		// Latency sums and sample counts; averages are derived as sum/count at read time.
		field.Int64("sum_latency_ms").Default(0),
		field.Int("count_latency").Default(0),
		field.Int64("sum_ping_latency_ms").Default(0),
		field.Int("count_ping_latency").Default(0),
		// When this row was last (re)computed; refreshed on every update.
		field.Time("computed_at").Default(time.Now).UpdateDefault(time.Now),
	}
}
// Edges links each rollup row back to its owning ChannelMonitor via monitor_id.
func (ChannelMonitorDailyRollup) Edges() []ent.Edge {
	owner := edge.From("monitor", ChannelMonitor.Type).
		Ref("daily_rollups").
		Field("monitor_id").
		Unique().
		Required()
	return []ent.Edge{owner}
}
// Indexes: a unique key on (monitor_id, model, bucket_date) — the upsert
// conflict target — plus a bucket_date index for time-window scans.
func (ChannelMonitorDailyRollup) Indexes() []ent.Index {
	uniqueBucket := index.Fields("monitor_id", "model", "bucket_date").Unique()
	byDate := index.Fields("bucket_date")
	return []ent.Index{uniqueBucket, byDate}
}
...@@ -9,10 +9,13 @@ import ( ...@@ -9,10 +9,13 @@ import (
"entgo.io/ent/schema/edge" "entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"entgo.io/ent/schema/index" "entgo.io/ent/schema/index"
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
) )
// ChannelMonitorHistory holds the schema definition for the ChannelMonitorHistory entity. // ChannelMonitorHistory holds the schema definition for the ChannelMonitorHistory entity.
// 渠道监控历史:每次检测每个模型一行记录,由调度器写入,定期清理 30 天前的旧数据。 // 渠道监控历史:每次检测每个模型一行记录。明细只保留 1 天,超过 1 天的数据被聚合到
// channel_monitor_daily_rollups 后软删(deleted_at),由后续懒清理任务物理移除。
type ChannelMonitorHistory struct { type ChannelMonitorHistory struct {
ent.Schema ent.Schema
} }
...@@ -23,6 +26,12 @@ func (ChannelMonitorHistory) Annotations() []schema.Annotation { ...@@ -23,6 +26,12 @@ func (ChannelMonitorHistory) Annotations() []schema.Annotation {
} }
} }
func (ChannelMonitorHistory) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.SoftDeleteMixin{},
}
}
func (ChannelMonitorHistory) Fields() []ent.Field { func (ChannelMonitorHistory) Fields() []ent.Field {
return []ent.Field{ return []ent.Field{
field.Int64("monitor_id"), field.Int64("monitor_id"),
......
...@@ -30,6 +30,8 @@ type Tx struct { ...@@ -30,6 +30,8 @@ type Tx struct {
AuthIdentityChannel *AuthIdentityChannelClient AuthIdentityChannel *AuthIdentityChannelClient
// ChannelMonitor is the client for interacting with the ChannelMonitor builders. // ChannelMonitor is the client for interacting with the ChannelMonitor builders.
ChannelMonitor *ChannelMonitorClient ChannelMonitor *ChannelMonitorClient
// ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders.
ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient
// ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders.
ChannelMonitorHistory *ChannelMonitorHistoryClient ChannelMonitorHistory *ChannelMonitorHistoryClient
// ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders.
...@@ -217,6 +219,7 @@ func (tx *Tx) init() { ...@@ -217,6 +219,7 @@ func (tx *Tx) init() {
tx.AuthIdentity = NewAuthIdentityClient(tx.config) tx.AuthIdentity = NewAuthIdentityClient(tx.config)
tx.AuthIdentityChannel = NewAuthIdentityChannelClient(tx.config) tx.AuthIdentityChannel = NewAuthIdentityChannelClient(tx.config)
tx.ChannelMonitor = NewChannelMonitorClient(tx.config) tx.ChannelMonitor = NewChannelMonitorClient(tx.config)
tx.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(tx.config)
tx.ChannelMonitorHistory = NewChannelMonitorHistoryClient(tx.config) tx.ChannelMonitorHistory = NewChannelMonitorHistoryClient(tx.config)
tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config) tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config)
tx.Group = NewGroupClient(tx.config) tx.Group = NewGroupClient(tx.config)
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
dbent "github.com/Wei-Shaw/sub2api/ent" dbent "github.com/Wei-Shaw/sub2api/ent"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/internal/service" "github.com/Wei-Shaw/sub2api/internal/service"
"github.com/lib/pq" "github.com/lib/pq"
...@@ -246,6 +247,7 @@ func (r *channelMonitorRepository) ListLatestPerModel(ctx context.Context, monit ...@@ -246,6 +247,7 @@ func (r *channelMonitorRepository) ListLatestPerModel(ctx context.Context, monit
model, status, latency_ms, ping_latency_ms, checked_at model, status, latency_ms, ping_latency_ms, checked_at
FROM channel_monitor_histories FROM channel_monitor_histories
WHERE monitor_id = $1 WHERE monitor_id = $1
AND deleted_at IS NULL
ORDER BY model, checked_at DESC ORDER BY model, checked_at DESC
` `
rows, err := r.db.QueryContext(ctx, q, monitorID) rows, err := r.db.QueryContext(ctx, q, monitorID)
...@@ -280,23 +282,48 @@ func assignNullInt(dst **int, n sql.NullInt64) { ...@@ -280,23 +282,48 @@ func assignNullInt(dst **int, n sql.NullInt64) {
// ComputeAvailability 计算指定窗口内每个模型的可用率与平均延迟。 // ComputeAvailability 计算指定窗口内每个模型的可用率与平均延迟。
// "可用" = status IN (operational, degraded)。 // "可用" = status IN (operational, degraded)。
//
// 数据来源:明细表只保留 1 天;窗口前其余天数走聚合表。
// - raw = 今天(CURRENT_DATE 起)的未软删明细,按 model 累加
// - rollup = [CURRENT_DATE - windowDays, CURRENT_DATE) 区间的聚合行
//
// 总窗口为 "今天 + 过去 windowDays 天",比 windowDays 字面值大 1 天,但因为聚合
// 是按整 UTC 日切的,这是聚合化无法避免的精度损失,且偏宽不偏窄(数据更全)。
func (r *channelMonitorRepository) ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*service.ChannelMonitorAvailability, error) { func (r *channelMonitorRepository) ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*service.ChannelMonitorAvailability, error) {
if windowDays <= 0 { if windowDays <= 0 {
windowDays = 7 windowDays = 7
} }
const q = ` const q = `
SELECT WITH raw AS (
model, SELECT model,
COUNT(*) AS total_checks, COUNT(*) AS total_checks,
COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count,
AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms,
FROM channel_monitor_histories COUNT(latency_ms) AS count_latency
WHERE monitor_id = $1 FROM channel_monitor_histories
AND checked_at >= $2 WHERE monitor_id = $1
AND deleted_at IS NULL
AND checked_at >= CURRENT_DATE
GROUP BY model
),
rollup AS (
SELECT model, total_checks, ok_count, sum_latency_ms, count_latency
FROM channel_monitor_daily_rollups
WHERE monitor_id = $1
AND deleted_at IS NULL
AND bucket_date >= (CURRENT_DATE - $2::int)
AND bucket_date < CURRENT_DATE
)
SELECT model,
SUM(total_checks) AS total,
SUM(ok_count) AS ok,
CASE WHEN SUM(count_latency) > 0
THEN SUM(sum_latency_ms)::float8 / SUM(count_latency)
ELSE NULL END AS avg_latency_ms
FROM (SELECT * FROM raw UNION ALL SELECT * FROM rollup) combined
GROUP BY model GROUP BY model
` `
from := time.Now().AddDate(0, 0, -windowDays) rows, err := r.db.QueryContext(ctx, q, monitorID, windowDays)
rows, err := r.db.QueryContext(ctx, q, monitorID, from)
if err != nil { if err != nil {
return nil, fmt.Errorf("query availability: %w", err) return nil, fmt.Errorf("query availability: %w", err)
} }
...@@ -349,6 +376,7 @@ func (r *channelMonitorRepository) ListLatestForMonitorIDs(ctx context.Context, ...@@ -349,6 +376,7 @@ func (r *channelMonitorRepository) ListLatestForMonitorIDs(ctx context.Context,
monitor_id, model, status, latency_ms, ping_latency_ms, checked_at monitor_id, model, status, latency_ms, ping_latency_ms, checked_at
FROM channel_monitor_histories FROM channel_monitor_histories
WHERE monitor_id = ANY($1) WHERE monitor_id = ANY($1)
AND deleted_at IS NULL
ORDER BY monitor_id, model, checked_at DESC ORDER BY monitor_id, model, checked_at DESC
` `
rows, err := r.db.QueryContext(ctx, q, pq.Array(ids)) rows, err := r.db.QueryContext(ctx, q, pq.Array(ids))
...@@ -409,6 +437,7 @@ func (r *channelMonitorRepository) ListRecentHistoryForMonitors( ...@@ -409,6 +437,7 @@ func (r *channelMonitorRepository) ListRecentHistoryForMonitors(
FROM channel_monitor_histories h FROM channel_monitor_histories h
JOIN targets t JOIN targets t
ON t.monitor_id = h.monitor_id AND t.model = h.model ON t.monitor_id = h.monitor_id AND t.model = h.model
WHERE h.deleted_at IS NULL
) )
SELECT monitor_id, status, latency_ms, ping_latency_ms, checked_at SELECT monitor_id, status, latency_ms, ping_latency_ms, checked_at
FROM ranked FROM ranked
...@@ -476,6 +505,7 @@ func clampTimelineLimit(n int) int { ...@@ -476,6 +505,7 @@ func clampTimelineLimit(n int) int {
} }
// ComputeAvailabilityForMonitors 一次性计算多个监控在某个窗口内的每模型可用率与平均延迟。 // ComputeAvailabilityForMonitors 一次性计算多个监控在某个窗口内的每模型可用率与平均延迟。
// 与单 monitor 版本同构:明细只覆盖今天,更早走聚合表 UNION 合并。
func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*service.ChannelMonitorAvailability, error) { func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*service.ChannelMonitorAvailability, error) {
out := make(map[int64][]*service.ChannelMonitorAvailability, len(ids)) out := make(map[int64][]*service.ChannelMonitorAvailability, len(ids))
if len(ids) == 0 { if len(ids) == 0 {
...@@ -485,19 +515,38 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co ...@@ -485,19 +515,38 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co
windowDays = 7 windowDays = 7
} }
const q = ` const q = `
SELECT WITH raw AS (
monitor_id, SELECT monitor_id,
model, model,
COUNT(*) AS total_checks, COUNT(*) AS total_checks,
COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count,
AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms,
FROM channel_monitor_histories COUNT(latency_ms) AS count_latency
WHERE monitor_id = ANY($1) FROM channel_monitor_histories
AND checked_at >= $2 WHERE monitor_id = ANY($1)
AND deleted_at IS NULL
AND checked_at >= CURRENT_DATE
GROUP BY monitor_id, model
),
rollup AS (
SELECT monitor_id, model, total_checks, ok_count, sum_latency_ms, count_latency
FROM channel_monitor_daily_rollups
WHERE monitor_id = ANY($1)
AND deleted_at IS NULL
AND bucket_date >= (CURRENT_DATE - $2::int)
AND bucket_date < CURRENT_DATE
)
SELECT monitor_id,
model,
SUM(total_checks) AS total,
SUM(ok_count) AS ok,
CASE WHEN SUM(count_latency) > 0
THEN SUM(sum_latency_ms)::float8 / SUM(count_latency)
ELSE NULL END AS avg_latency_ms
FROM (SELECT * FROM raw UNION ALL SELECT * FROM rollup) combined
GROUP BY monitor_id, model GROUP BY monitor_id, model
` `
from := time.Now().AddDate(0, 0, -windowDays) rows, err := r.db.QueryContext(ctx, q, pq.Array(ids), windowDays)
rows, err := r.db.QueryContext(ctx, q, pq.Array(ids), from)
if err != nil { if err != nil {
return nil, fmt.Errorf("query availability batch: %w", err) return nil, fmt.Errorf("query availability batch: %w", err)
} }
...@@ -521,6 +570,116 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co ...@@ -521,6 +570,116 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co
return out, nil return out, nil
} }
// ---------- Aggregation maintenance ----------
// UpsertDailyRollupsFor aggregates the non-soft-deleted history rows of the
// day [targetDate, targetDate+1d) into channel_monitor_daily_rollups, grouped
// by (monitor_id, model, bucket_date).
//
//   - ON CONFLICT (monitor_id, model, bucket_date) DO UPDATE makes backfills
//     idempotent: re-running simply overwrites with the latest stats.
//   - deleted_at is reset to NULL on conflict so a previously soft-deleted
//     rollup row is not silently filtered out of future reads.
//   - $1::date lets Postgres truncate the parameter to a date server-side, so
//     callers need not pre-truncate targetDate. NOTE(review): the cast follows
//     the server session time zone — confirm the session runs in UTC if strict
//     UTC bucketing is required.
//
// Returns the number of rows inserted or updated.
func (r *channelMonitorRepository) UpsertDailyRollupsFor(ctx context.Context, targetDate time.Time) (int64, error) {
	const q = `
		INSERT INTO channel_monitor_daily_rollups (
			monitor_id, model, bucket_date,
			total_checks, ok_count,
			operational_count, degraded_count, failed_count, error_count,
			sum_latency_ms, count_latency,
			sum_ping_latency_ms, count_ping_latency,
			computed_at
		)
		SELECT
			monitor_id,
			model,
			$1::date AS bucket_date,
			COUNT(*) AS total_checks,
			COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count,
			COUNT(*) FILTER (WHERE status = 'operational') AS operational_count,
			COUNT(*) FILTER (WHERE status = 'degraded') AS degraded_count,
			COUNT(*) FILTER (WHERE status = 'failed') AS failed_count,
			COUNT(*) FILTER (WHERE status = 'error') AS error_count,
			COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms,
			COUNT(latency_ms) AS count_latency,
			COALESCE(SUM(ping_latency_ms) FILTER (WHERE ping_latency_ms IS NOT NULL), 0) AS sum_ping_latency_ms,
			COUNT(ping_latency_ms) AS count_ping_latency,
			NOW()
		FROM channel_monitor_histories
		WHERE deleted_at IS NULL
		  AND checked_at >= $1::date
		  AND checked_at < ($1::date + INTERVAL '1 day')
		GROUP BY monitor_id, model
		ON CONFLICT (monitor_id, model, bucket_date) DO UPDATE SET
			total_checks = EXCLUDED.total_checks,
			ok_count = EXCLUDED.ok_count,
			operational_count = EXCLUDED.operational_count,
			degraded_count = EXCLUDED.degraded_count,
			failed_count = EXCLUDED.failed_count,
			error_count = EXCLUDED.error_count,
			sum_latency_ms = EXCLUDED.sum_latency_ms,
			count_latency = EXCLUDED.count_latency,
			sum_ping_latency_ms = EXCLUDED.sum_ping_latency_ms,
			count_ping_latency = EXCLUDED.count_ping_latency,
			computed_at = NOW(),
			deleted_at = NULL
	`
	result, execErr := r.db.ExecContext(ctx, q, targetDate)
	if execErr != nil {
		return 0, fmt.Errorf("upsert daily rollups for %s: %w", targetDate.Format("2006-01-02"), execErr)
	}
	affected, raErr := result.RowsAffected()
	if raErr != nil {
		return 0, fmt.Errorf("rows affected (upsert rollups): %w", raErr)
	}
	return affected, nil
}
// DeleteRollupsBefore soft-deletes rollup rows whose bucket_date is earlier
// than beforeDate and returns the number of rows affected.
// It goes through the ent client on purpose: the SoftDeleteMixin interceptor
// rewrites the DELETE into UPDATE deleted_at = NOW().
func (r *channelMonitorRepository) DeleteRollupsBefore(ctx context.Context, beforeDate time.Time) (int64, error) {
	entClient := clientFromContext(ctx, r.client)
	removed, err := entClient.ChannelMonitorDailyRollup.Delete().
		Where(channelmonitordailyrollup.BucketDateLT(beforeDate)).
		Exec(ctx)
	if err != nil {
		return 0, fmt.Errorf("delete rollups before: %w", err)
	}
	return int64(removed), nil
}
// LoadAggregationWatermark reads the single watermark row (id=1).
// The watermark table is not an ent schema (it only ever holds one row), so
// raw SQL is used directly.
//   - A missing row or a NULL last_aggregated_date yields (nil, nil); the
//     caller decides the first-run backfill strategy.
func (r *channelMonitorRepository) LoadAggregationWatermark(ctx context.Context) (*time.Time, error) {
	const selectSQL = `SELECT last_aggregated_date FROM channel_monitor_aggregation_watermark WHERE id = 1`
	var last sql.NullTime
	err := r.db.QueryRowContext(ctx, selectSQL).Scan(&last)
	switch {
	case err == sql.ErrNoRows:
		// Row absent: treat as "never aggregated".
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("load aggregation watermark: %w", err)
	case !last.Valid:
		// Row present but date is NULL: also "never aggregated".
		return nil, nil
	}
	return &last.Time, nil
}
// UpdateAggregationWatermark persists the watermark via an UPSERT on id=1.
// The $1::date cast makes Postgres truncate the argument to a date, matching
// the DATE type of the last_aggregated_date column.
func (r *channelMonitorRepository) UpdateAggregationWatermark(ctx context.Context, date time.Time) error {
	const upsertSQL = `
INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at)
VALUES (1, $1::date, NOW())
ON CONFLICT (id) DO UPDATE SET
    last_aggregated_date = EXCLUDED.last_aggregated_date,
    updated_at = NOW()
`
	_, err := r.db.ExecContext(ctx, upsertSQL, date)
	if err != nil {
		return fmt.Errorf("update aggregation watermark: %w", err)
	}
	return nil
}
// ---------- helpers ---------- // ---------- helpers ----------
func entToServiceMonitor(row *dbent.ChannelMonitor) *service.ChannelMonitor { func entToServiceMonitor(row *dbent.ChannelMonitor) *service.ChannelMonitor {
......
...@@ -15,8 +15,16 @@ const ( ...@@ -15,8 +15,16 @@ const (
monitorPingTimeout = 8 * time.Second monitorPingTimeout = 8 * time.Second
// monitorDegradedThreshold 主请求成功但耗时超过该阈值视为 degraded。 // monitorDegradedThreshold 主请求成功但耗时超过该阈值视为 degraded。
monitorDegradedThreshold = 6 * time.Second monitorDegradedThreshold = 6 * time.Second
// monitorHistoryRetentionDays 历史保留天数(每天清理一次)。 // monitorHistoryRetentionDays 明细历史保留天数。
monitorHistoryRetentionDays = 30 // 明细只保留 1 天,超出由 SoftDeleteMixin 软删;
// 维护任务每天凌晨跑(由 OpsCleanupService 统一调度)。
monitorHistoryRetentionDays = 1
// monitorRollupRetentionDays 日聚合保留天数。
// 日聚合行由 RunDailyMaintenance 在超过该窗口后软删。
monitorRollupRetentionDays = 30
// monitorMaintenanceMaxDaysPerRun 单次维护任务最多聚合的天数。
// 用于限制首次上线回填(30 天)+ 少量余量,避免长事务。
monitorMaintenanceMaxDaysPerRun = 35
// monitorWorkerConcurrency 调度器并发执行的监控数(pond 池容量)。 // monitorWorkerConcurrency 调度器并发执行的监控数(pond 池容量)。
monitorWorkerConcurrency = 5 monitorWorkerConcurrency = 5
// monitorTickerInterval 调度器扫描"到期监控"的间隔。 // monitorTickerInterval 调度器扫描"到期监控"的间隔。
...@@ -55,11 +63,6 @@ const ( ...@@ -55,11 +63,6 @@ const (
monitorAvailability15Days = 15 monitorAvailability15Days = 15
monitorAvailability30Days = 30 monitorAvailability30Days = 30
// monitorCleanupCheckInterval 历史清理调度器的检查频率(每小时检查"是否到 03:00")。
monitorCleanupCheckInterval = time.Hour
// monitorCleanupHour 凌晨 3 点执行历史清理。
monitorCleanupHour = 3
// MonitorHistoryDefaultLimit 历史查询默认返回条数(handler 层共享)。 // MonitorHistoryDefaultLimit 历史查询默认返回条数(handler 层共享)。
MonitorHistoryDefaultLimit = 100 MonitorHistoryDefaultLimit = 100
// MonitorHistoryMaxLimit 历史查询最大返回条数(handler 层共享)。 // MonitorHistoryMaxLimit 历史查询最大返回条数(handler 层共享)。
...@@ -82,10 +85,6 @@ const ( ...@@ -82,10 +85,6 @@ const (
monitorListDueTimeout = 10 * time.Second monitorListDueTimeout = 10 * time.Second
// monitorRunOneBuffer runOne 的总超时缓冲(除请求超时与 ping 超时外的额外裕量)。 // monitorRunOneBuffer runOne 的总超时缓冲(除请求超时与 ping 超时外的额外裕量)。
monitorRunOneBuffer = 10 * time.Second monitorRunOneBuffer = 10 * time.Second
// monitorCleanupTimeout 历史清理任务的总超时。
monitorCleanupTimeout = 30 * time.Second
// monitorCleanupDayLayout 历史清理用于"今日是否已跑过"判定的日期格式。
monitorCleanupDayLayout = "2006-01-02"
// monitorIdleConnTimeout HTTP transport 空闲连接关闭超时。 // monitorIdleConnTimeout HTTP transport 空闲连接关闭超时。
monitorIdleConnTimeout = 30 * time.Second monitorIdleConnTimeout = 30 * time.Second
......
...@@ -14,10 +14,10 @@ import ( ...@@ -14,10 +14,10 @@ import (
// 职责: // 职责:
// - 每 monitorTickerInterval 扫描一次"到期需要检测"的监控 // - 每 monitorTickerInterval 扫描一次"到期需要检测"的监控
// - 通过 pond 池(容量 monitorWorkerConcurrency)异步执行检测 // - 通过 pond 池(容量 monitorWorkerConcurrency)异步执行检测
// - 每小时检查一次时钟,到 monitorCleanupHour 点时执行历史清理
// - Stop 时优雅关闭:池 drain + ticker.Stop + wg.Wait // - Stop 时优雅关闭:池 drain + ticker.Stop + wg.Wait
// //
// 不引入 cron 库;清理调度通过"每小时检查时间"实现,足够 MVP。 // 历史清理与日聚合维护不再由 runner 负责,由 OpsCleanupService 的统一 cron
// 在凌晨触发 ChannelMonitorService.RunDailyMaintenance(复用 leader lock + heartbeat)。
// //
// 定时任务维护:删除/创建/编辑 monitor 无需显式 reload,每个 tick 都会重新查 DB // 定时任务维护:删除/创建/编辑 monitor 无需显式 reload,每个 tick 都会重新查 DB
// (ListEnabled + listDueForCheck),新 monitor 的 LastCheckedAt 为 nil 天然立即到期, // (ListEnabled + listDueForCheck),新 monitor 的 LastCheckedAt 为 nil 天然立即到期,
...@@ -35,10 +35,6 @@ type ChannelMonitorRunner struct { ...@@ -35,10 +35,6 @@ type ChannelMonitorRunner struct {
// 防止单次检测耗时 > interval 时同一 monitor 被并发执行。 // 防止单次检测耗时 > interval 时同一 monitor 被并发执行。
inFlight map[int64]struct{} inFlight map[int64]struct{}
inFlightMu sync.Mutex inFlightMu sync.Mutex
// 清理状态:lastCleanupDay 记录上次清理的"年-月-日",避免同一天重复跑。
lastCleanupDay string
cleanupMu sync.Mutex
} }
// NewChannelMonitorRunner 构造调度器。Start 在 wire 中调用。 // NewChannelMonitorRunner 构造调度器。Start 在 wire 中调用。
...@@ -52,7 +48,7 @@ func NewChannelMonitorRunner(svc *ChannelMonitorService, settingService *Setting ...@@ -52,7 +48,7 @@ func NewChannelMonitorRunner(svc *ChannelMonitorService, settingService *Setting
} }
} }
// Start 启动 ticker + worker pool + cleanup loop // Start 启动 ticker + worker pool。
// 调用方需保证只调一次(wire ProvideChannelMonitorRunner 内只调一次)。 // 调用方需保证只调一次(wire ProvideChannelMonitorRunner 内只调一次)。
func (r *ChannelMonitorRunner) Start() { func (r *ChannelMonitorRunner) Start() {
if r == nil || r.svc == nil { if r == nil || r.svc == nil {
...@@ -61,12 +57,11 @@ func (r *ChannelMonitorRunner) Start() { ...@@ -61,12 +57,11 @@ func (r *ChannelMonitorRunner) Start() {
// 容量 5 的 pond 池:超出时调用方等待,避免调度堆积无限增长。 // 容量 5 的 pond 池:超出时调用方等待,避免调度堆积无限增长。
r.pool = pond.NewPool(monitorWorkerConcurrency) r.pool = pond.NewPool(monitorWorkerConcurrency)
r.wg.Add(2) r.wg.Add(1)
go r.dueCheckLoop() go r.dueCheckLoop()
go r.cleanupLoop()
} }
// Stop 优雅停止:close stopCh -> 等待两个 loop 退出 -> 池 drain。 // Stop 优雅停止:close stopCh -> 等待 loop 退出 -> 池 drain。
func (r *ChannelMonitorRunner) Stop() { func (r *ChannelMonitorRunner) Stop() {
if r == nil { if r == nil {
return return
...@@ -176,45 +171,3 @@ func (r *ChannelMonitorRunner) runOne(id int64, name string) { ...@@ -176,45 +171,3 @@ func (r *ChannelMonitorRunner) runOne(id int64, name string) {
"monitor_id", id, "name", name, "error", err) "monitor_id", id, "name", name, "error", err)
} }
} }
// cleanupLoop checks the wall clock every monitorCleanupCheckInterval and runs
// the history cleanup once per day when the hour reaches monitorCleanupHour.
// One immediate check happens at startup so a long-lived process does not wait
// up to an hour for its first opportunity.
func (r *ChannelMonitorRunner) cleanupLoop() {
	defer r.wg.Done()
	clock := time.NewTicker(monitorCleanupCheckInterval)
	defer clock.Stop()
	// Probe once right away before settling into the ticker cadence.
	r.maybeRunCleanup()
	for {
		select {
		case <-clock.C:
			r.maybeRunCleanup()
		case <-r.stopCh:
			return
		}
	}
}
// maybeRunCleanup runs the history cleanup if the current hour equals
// monitorCleanupHour and no cleanup has run yet today.
// lastCleanupDay (guarded by cleanupMu) is claimed before the cleanup actually
// executes, so a failed run is not retried until the next day.
func (r *ChannelMonitorRunner) maybeRunCleanup() {
	current := time.Now()
	if current.Hour() != monitorCleanupHour {
		return
	}
	today := current.Format(monitorCleanupDayLayout)

	// Claim today's slot under the mutex; only one goroutine wins per day.
	r.cleanupMu.Lock()
	alreadyRan := r.lastCleanupDay == today
	if !alreadyRan {
		r.lastCleanupDay = today
	}
	r.cleanupMu.Unlock()
	if alreadyRan {
		return
	}

	ctx, cancel := context.WithTimeout(context.Background(), monitorCleanupTimeout)
	defer cancel()
	if err := r.svc.cleanupOldHistory(ctx); err != nil {
		slog.Warn("channel_monitor: cleanup history failed", "error", err)
	}
}
...@@ -41,6 +41,20 @@ type ChannelMonitorRepository interface { ...@@ -41,6 +41,20 @@ type ChannelMonitorRepository interface {
// ListRecentHistoryForMonitors 批量取多个 monitor 各自主模型(primaryModels[monitorID])最近 perMonitorLimit 条历史。 // ListRecentHistoryForMonitors 批量取多个 monitor 各自主模型(primaryModels[monitorID])最近 perMonitorLimit 条历史。
// 返回的 entry 已按 checked_at DESC 排序(最新在前),不含 message 字段。 // 返回的 entry 已按 checked_at DESC 排序(最新在前),不含 message 字段。
ListRecentHistoryForMonitors(ctx context.Context, ids []int64, primaryModels map[int64]string, perMonitorLimit int) (map[int64][]*ChannelMonitorHistoryEntry, error) ListRecentHistoryForMonitors(ctx context.Context, ids []int64, primaryModels map[int64]string, perMonitorLimit int) (map[int64][]*ChannelMonitorHistoryEntry, error)
// ---------- 聚合维护(OpsCleanupService 调用) ----------
// UpsertDailyRollupsFor 把 targetDate 当天的明细按 (monitor_id, model, bucket_date)
// 聚合到 channel_monitor_daily_rollups。targetDate 会被截断到日期;
// 用 ON CONFLICT DO UPDATE 实现幂等回填,返回 upsert 影响的行数。
UpsertDailyRollupsFor(ctx context.Context, targetDate time.Time) (int64, error)
// DeleteRollupsBefore 软删 bucket_date < beforeDate 的聚合行,返回删除行数。
DeleteRollupsBefore(ctx context.Context, beforeDate time.Time) (int64, error)
// LoadAggregationWatermark 读 watermark(id=1)。
// 返回 nil 表示从未聚合过;watermark 表本身预期已存在单行(migration 110 写入)。
LoadAggregationWatermark(ctx context.Context) (*time.Time, error)
// UpdateAggregationWatermark 写 watermark(UPSERT 到 id=1)。
UpdateAggregationWatermark(ctx context.Context, date time.Time) error
} }
// ChannelMonitorService 渠道监控管理服务。 // ChannelMonitorService 渠道监控管理服务。
...@@ -300,9 +314,10 @@ func (s *ChannelMonitorService) listDueForCheck(ctx context.Context) ([]*Channel ...@@ -300,9 +314,10 @@ func (s *ChannelMonitorService) listDueForCheck(ctx context.Context) ([]*Channel
return due, nil return due, nil
} }
// cleanupOldHistory 删除 monitorHistoryRetentionDays 天之前的历史记录。 // cleanupOldHistory 删除 monitorHistoryRetentionDays 天之前的明细历史记录。
// 由 RunDailyMaintenance 调用;SoftDeleteMixin 自动把 DELETE 改为 UPDATE deleted_at。
func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error { func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error {
before := time.Now().AddDate(0, 0, -monitorHistoryRetentionDays) before := time.Now().UTC().AddDate(0, 0, -monitorHistoryRetentionDays)
deleted, err := s.repo.DeleteHistoryBefore(ctx, before) deleted, err := s.repo.DeleteHistoryBefore(ctx, before)
if err != nil { if err != nil {
return fmt.Errorf("delete history before %s: %w", before.Format(time.RFC3339), err) return fmt.Errorf("delete history before %s: %w", before.Format(time.RFC3339), err)
...@@ -314,6 +329,94 @@ func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error { ...@@ -314,6 +329,94 @@ func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error {
return nil return nil
} }
// RunDailyMaintenance is the daily maintenance entry point: it aggregates any
// not-yet-aggregated detail days, then soft-deletes expired details and expired
// rollups. It is triggered by the OpsCleanupService cron (shared schedule and
// leader lock).
//
// Idempotency:
//   - the watermark prevents already-aggregated dates from being reprocessed;
//   - UpsertDailyRollupsFor uses ON CONFLICT DO UPDATE, so repeating a day
//     yields the same result.
//
// Each step only logs a slog.Warn on failure; the function always returns nil
// so the remaining steps keep running (matching the style of
// OpsCleanupService.runCleanupOnce).
func (s *ChannelMonitorService) RunDailyMaintenance(ctx context.Context) error {
	today := time.Now().UTC().Truncate(24 * time.Hour)
	steps := []struct {
		name string
		run  func() error
	}{
		{"aggregate", func() error { return s.runDailyAggregation(ctx, today) }},
		{"prune_history", func() error { return s.cleanupOldHistory(ctx) }},
		{"prune_rollups", func() error { return s.cleanupOldRollups(ctx, today) }},
	}
	for _, step := range steps {
		if err := step.run(); err != nil {
			slog.Warn("channel_monitor: maintenance step failed",
				"step", step.name, "error", err)
		}
	}
	return nil
}
// runDailyAggregation aggregates detail rows day by day, from the day after the
// watermark up to (but not including) today, all in UTC.
//   - First run (watermark == nil): backfill starts at
//     today - monitorRollupRetentionDays.
//   - At most monitorMaintenanceMaxDaysPerRun days are processed per call to
//     keep transactions short; the remainder resumes on the next run.
func (s *ChannelMonitorService) runDailyAggregation(ctx context.Context, today time.Time) error {
	watermark, err := s.repo.LoadAggregationWatermark(ctx)
	if err != nil {
		return fmt.Errorf("load watermark: %w", err)
	}
	start := s.resolveAggregationStart(watermark, today)
	if !start.Before(today) {
		return nil // nothing left to aggregate
	}
	iterations := 0
	for d := start; d.Before(today); d = d.Add(24 * time.Hour) {
		if iterations >= monitorMaintenanceMaxDaysPerRun {
			// Cap reached: stop here; the next scheduled run resumes from the
			// watermark, i.e. from date d.
			slog.Info("channel_monitor: maintenance aggregation capped",
				"max_days", monitorMaintenanceMaxDaysPerRun,
				"next_resume", d.Format("2006-01-02"))
			break
		}
		affected, upErr := s.repo.UpsertDailyRollupsFor(ctx, d)
		if upErr != nil {
			return fmt.Errorf("upsert rollups for %s: %w", d.Format("2006-01-02"), upErr)
		}
		// Advance the watermark only after the day's rollups were upserted: a
		// crash between the two steps re-aggregates that day (idempotent upsert)
		// instead of silently skipping it.
		if err := s.repo.UpdateAggregationWatermark(ctx, d); err != nil {
			return fmt.Errorf("update watermark to %s: %w", d.Format("2006-01-02"), err)
		}
		slog.Info("channel_monitor: rollups upserted",
			"date", d.Format("2006-01-02"), "affected_rows", affected)
		iterations++
	}
	return nil
}
// resolveAggregationStart computes where this aggregation pass begins:
//   - watermark != nil: the UTC day immediately after the watermark
//   - watermark == nil: today - monitorRollupRetentionDays (first-run backfill)
func (s *ChannelMonitorService) resolveAggregationStart(watermark *time.Time, today time.Time) time.Time {
	if watermark != nil {
		dayAfter := watermark.UTC().Truncate(24 * time.Hour).Add(24 * time.Hour)
		return dayAfter
	}
	return today.AddDate(0, 0, -monitorRollupRetentionDays)
}
// cleanupOldRollups soft-deletes daily rollup rows with
// bucket_date < today - monitorRollupRetentionDays.
func (s *ChannelMonitorService) cleanupOldRollups(ctx context.Context, today time.Time) error {
	threshold := today.AddDate(0, 0, -monitorRollupRetentionDays)
	removed, err := s.repo.DeleteRollupsBefore(ctx, threshold)
	switch {
	case err != nil:
		return fmt.Errorf("delete rollups before %s: %w", threshold.Format("2006-01-02"), err)
	case removed > 0:
		slog.Info("channel_monitor: rollups cleanup",
			"deleted_rows", removed, "before", threshold.Format("2006-01-02"))
	}
	return nil
}
// ---------- helpers ---------- // ---------- helpers ----------
// decryptInPlace 把 ChannelMonitor.APIKey 从密文解密为明文。 // decryptInPlace 把 ChannelMonitor.APIKey 从密文解密为明文。
......
...@@ -36,11 +36,15 @@ return 0 ...@@ -36,11 +36,15 @@ return 0
// - Scheduling: 5-field cron spec (minute hour dom month dow). // - Scheduling: 5-field cron spec (minute hour dom month dow).
// - Multi-instance: best-effort Redis leader lock so only one node runs cleanup. // - Multi-instance: best-effort Redis leader lock so only one node runs cleanup.
// - Safety: deletes in batches to avoid long transactions. // - Safety: deletes in batches to avoid long transactions.
//
// 附带:在 runCleanupOnce 末尾调用 ChannelMonitorService.RunDailyMaintenance,
// 统一共享 cron schedule + leader lock + heartbeat,避免再引一套调度。
type OpsCleanupService struct { type OpsCleanupService struct {
opsRepo OpsRepository opsRepo OpsRepository
db *sql.DB db *sql.DB
redisClient *redis.Client redisClient *redis.Client
cfg *config.Config cfg *config.Config
channelMonitorSvc *ChannelMonitorService
instanceID string instanceID string
...@@ -57,13 +61,15 @@ func NewOpsCleanupService( ...@@ -57,13 +61,15 @@ func NewOpsCleanupService(
db *sql.DB, db *sql.DB,
redisClient *redis.Client, redisClient *redis.Client,
cfg *config.Config, cfg *config.Config,
channelMonitorSvc *ChannelMonitorService,
) *OpsCleanupService { ) *OpsCleanupService {
return &OpsCleanupService{ return &OpsCleanupService{
opsRepo: opsRepo, opsRepo: opsRepo,
db: db, db: db,
redisClient: redisClient, redisClient: redisClient,
cfg: cfg, cfg: cfg,
instanceID: uuid.NewString(), channelMonitorSvc: channelMonitorSvc,
instanceID: uuid.NewString(),
} }
} }
...@@ -248,6 +254,15 @@ func (s *OpsCleanupService) runCleanupOnce(ctx context.Context) (opsCleanupDelet ...@@ -248,6 +254,15 @@ func (s *OpsCleanupService) runCleanupOnce(ctx context.Context) (opsCleanupDelet
out.dailyPreagg = n out.dailyPreagg = n
} }
// Channel monitor 每日维护(聚合昨日明细 + 软删过期明细/聚合)。
// 失败只记日志,不影响 ops 清理的成功状态(与 ops 各步骤风格一致);
// 维护本身已经把每步错误打到 slog,heartbeat result 不再分项记录。
if s.channelMonitorSvc != nil {
if err := s.channelMonitorSvc.RunDailyMaintenance(ctx); err != nil {
logger.LegacyPrintf("service.ops_cleanup", "[OpsCleanup] channel monitor maintenance failed: %v", err)
}
}
return out, nil return out, nil
} }
......
...@@ -262,13 +262,16 @@ func ProvideOpsAlertEvaluatorService( ...@@ -262,13 +262,16 @@ func ProvideOpsAlertEvaluatorService(
} }
// ProvideOpsCleanupService creates and starts OpsCleanupService (cron scheduled). // ProvideOpsCleanupService creates and starts OpsCleanupService (cron scheduled).
// channelMonitorSvc 让维护任务(聚合 + 历史/聚合软删)跟随 ops 清理 cron 一起跑,
// 共享 leader lock + heartbeat。
func ProvideOpsCleanupService( func ProvideOpsCleanupService(
opsRepo OpsRepository, opsRepo OpsRepository,
db *sql.DB, db *sql.DB,
redisClient *redis.Client, redisClient *redis.Client,
cfg *config.Config, cfg *config.Config,
channelMonitorSvc *ChannelMonitorService,
) *OpsCleanupService { ) *OpsCleanupService {
svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg) svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg, channelMonitorSvc)
svc.Start() svc.Start()
return svc return svc
} }
......
-- Migration: 126_add_channel_monitor_aggregation
-- Channel monitor daily aggregation: roll channel_monitor_histories detail rows
-- up by day. Details are retained for only 1 day while rollups are kept for
-- 30 days. Both tables use soft deletion (deleted_at), performed by the ops
-- cleanup task every night alongside the ops-monitoring cleanup (shared cron).
--
-- Design notes:
--   - channel_monitor_histories gains a deleted_at soft-delete column (the
--     SoftDeleteMixin global hook rewrites DELETE into UPDATE deleted_at = NOW()).
--   - channel_monitor_daily_rollups is unique on (monitor_id, model, bucket_date);
--     ON CONFLICT DO UPDATE gives idempotent backfills. The status distribution
--     and the latency numerator/denominator pairs are all stored so weighted
--     availability and averages can later be computed over any window.
--   - The watermark table holds a single row (id=1) recording the last date
--     aggregation reached, so a restart does not rescan the whole table.
--   - The (bucket_date) index on the rollup table serves the cleanup task's
--     DELETE WHERE bucket_date < cutoff.

-- 1) Add the soft-delete column to the history detail table
ALTER TABLE channel_monitor_histories
    ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ;

CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_deleted_at
    ON channel_monitor_histories (deleted_at);

-- 2) Create the daily rollup table
CREATE TABLE IF NOT EXISTS channel_monitor_daily_rollups (
    id BIGSERIAL PRIMARY KEY,
    monitor_id BIGINT NOT NULL REFERENCES channel_monitors(id) ON DELETE CASCADE,
    model VARCHAR(200) NOT NULL,
    bucket_date DATE NOT NULL,
    total_checks INT NOT NULL DEFAULT 0,
    ok_count INT NOT NULL DEFAULT 0,
    operational_count INT NOT NULL DEFAULT 0,
    degraded_count INT NOT NULL DEFAULT 0,
    failed_count INT NOT NULL DEFAULT 0,
    error_count INT NOT NULL DEFAULT 0,
    sum_latency_ms BIGINT NOT NULL DEFAULT 0,
    count_latency INT NOT NULL DEFAULT 0,
    sum_ping_latency_ms BIGINT NOT NULL DEFAULT 0,
    count_ping_latency INT NOT NULL DEFAULT 0,
    computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_unique
    ON channel_monitor_daily_rollups (monitor_id, model, bucket_date);

CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_bucket
    ON channel_monitor_daily_rollups (bucket_date);

CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_deleted_at
    ON channel_monitor_daily_rollups (deleted_at);

-- 3) Create the watermark table (single row: id=1)
CREATE TABLE IF NOT EXISTS channel_monitor_aggregation_watermark (
    id INT PRIMARY KEY DEFAULT 1,
    last_aggregated_date DATE,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT channel_monitor_aggregation_watermark_singleton CHECK (id = 1)
);

INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at)
VALUES (1, NULL, NOW())
ON CONFLICT (id) DO NOTHING;
...@@ -113,6 +113,7 @@ ...@@ -113,6 +113,7 @@
:loading="myKeysLoading" :loading="myKeysLoading"
:keys="myActiveKeys" :keys="myActiveKeys"
:provider="form.provider" :provider="form.provider"
:user-group-rates="userGroupRates"
@close="showKeyPicker = false" @close="showKeyPicker = false"
@pick="pickMyKey" @pick="pickMyKey"
/> />
...@@ -125,6 +126,7 @@ import { useAppStore } from '@/stores/app' ...@@ -125,6 +126,7 @@ import { useAppStore } from '@/stores/app'
import { extractApiErrorMessage } from '@/utils/apiError' import { extractApiErrorMessage } from '@/utils/apiError'
import { adminAPI } from '@/api/admin' import { adminAPI } from '@/api/admin'
import { keysAPI } from '@/api/keys' import { keysAPI } from '@/api/keys'
import { userGroupsAPI } from '@/api/groups'
import type { import type {
ChannelMonitor, ChannelMonitor,
CreateParams, CreateParams,
...@@ -175,6 +177,7 @@ const submitting = ref(false) ...@@ -175,6 +177,7 @@ const submitting = ref(false)
const showKeyPicker = ref(false) const showKeyPicker = ref(false)
const myKeysLoading = ref(false) const myKeysLoading = ref(false)
const myActiveKeys = ref<ApiKey[]>([]) const myActiveKeys = ref<ApiKey[]>([])
const userGroupRates = ref<Record<number, number>>({})
interface MonitorForm { interface MonitorForm {
name: string name: string
...@@ -263,7 +266,10 @@ async function openMyKeyPicker() { ...@@ -263,7 +266,10 @@ async function openMyKeyPicker() {
if (myActiveKeys.value.length > 0) return if (myActiveKeys.value.length > 0) return
myKeysLoading.value = true myKeysLoading.value = true
try { try {
const res = await keysAPI.list(1, 100, { status: 'active' }) const [res, rates] = await Promise.all([
keysAPI.list(1, 100, { status: 'active' }),
userGroupsAPI.getUserGroupRates(),
])
const items = res.items || [] const items = res.items || []
const now = Date.now() const now = Date.now()
myActiveKeys.value = items.filter(k => { myActiveKeys.value = items.filter(k => {
...@@ -271,6 +277,7 @@ async function openMyKeyPicker() { ...@@ -271,6 +277,7 @@ async function openMyKeyPicker() {
if (!k.expires_at) return true if (!k.expires_at) return true
return new Date(k.expires_at).getTime() > now return new Date(k.expires_at).getTime() > now
}) })
userGroupRates.value = rates
} catch (err: unknown) { } catch (err: unknown) {
appStore.showError(extractApiErrorMessage(err, t('admin.channelMonitor.form.noActiveKey'))) appStore.showError(extractApiErrorMessage(err, t('admin.channelMonitor.form.noActiveKey')))
} finally { } finally {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment