Commit 8cf83c98 authored by erio's avatar erio
Browse files

feat(channel-monitor): aggregate history to daily rollups + soft delete

明细只保留 1 天,超过 1 天聚合到新表 channel_monitor_daily_rollups(按
monitor_id/model/bucket_date 维度),聚合保留 30 天。两张表都用 SoftDeleteMixin
软删除(DELETE 自动改为 UPDATE deleted_at = NOW())。

聚合 + 清理任务由 OpsCleanupService 的 cron 统一调度,与运维监控的清理共享
schedule(默认 0 2 * * *)和 leader lock。ChannelMonitorRunner 的 cleanupLoop
被移除,只保留 dueCheckLoop。

读取路径 ComputeAvailability* 改为 UNION 明细(今天 deleted_at IS NULL)+
聚合(过去 windowDays 天 deleted_at IS NULL),SUM(ok)/SUM(total) 自然加权
计算可用率,AVG latency 用 SUM(sum_latency_ms)/SUM(count_latency)。

watermark 表 channel_monitor_aggregation_watermark 单行(id=1),记录
last_aggregated_date,重启后从该日期 +1 继续聚合,首次为 nil 则从
today - 30d 开始回填,单次最多 35 天上限避免长事务。

raw SQL 的 ListLatestPerModel / ListLatestForMonitorIDs / ListRecentHistoryForMonitors
都补上 deleted_at IS NULL 过滤(SoftDeleteMixin interceptor 只对 ent query 生效)。

bump version to 0.1.114.28

GroupBadge 在 MonitorKeyPickerDialog 中复用平台主题色 + 倍率/专属倍率
(顺手优化)。
parent ba98243c
...@@ -252,7 +252,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { ...@@ -252,7 +252,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig) opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig) opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig, channelMonitorService)
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI) tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI)
accountExpiryService := service.ProvideAccountExpiryService(accountRepository) accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
......
...@@ -54,9 +54,11 @@ type ChannelMonitor struct { ...@@ -54,9 +54,11 @@ type ChannelMonitor struct {
type ChannelMonitorEdges struct { type ChannelMonitorEdges struct {
// History holds the value of the history edge. // History holds the value of the history edge.
History []*ChannelMonitorHistory `json:"history,omitempty"` History []*ChannelMonitorHistory `json:"history,omitempty"`
// DailyRollups holds the value of the daily_rollups edge.
DailyRollups []*ChannelMonitorDailyRollup `json:"daily_rollups,omitempty"`
// loadedTypes holds the information for reporting if a // loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not. // type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool loadedTypes [2]bool
} }
// HistoryOrErr returns the History value or an error if the edge // HistoryOrErr returns the History value or an error if the edge
...@@ -68,6 +70,15 @@ func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) { ...@@ -68,6 +70,15 @@ func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) {
return nil, &NotLoadedError{edge: "history"} return nil, &NotLoadedError{edge: "history"}
} }
// DailyRollupsOrErr returns the DailyRollups value or an error if the edge
// was not loaded in eager-loading.
// loadedTypes[1] is the generator-assigned slot for the daily_rollups edge
// (slot 0 belongs to history); false means the edge was never requested.
func (e ChannelMonitorEdges) DailyRollupsOrErr() ([]*ChannelMonitorDailyRollup, error) {
if e.loadedTypes[1] {
return e.DailyRollups, nil
}
return nil, &NotLoadedError{edge: "daily_rollups"}
}
// scanValues returns the types for scanning values from sql.Rows. // scanValues returns the types for scanning values from sql.Rows.
func (*ChannelMonitor) scanValues(columns []string) ([]any, error) { func (*ChannelMonitor) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns)) values := make([]any, len(columns))
...@@ -203,6 +214,11 @@ func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery { ...@@ -203,6 +214,11 @@ func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery {
return NewChannelMonitorClient(_m.config).QueryHistory(_m) return NewChannelMonitorClient(_m.config).QueryHistory(_m)
} }
// QueryDailyRollups queries the "daily_rollups" edge of the ChannelMonitor entity.
// It routes through the client so ent query interceptors run; per the commit
// notes, the SoftDeleteMixin deleted_at filter only applies on the ent query
// path, so raw-SQL readers must add "deleted_at IS NULL" themselves.
func (_m *ChannelMonitor) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
return NewChannelMonitorClient(_m.config).QueryDailyRollups(_m)
}
// Update returns a builder for updating this ChannelMonitor. // Update returns a builder for updating this ChannelMonitor.
// Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor // Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
......
...@@ -43,6 +43,8 @@ const ( ...@@ -43,6 +43,8 @@ const (
FieldCreatedBy = "created_by" FieldCreatedBy = "created_by"
// EdgeHistory holds the string denoting the history edge name in mutations. // EdgeHistory holds the string denoting the history edge name in mutations.
EdgeHistory = "history" EdgeHistory = "history"
// EdgeDailyRollups holds the string denoting the daily_rollups edge name in mutations.
EdgeDailyRollups = "daily_rollups"
// Table holds the table name of the channelmonitor in the database. // Table holds the table name of the channelmonitor in the database.
Table = "channel_monitors" Table = "channel_monitors"
// HistoryTable is the table that holds the history relation/edge. // HistoryTable is the table that holds the history relation/edge.
...@@ -52,6 +54,13 @@ const ( ...@@ -52,6 +54,13 @@ const (
HistoryInverseTable = "channel_monitor_histories" HistoryInverseTable = "channel_monitor_histories"
// HistoryColumn is the table column denoting the history relation/edge. // HistoryColumn is the table column denoting the history relation/edge.
HistoryColumn = "monitor_id" HistoryColumn = "monitor_id"
// DailyRollupsTable is the table that holds the daily_rollups relation/edge.
DailyRollupsTable = "channel_monitor_daily_rollups"
// DailyRollupsInverseTable is the table name for the ChannelMonitorDailyRollup entity.
// It exists in this package in order to avoid circular dependency with the "channelmonitordailyrollup" package.
DailyRollupsInverseTable = "channel_monitor_daily_rollups"
// DailyRollupsColumn is the table column denoting the daily_rollups relation/edge.
DailyRollupsColumn = "monitor_id"
) )
// Columns holds all SQL columns for channelmonitor fields. // Columns holds all SQL columns for channelmonitor fields.
...@@ -214,6 +223,20 @@ func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { ...@@ -214,6 +223,20 @@ func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...) sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...)
} }
} }
// ByDailyRollupsCount orders the results by daily_rollups count.
func ByDailyRollupsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newDailyRollupsStep(), opts...)
}
}
// ByDailyRollups orders the results by daily_rollups terms.
// Each term refers to a column of the joined channel_monitor_daily_rollups row.
func ByDailyRollups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDailyRollupsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newHistoryStep() *sqlgraph.Step { func newHistoryStep() *sqlgraph.Step {
return sqlgraph.NewStep( return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID), sqlgraph.From(Table, FieldID),
...@@ -221,3 +244,10 @@ func newHistoryStep() *sqlgraph.Step { ...@@ -221,3 +244,10 @@ func newHistoryStep() *sqlgraph.Step {
sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn), sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
) )
} }
// newDailyRollupsStep builds the graph-traversal step for the daily_rollups
// O2M edge: channel_monitors.id -> channel_monitor_daily_rollups.monitor_id.
func newDailyRollupsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DailyRollupsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
)
}
...@@ -708,6 +708,29 @@ func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelM ...@@ -708,6 +708,29 @@ func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelM
}) })
} }
// HasDailyRollups applies the HasEdge predicate on the "daily_rollups" edge.
// The step is built inline without a To() clause because HasNeighbors only
// needs the edge definition, not the target table's columns.
func HasDailyRollups() predicate.ChannelMonitor {
return predicate.ChannelMonitor(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasDailyRollupsWith applies the HasEdge predicate on the "daily_rollups" edge with a given conditions (other predicates).
// Each pred is applied to the same neighbor sub-select.
func HasDailyRollupsWith(preds ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitor {
return predicate.ChannelMonitor(func(s *sql.Selector) {
step := newDailyRollupsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them. // And groups predicates with the AND operator between them.
func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor { func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
return predicate.ChannelMonitor(sql.AndPredicates(predicates...)) return predicate.ChannelMonitor(sql.AndPredicates(predicates...))
......
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
) )
...@@ -156,6 +157,21 @@ func (_c *ChannelMonitorCreate) AddHistory(v ...*ChannelMonitorHistory) *Channel ...@@ -156,6 +157,21 @@ func (_c *ChannelMonitorCreate) AddHistory(v ...*ChannelMonitorHistory) *Channel
return _c.AddHistoryIDs(ids...) return _c.AddHistoryIDs(ids...)
} }
// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs.
func (_c *ChannelMonitorCreate) AddDailyRollupIDs(ids ...int64) *ChannelMonitorCreate {
_c.mutation.AddDailyRollupIDs(ids...)
return _c
}
// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity.
// Convenience wrapper that extracts IDs from already-loaded rollup entities.
func (_c *ChannelMonitorCreate) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorCreate {
ids := make([]int64, len(v))
for i := range v {
ids[i] = v[i].ID
}
return _c.AddDailyRollupIDs(ids...)
}
// Mutation returns the ChannelMonitorMutation object of the builder. // Mutation returns the ChannelMonitorMutation object of the builder.
func (_c *ChannelMonitorCreate) Mutation() *ChannelMonitorMutation { func (_c *ChannelMonitorCreate) Mutation() *ChannelMonitorMutation {
return _c.mutation return _c.mutation
...@@ -378,6 +394,22 @@ func (_c *ChannelMonitorCreate) createSpec() (*ChannelMonitor, *sqlgraph.CreateS ...@@ -378,6 +394,22 @@ func (_c *ChannelMonitorCreate) createSpec() (*ChannelMonitor, *sqlgraph.CreateS
} }
_spec.Edges = append(_spec.Edges, edge) _spec.Edges = append(_spec.Edges, edge)
} }
if nodes := _c.mutation.DailyRollupsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
return _node, _spec return _node, _spec
} }
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/predicate"
) )
...@@ -21,12 +22,13 @@ import ( ...@@ -21,12 +22,13 @@ import (
// ChannelMonitorQuery is the builder for querying ChannelMonitor entities. // ChannelMonitorQuery is the builder for querying ChannelMonitor entities.
type ChannelMonitorQuery struct { type ChannelMonitorQuery struct {
config config
ctx *QueryContext ctx *QueryContext
order []channelmonitor.OrderOption order []channelmonitor.OrderOption
inters []Interceptor inters []Interceptor
predicates []predicate.ChannelMonitor predicates []predicate.ChannelMonitor
withHistory *ChannelMonitorHistoryQuery withHistory *ChannelMonitorHistoryQuery
modifiers []func(*sql.Selector) withDailyRollups *ChannelMonitorDailyRollupQuery
modifiers []func(*sql.Selector)
// intermediate query (i.e. traversal path). // intermediate query (i.e. traversal path).
sql *sql.Selector sql *sql.Selector
path func(context.Context) (*sql.Selector, error) path func(context.Context) (*sql.Selector, error)
...@@ -85,6 +87,28 @@ func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery { ...@@ -85,6 +87,28 @@ func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery {
return query return query
} }
// QueryDailyRollups chains the current query on the "daily_rollups" edge.
// The FROM clause is resolved lazily via the path closure, so predicates and
// interceptors added to _q after this call are still honored at execution time.
func (_q *ChannelMonitorQuery) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := _q.prepareQuery(ctx); err != nil {
return nil, err
}
selector := _q.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
)
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first ChannelMonitor entity from the query. // First returns the first ChannelMonitor entity from the query.
// Returns a *NotFoundError when no ChannelMonitor was found. // Returns a *NotFoundError when no ChannelMonitor was found.
func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) { func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) {
...@@ -272,12 +296,13 @@ func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery { ...@@ -272,12 +296,13 @@ func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery {
return nil return nil
} }
return &ChannelMonitorQuery{ return &ChannelMonitorQuery{
config: _q.config, config: _q.config,
ctx: _q.ctx.Clone(), ctx: _q.ctx.Clone(),
order: append([]channelmonitor.OrderOption{}, _q.order...), order: append([]channelmonitor.OrderOption{}, _q.order...),
inters: append([]Interceptor{}, _q.inters...), inters: append([]Interceptor{}, _q.inters...),
predicates: append([]predicate.ChannelMonitor{}, _q.predicates...), predicates: append([]predicate.ChannelMonitor{}, _q.predicates...),
withHistory: _q.withHistory.Clone(), withHistory: _q.withHistory.Clone(),
withDailyRollups: _q.withDailyRollups.Clone(),
// clone intermediate query. // clone intermediate query.
sql: _q.sql.Clone(), sql: _q.sql.Clone(),
path: _q.path, path: _q.path,
...@@ -295,6 +320,17 @@ func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQu ...@@ -295,6 +320,17 @@ func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQu
return _q return _q
} }
// WithDailyRollups tells the query-builder to eager-load the nodes that are connected to
// the "daily_rollups" edge. The optional arguments are used to configure the query builder of the edge.
// Calling it more than once overwrites the previous edge query (last call wins).
func (_q *ChannelMonitorQuery) WithDailyRollups(opts ...func(*ChannelMonitorDailyRollupQuery)) *ChannelMonitorQuery {
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
for _, opt := range opts {
opt(query)
}
_q.withDailyRollups = query
return _q
}
// GroupBy is used to group vertices by one or more fields/columns. // GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum. // It is often used with aggregate functions, like: count, max, mean, min, sum.
// //
...@@ -373,8 +409,9 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ( ...@@ -373,8 +409,9 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) (
var ( var (
nodes = []*ChannelMonitor{} nodes = []*ChannelMonitor{}
_spec = _q.querySpec() _spec = _q.querySpec()
loadedTypes = [1]bool{ loadedTypes = [2]bool{
_q.withHistory != nil, _q.withHistory != nil,
_q.withDailyRollups != nil,
} }
) )
_spec.ScanValues = func(columns []string) ([]any, error) { _spec.ScanValues = func(columns []string) ([]any, error) {
...@@ -405,6 +442,15 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ( ...@@ -405,6 +442,15 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) (
return nil, err return nil, err
} }
} }
if query := _q.withDailyRollups; query != nil {
if err := _q.loadDailyRollups(ctx, query, nodes,
func(n *ChannelMonitor) { n.Edges.DailyRollups = []*ChannelMonitorDailyRollup{} },
func(n *ChannelMonitor, e *ChannelMonitorDailyRollup) {
n.Edges.DailyRollups = append(n.Edges.DailyRollups, e)
}); err != nil {
return nil, err
}
}
return nodes, nil return nodes, nil
} }
...@@ -438,6 +484,36 @@ func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMo ...@@ -438,6 +484,36 @@ func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMo
} }
return nil return nil
} }
// loadDailyRollups eager-loads the daily_rollups O2M edge for all given nodes
// with a single batched query (WHERE monitor_id IN (...)), then assigns each
// rollup to its parent monitor via the nodeids index. init (if non-nil) seeds
// each node's edge slice; assign appends a neighbor to its parent.
func (_q *ChannelMonitorQuery) loadDailyRollups(ctx context.Context, query *ChannelMonitorDailyRollupQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorDailyRollup)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[int64]*ChannelMonitor)
for i := range nodes {
fks = append(fks, nodes[i].ID)
nodeids[nodes[i].ID] = nodes[i]
if init != nil {
init(nodes[i])
}
}
// If the caller selected a custom field set, ensure the FK column is still
// fetched so rows can be matched back to their parent monitors below.
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(channelmonitordailyrollup.FieldMonitorID)
}
query.Where(predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
s.Where(sql.InValues(s.C(channelmonitor.DailyRollupsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
fk := n.MonitorID
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
return nil
}
func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) { func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) {
_spec := _q.querySpec() _spec := _q.querySpec()
......
...@@ -13,6 +13,7 @@ import ( ...@@ -13,6 +13,7 @@ import (
"entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/dialect/sql/sqljson"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor" "github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
"github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/predicate"
) )
...@@ -229,6 +230,21 @@ func (_u *ChannelMonitorUpdate) AddHistory(v ...*ChannelMonitorHistory) *Channel ...@@ -229,6 +230,21 @@ func (_u *ChannelMonitorUpdate) AddHistory(v ...*ChannelMonitorHistory) *Channel
return _u.AddHistoryIDs(ids...) return _u.AddHistoryIDs(ids...)
} }
// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs.
func (_u *ChannelMonitorUpdate) AddDailyRollupIDs(ids ...int64) *ChannelMonitorUpdate {
_u.mutation.AddDailyRollupIDs(ids...)
return _u
}
// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity.
// Convenience wrapper that extracts IDs from already-loaded rollup entities.
func (_u *ChannelMonitorUpdate) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdate {
ids := make([]int64, len(v))
for i := range v {
ids[i] = v[i].ID
}
return _u.AddDailyRollupIDs(ids...)
}
// Mutation returns the ChannelMonitorMutation object of the builder. // Mutation returns the ChannelMonitorMutation object of the builder.
func (_u *ChannelMonitorUpdate) Mutation() *ChannelMonitorMutation { func (_u *ChannelMonitorUpdate) Mutation() *ChannelMonitorMutation {
return _u.mutation return _u.mutation
...@@ -255,6 +271,27 @@ func (_u *ChannelMonitorUpdate) RemoveHistory(v ...*ChannelMonitorHistory) *Chan ...@@ -255,6 +271,27 @@ func (_u *ChannelMonitorUpdate) RemoveHistory(v ...*ChannelMonitorHistory) *Chan
return _u.RemoveHistoryIDs(ids...) return _u.RemoveHistoryIDs(ids...)
} }
// ClearDailyRollups clears all "daily_rollups" edges to the ChannelMonitorDailyRollup entity.
// In sqlSave, Clear takes precedence over any Remove*IDs recorded on the mutation.
func (_u *ChannelMonitorUpdate) ClearDailyRollups() *ChannelMonitorUpdate {
_u.mutation.ClearDailyRollups()
return _u
}
// RemoveDailyRollupIDs removes the "daily_rollups" edge to ChannelMonitorDailyRollup entities by IDs.
func (_u *ChannelMonitorUpdate) RemoveDailyRollupIDs(ids ...int64) *ChannelMonitorUpdate {
_u.mutation.RemoveDailyRollupIDs(ids...)
return _u
}
// RemoveDailyRollups removes "daily_rollups" edges to ChannelMonitorDailyRollup entities.
// Convenience wrapper that extracts IDs from already-loaded rollup entities.
func (_u *ChannelMonitorUpdate) RemoveDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdate {
ids := make([]int64, len(v))
for i := range v {
ids[i] = v[i].ID
}
return _u.RemoveDailyRollupIDs(ids...)
}
// Save executes the query and returns the number of nodes affected by the update operation. // Save executes the query and returns the number of nodes affected by the update operation.
func (_u *ChannelMonitorUpdate) Save(ctx context.Context) (int, error) { func (_u *ChannelMonitorUpdate) Save(ctx context.Context) (int, error) {
_u.defaults() _u.defaults()
...@@ -441,6 +478,51 @@ func (_u *ChannelMonitorUpdate) sqlSave(ctx context.Context) (_node int, err err ...@@ -441,6 +478,51 @@ func (_u *ChannelMonitorUpdate) sqlSave(ctx context.Context) (_node int, err err
} }
_spec.Edges.Add = append(_spec.Edges.Add, edge) _spec.Edges.Add = append(_spec.Edges.Add, edge)
} }
if _u.mutation.DailyRollupsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := _u.mutation.RemovedDailyRollupsIDs(); len(nodes) > 0 && !_u.mutation.DailyRollupsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := _u.mutation.DailyRollupsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok { if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{channelmonitor.Label} err = &NotFoundError{channelmonitor.Label}
...@@ -660,6 +742,21 @@ func (_u *ChannelMonitorUpdateOne) AddHistory(v ...*ChannelMonitorHistory) *Chan ...@@ -660,6 +742,21 @@ func (_u *ChannelMonitorUpdateOne) AddHistory(v ...*ChannelMonitorHistory) *Chan
return _u.AddHistoryIDs(ids...) return _u.AddHistoryIDs(ids...)
} }
// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs.
func (_u *ChannelMonitorUpdateOne) AddDailyRollupIDs(ids ...int64) *ChannelMonitorUpdateOne {
_u.mutation.AddDailyRollupIDs(ids...)
return _u
}
// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity.
// Convenience wrapper that extracts IDs from already-loaded rollup entities.
func (_u *ChannelMonitorUpdateOne) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdateOne {
ids := make([]int64, len(v))
for i := range v {
ids[i] = v[i].ID
}
return _u.AddDailyRollupIDs(ids...)
}
// Mutation returns the ChannelMonitorMutation object of the builder. // Mutation returns the ChannelMonitorMutation object of the builder.
func (_u *ChannelMonitorUpdateOne) Mutation() *ChannelMonitorMutation { func (_u *ChannelMonitorUpdateOne) Mutation() *ChannelMonitorMutation {
return _u.mutation return _u.mutation
...@@ -686,6 +783,27 @@ func (_u *ChannelMonitorUpdateOne) RemoveHistory(v ...*ChannelMonitorHistory) *C ...@@ -686,6 +783,27 @@ func (_u *ChannelMonitorUpdateOne) RemoveHistory(v ...*ChannelMonitorHistory) *C
return _u.RemoveHistoryIDs(ids...) return _u.RemoveHistoryIDs(ids...)
} }
// ClearDailyRollups clears all "daily_rollups" edges to the ChannelMonitorDailyRollup entity.
// In sqlSave, Clear takes precedence over any Remove*IDs recorded on the mutation.
func (_u *ChannelMonitorUpdateOne) ClearDailyRollups() *ChannelMonitorUpdateOne {
_u.mutation.ClearDailyRollups()
return _u
}
// RemoveDailyRollupIDs removes the "daily_rollups" edge to ChannelMonitorDailyRollup entities by IDs.
func (_u *ChannelMonitorUpdateOne) RemoveDailyRollupIDs(ids ...int64) *ChannelMonitorUpdateOne {
_u.mutation.RemoveDailyRollupIDs(ids...)
return _u
}
// RemoveDailyRollups removes "daily_rollups" edges to ChannelMonitorDailyRollup entities.
// Convenience wrapper that extracts IDs from already-loaded rollup entities.
func (_u *ChannelMonitorUpdateOne) RemoveDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdateOne {
ids := make([]int64, len(v))
for i := range v {
ids[i] = v[i].ID
}
return _u.RemoveDailyRollupIDs(ids...)
}
// Where appends a list predicates to the ChannelMonitorUpdate builder. // Where appends a list predicates to the ChannelMonitorUpdate builder.
func (_u *ChannelMonitorUpdateOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorUpdateOne { func (_u *ChannelMonitorUpdateOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorUpdateOne {
_u.mutation.Where(ps...) _u.mutation.Where(ps...)
...@@ -902,6 +1020,51 @@ func (_u *ChannelMonitorUpdateOne) sqlSave(ctx context.Context) (_node *ChannelM ...@@ -902,6 +1020,51 @@ func (_u *ChannelMonitorUpdateOne) sqlSave(ctx context.Context) (_node *ChannelM
} }
_spec.Edges.Add = append(_spec.Edges.Add, edge) _spec.Edges.Add = append(_spec.Edges.Add, edge)
} }
if _u.mutation.DailyRollupsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := _u.mutation.RemovedDailyRollupsIDs(); len(nodes) > 0 && !_u.mutation.DailyRollupsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := _u.mutation.DailyRollupsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: channelmonitor.DailyRollupsTable,
Columns: []string{channelmonitor.DailyRollupsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
_node = &ChannelMonitor{config: _u.config} _node = &ChannelMonitor{config: _u.config}
_spec.Assign = _node.assignValues _spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues _spec.ScanValues = _node.scanValues
......
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
)
// ChannelMonitorDailyRollup is the model entity for the ChannelMonitorDailyRollup schema.
// Per the commit notes, one row aggregates a day's monitor-history details per
// (monitor_id, model, bucket_date); availability reads compute SUM(ok)/SUM(total)
// and AVG latency as SUM(sum_latency_ms)/SUM(count_latency).
type ChannelMonitorDailyRollup struct {
config `json:"-"`
// ID of the ent.
ID int64 `json:"id,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
// Non-nil marks a soft-deleted row (SoftDeleteMixin rewrites DELETE to
// UPDATE deleted_at = NOW(), per the commit notes).
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// MonitorID holds the value of the "monitor_id" field.
MonitorID int64 `json:"monitor_id,omitempty"`
// Model holds the value of the "model" field.
Model string `json:"model,omitempty"`
// BucketDate holds the value of the "bucket_date" field.
BucketDate time.Time `json:"bucket_date,omitempty"`
// TotalChecks holds the value of the "total_checks" field.
TotalChecks int `json:"total_checks,omitempty"`
// OkCount holds the value of the "ok_count" field.
OkCount int `json:"ok_count,omitempty"`
// OperationalCount holds the value of the "operational_count" field.
OperationalCount int `json:"operational_count,omitempty"`
// DegradedCount holds the value of the "degraded_count" field.
DegradedCount int `json:"degraded_count,omitempty"`
// FailedCount holds the value of the "failed_count" field.
FailedCount int `json:"failed_count,omitempty"`
// ErrorCount holds the value of the "error_count" field.
ErrorCount int `json:"error_count,omitempty"`
// SumLatencyMs holds the value of the "sum_latency_ms" field.
// Paired with CountLatency to form a weighted latency average.
SumLatencyMs int64 `json:"sum_latency_ms,omitempty"`
// CountLatency holds the value of the "count_latency" field.
CountLatency int `json:"count_latency,omitempty"`
// SumPingLatencyMs holds the value of the "sum_ping_latency_ms" field.
SumPingLatencyMs int64 `json:"sum_ping_latency_ms,omitempty"`
// CountPingLatency holds the value of the "count_ping_latency" field.
CountPingLatency int `json:"count_ping_latency,omitempty"`
// ComputedAt holds the value of the "computed_at" field.
ComputedAt time.Time `json:"computed_at,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the ChannelMonitorDailyRollupQuery when eager-loading is set.
Edges ChannelMonitorDailyRollupEdges `json:"edges"`
selectValues sql.SelectValues
}
// ChannelMonitorDailyRollupEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorDailyRollupEdges struct {
	// Monitor holds the value of the monitor edge.
	Monitor *ChannelMonitor `json:"monitor,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "monitor" edge (see MonitorOrErr).
	loadedTypes [1]bool
}
// MonitorOrErr returns the Monitor value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ChannelMonitorDailyRollupEdges) MonitorOrErr() (*ChannelMonitor, error) {
	switch {
	case e.Monitor != nil:
		// Edge loaded and present.
		return e.Monitor, nil
	case e.loadedTypes[0]:
		// Edge was requested during eager-loading but no row matched.
		return nil, &NotFoundError{label: channelmonitor.Label}
	default:
		// Edge was never loaded.
		return nil, &NotLoadedError{edge: "monitor"}
	}
}
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to a nullable scan target matching its Go type;
// unknown columns (added via modifiers) scan into sql.UnknownType.
func (*ChannelMonitorDailyRollup) scanValues(columns []string) ([]any, error) {
	targets := make([]any, len(columns))
	for i, column := range columns {
		switch column {
		case channelmonitordailyrollup.FieldID, channelmonitordailyrollup.FieldMonitorID, channelmonitordailyrollup.FieldTotalChecks, channelmonitordailyrollup.FieldOkCount, channelmonitordailyrollup.FieldOperationalCount, channelmonitordailyrollup.FieldDegradedCount, channelmonitordailyrollup.FieldFailedCount, channelmonitordailyrollup.FieldErrorCount, channelmonitordailyrollup.FieldSumLatencyMs, channelmonitordailyrollup.FieldCountLatency, channelmonitordailyrollup.FieldSumPingLatencyMs, channelmonitordailyrollup.FieldCountPingLatency:
			targets[i] = new(sql.NullInt64)
		case channelmonitordailyrollup.FieldModel:
			targets[i] = new(sql.NullString)
		case channelmonitordailyrollup.FieldDeletedAt, channelmonitordailyrollup.FieldBucketDate, channelmonitordailyrollup.FieldComputedAt:
			targets[i] = new(sql.NullTime)
		default:
			targets[i] = new(sql.UnknownType)
		}
	}
	return targets, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorDailyRollup fields.
// The values slice must be positionally aligned with columns (as produced by
// scanValues); columns not handled here fall through to selectValues.
func (_m *ChannelMonitorDailyRollup) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitordailyrollup.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			// NOTE: id is assigned without a Valid check; a NULL id scans as 0.
			_m.ID = int64(value.Int64)
		case channelmonitordailyrollup.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nullable field: only allocate the pointer when non-NULL.
				_m.DeletedAt = new(time.Time)
				*_m.DeletedAt = value.Time
			}
		case channelmonitordailyrollup.FieldMonitorID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
			} else if value.Valid {
				_m.MonitorID = value.Int64
			}
		case channelmonitordailyrollup.FieldModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field model", values[i])
			} else if value.Valid {
				_m.Model = value.String
			}
		case channelmonitordailyrollup.FieldBucketDate:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field bucket_date", values[i])
			} else if value.Valid {
				_m.BucketDate = value.Time
			}
		case channelmonitordailyrollup.FieldTotalChecks:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field total_checks", values[i])
			} else if value.Valid {
				_m.TotalChecks = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOkCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field ok_count", values[i])
			} else if value.Valid {
				_m.OkCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOperationalCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field operational_count", values[i])
			} else if value.Valid {
				_m.OperationalCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldDegradedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field degraded_count", values[i])
			} else if value.Valid {
				_m.DegradedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldFailedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field failed_count", values[i])
			} else if value.Valid {
				_m.FailedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldErrorCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field error_count", values[i])
			} else if value.Valid {
				_m.ErrorCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_latency_ms", values[i])
			} else if value.Valid {
				_m.SumLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_latency", values[i])
			} else if value.Valid {
				_m.CountLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumPingLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_ping_latency_ms", values[i])
			} else if value.Valid {
				_m.SumPingLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountPingLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_ping_latency", values[i])
			} else if value.Valid {
				_m.CountPingLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldComputedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field computed_at", values[i])
			} else if value.Valid {
				_m.ComputedAt = value.Time
			}
		default:
			// Unknown column (selected via modifier/order term): stash raw value.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorDailyRollup.
// This includes values selected through modifiers, order, etc.
func (_m *ChannelMonitorDailyRollup) Value(name string) (ent.Value, error) {
	v, err := _m.selectValues.Get(name)
	return v, err
}
// QueryMonitor queries the "monitor" edge of the ChannelMonitorDailyRollup entity.
func (_m *ChannelMonitorDailyRollup) QueryMonitor() *ChannelMonitorQuery {
	client := NewChannelMonitorDailyRollupClient(_m.config)
	return client.QueryMonitor(_m)
}
// Update returns a builder for updating this ChannelMonitorDailyRollup.
// Note that you need to call ChannelMonitorDailyRollup.Unwrap() before calling this method if this ChannelMonitorDailyRollup
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitorDailyRollup) Update() *ChannelMonitorDailyRollupUpdateOne {
	client := NewChannelMonitorDailyRollupClient(_m.config)
	return client.UpdateOne(_m)
}
// Unwrap unwraps the ChannelMonitorDailyRollup entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *ChannelMonitorDailyRollup) Unwrap() *ChannelMonitorDailyRollup {
	tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		// Entity was not loaded through a transaction: nothing to unwrap.
		panic("ent: ChannelMonitorDailyRollup is not a transactional entity")
	}
	_m.config.driver = tx.drv
	return _m
}
// String implements the fmt.Stringer.
// Output shape: ChannelMonitorDailyRollup(id=..., deleted_at=..., monitor_id=..., ...).
func (_m *ChannelMonitorDailyRollup) String() string {
	var sb strings.Builder
	sb.WriteString("ChannelMonitorDailyRollup(")
	fmt.Fprintf(&sb, "id=%v, ", _m.ID)
	// deleted_at is printed only when set; the following ", " separator is
	// written unconditionally either way (matching the generated layout).
	if v := _m.DeletedAt; v != nil {
		sb.WriteString("deleted_at=" + v.Format(time.ANSIC))
	}
	fmt.Fprintf(&sb, ", monitor_id=%v", _m.MonitorID)
	sb.WriteString(", model=" + _m.Model)
	sb.WriteString(", bucket_date=" + _m.BucketDate.Format(time.ANSIC))
	fmt.Fprintf(&sb, ", total_checks=%v", _m.TotalChecks)
	fmt.Fprintf(&sb, ", ok_count=%v", _m.OkCount)
	fmt.Fprintf(&sb, ", operational_count=%v", _m.OperationalCount)
	fmt.Fprintf(&sb, ", degraded_count=%v", _m.DegradedCount)
	fmt.Fprintf(&sb, ", failed_count=%v", _m.FailedCount)
	fmt.Fprintf(&sb, ", error_count=%v", _m.ErrorCount)
	fmt.Fprintf(&sb, ", sum_latency_ms=%v", _m.SumLatencyMs)
	fmt.Fprintf(&sb, ", count_latency=%v", _m.CountLatency)
	fmt.Fprintf(&sb, ", sum_ping_latency_ms=%v", _m.SumPingLatencyMs)
	fmt.Fprintf(&sb, ", count_ping_latency=%v", _m.CountPingLatency)
	sb.WriteString(", computed_at=" + _m.ComputedAt.Format(time.ANSIC))
	sb.WriteByte(')')
	return sb.String()
}
// ChannelMonitorDailyRollups is a parsable slice of ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollups []*ChannelMonitorDailyRollup
// Code generated by ent, DO NOT EDIT.
package channelmonitordailyrollup
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the channelmonitordailyrollup type in the database.
	Label = "channel_monitor_daily_rollup"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldMonitorID holds the string denoting the monitor_id field in the database.
	FieldMonitorID = "monitor_id"
	// FieldModel holds the string denoting the model field in the database.
	FieldModel = "model"
	// FieldBucketDate holds the string denoting the bucket_date field in the database.
	FieldBucketDate = "bucket_date"
	// FieldTotalChecks holds the string denoting the total_checks field in the database.
	FieldTotalChecks = "total_checks"
	// FieldOkCount holds the string denoting the ok_count field in the database.
	FieldOkCount = "ok_count"
	// FieldOperationalCount holds the string denoting the operational_count field in the database.
	FieldOperationalCount = "operational_count"
	// FieldDegradedCount holds the string denoting the degraded_count field in the database.
	FieldDegradedCount = "degraded_count"
	// FieldFailedCount holds the string denoting the failed_count field in the database.
	FieldFailedCount = "failed_count"
	// FieldErrorCount holds the string denoting the error_count field in the database.
	FieldErrorCount = "error_count"
	// FieldSumLatencyMs holds the string denoting the sum_latency_ms field in the database.
	FieldSumLatencyMs = "sum_latency_ms"
	// FieldCountLatency holds the string denoting the count_latency field in the database.
	FieldCountLatency = "count_latency"
	// FieldSumPingLatencyMs holds the string denoting the sum_ping_latency_ms field in the database.
	FieldSumPingLatencyMs = "sum_ping_latency_ms"
	// FieldCountPingLatency holds the string denoting the count_ping_latency field in the database.
	FieldCountPingLatency = "count_ping_latency"
	// FieldComputedAt holds the string denoting the computed_at field in the database.
	FieldComputedAt = "computed_at"
	// EdgeMonitor holds the string denoting the monitor edge name in mutations.
	EdgeMonitor = "monitor"
	// Table holds the table name of the channelmonitordailyrollup in the database.
	Table = "channel_monitor_daily_rollups"
	// MonitorTable is the table that holds the monitor relation/edge.
	// It equals Table because the M2O edge is carried by this table's
	// monitor_id column (no join table).
	MonitorTable = "channel_monitor_daily_rollups"
	// MonitorInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorInverseTable = "channel_monitors"
	// MonitorColumn is the table column denoting the monitor relation/edge.
	MonitorColumn = "monitor_id"
)
// Columns holds all SQL columns for channelmonitordailyrollup fields.
// It is consulted by ValidColumn to validate column names supplied at runtime.
var Columns = []string{
	FieldID,
	FieldDeletedAt,
	FieldMonitorID,
	FieldModel,
	FieldBucketDate,
	FieldTotalChecks,
	FieldOkCount,
	FieldOperationalCount,
	FieldDegradedCount,
	FieldFailedCount,
	FieldErrorCount,
	FieldSumLatencyMs,
	FieldCountLatency,
	FieldSumPingLatencyMs,
	FieldCountPingLatency,
	FieldComputedAt,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
var (
	// Hooks and Interceptors are populated by the runtime package with the
	// schema's mutation hooks / query interceptors (one slot each).
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// ModelValidator is a validator for the "model" field. It is called by the builders before save.
	ModelValidator func(string) error
	// DefaultTotalChecks holds the default value on creation for the "total_checks" field.
	DefaultTotalChecks int
	// DefaultOkCount holds the default value on creation for the "ok_count" field.
	DefaultOkCount int
	// DefaultOperationalCount holds the default value on creation for the "operational_count" field.
	DefaultOperationalCount int
	// DefaultDegradedCount holds the default value on creation for the "degraded_count" field.
	DefaultDegradedCount int
	// DefaultFailedCount holds the default value on creation for the "failed_count" field.
	DefaultFailedCount int
	// DefaultErrorCount holds the default value on creation for the "error_count" field.
	DefaultErrorCount int
	// DefaultSumLatencyMs holds the default value on creation for the "sum_latency_ms" field.
	DefaultSumLatencyMs int64
	// DefaultCountLatency holds the default value on creation for the "count_latency" field.
	DefaultCountLatency int
	// DefaultSumPingLatencyMs holds the default value on creation for the "sum_ping_latency_ms" field.
	DefaultSumPingLatencyMs int64
	// DefaultCountPingLatency holds the default value on creation for the "count_ping_latency" field.
	DefaultCountPingLatency int
	// DefaultComputedAt holds the default value on creation for the "computed_at" field.
	DefaultComputedAt func() time.Time
	// UpdateDefaultComputedAt holds the default value on update for the "computed_at" field.
	UpdateDefaultComputedAt func() time.Time
)
// OrderOption defines the ordering options for the ChannelMonitorDailyRollup queries.
// Each option mutates the underlying SQL selector to append an ORDER BY term.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldID, opts...)
	return term.ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldDeletedAt, opts...)
	return term.ToFunc()
}
// ByMonitorID orders the results by the monitor_id field.
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldMonitorID, opts...)
	return term.ToFunc()
}
// ByModel orders the results by the model field.
func ByModel(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldModel, opts...)
	return term.ToFunc()
}
// ByBucketDate orders the results by the bucket_date field.
func ByBucketDate(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldBucketDate, opts...)
	return term.ToFunc()
}
// ByTotalChecks orders the results by the total_checks field.
func ByTotalChecks(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldTotalChecks, opts...)
	return term.ToFunc()
}
// ByOkCount orders the results by the ok_count field.
func ByOkCount(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldOkCount, opts...)
	return term.ToFunc()
}
// ByOperationalCount orders the results by the operational_count field.
func ByOperationalCount(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldOperationalCount, opts...)
	return term.ToFunc()
}
// ByDegradedCount orders the results by the degraded_count field.
func ByDegradedCount(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldDegradedCount, opts...)
	return term.ToFunc()
}
// ByFailedCount orders the results by the failed_count field.
func ByFailedCount(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldFailedCount, opts...)
	return term.ToFunc()
}
// ByErrorCount orders the results by the error_count field.
func ByErrorCount(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldErrorCount, opts...)
	return term.ToFunc()
}
// BySumLatencyMs orders the results by the sum_latency_ms field.
func BySumLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldSumLatencyMs, opts...)
	return term.ToFunc()
}
// ByCountLatency orders the results by the count_latency field.
func ByCountLatency(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldCountLatency, opts...)
	return term.ToFunc()
}
// BySumPingLatencyMs orders the results by the sum_ping_latency_ms field.
func BySumPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldSumPingLatencyMs, opts...)
	return term.ToFunc()
}
// ByCountPingLatency orders the results by the count_ping_latency field.
func ByCountPingLatency(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldCountPingLatency, opts...)
	return term.ToFunc()
}
// ByComputedAt orders the results by the computed_at field.
func ByComputedAt(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldComputedAt, opts...)
	return term.ToFunc()
}
// ByMonitorField orders the results by monitor field.
// The ordering term is resolved against the joined ChannelMonitor row.
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		step := newMonitorStep()
		sqlgraph.OrderByNeighborTerms(s, step, sql.OrderByField(field, opts...))
	}
}
// newMonitorStep builds the graph-traversal step for the "monitor" M2O edge
// (this table's monitor_id column points at channel_monitors.id).
func newMonitorStep() *sqlgraph.Step {
	from := sqlgraph.From(Table, FieldID)
	to := sqlgraph.To(MonitorInverseTable, FieldID)
	edge := sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn)
	return sqlgraph.NewStep(from, to, edge)
}
This diff is collapsed.
This diff is collapsed.
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
"github.com/Wei-Shaw/sub2api/ent/predicate"
)
// ChannelMonitorDailyRollupDelete is the builder for deleting a ChannelMonitorDailyRollup entity.
type ChannelMonitorDailyRollupDelete struct {
	config
	// hooks are applied around sqlExec via withHooks (see Exec).
	hooks []Hook
	// mutation accumulates the predicates added through Where.
	mutation *ChannelMonitorDailyRollupMutation
}
// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
func (d *ChannelMonitorDailyRollupDelete) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDelete {
	d.mutation.Where(ps...)
	return d
}
// Exec executes the deletion query and returns how many vertices were deleted.
func (d *ChannelMonitorDailyRollupDelete) Exec(ctx context.Context) (int, error) {
	affected, err := withHooks(ctx, d.sqlExec, d.mutation, d.hooks)
	return affected, err
}
// ExecX is like Exec, but panics if an error occurs.
func (d *ChannelMonitorDailyRollupDelete) ExecX(ctx context.Context) int {
	affected, err := d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}
// sqlExec builds the delete spec from the accumulated predicates and runs it
// against the driver, translating constraint violations into ConstraintError.
func (d *ChannelMonitorDailyRollupDelete) sqlExec(ctx context.Context) (int, error) {
	spec := sqlgraph.NewDeleteSpec(channelmonitordailyrollup.Table, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	if predicates := d.mutation.predicates; len(predicates) > 0 {
		spec.Predicate = func(selector *sql.Selector) {
			for _, p := range predicates {
				p(selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, d.driver, spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	d.mutation.done = true
	return affected, err
}
// ChannelMonitorDailyRollupDeleteOne is the builder for deleting a single ChannelMonitorDailyRollup entity.
type ChannelMonitorDailyRollupDeleteOne struct {
	// _d is the wrapped bulk-delete builder; DeleteOne delegates to it and
	// additionally enforces that exactly one row was affected.
	_d *ChannelMonitorDailyRollupDelete
}
// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
func (d *ChannelMonitorDailyRollupDeleteOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne {
	d._d.mutation.Where(ps...)
	return d
}
// Exec executes the deletion query.
// It returns NotFoundError when no row matched the predicates.
func (d *ChannelMonitorDailyRollupDeleteOne) Exec(ctx context.Context) error {
	n, err := d._d.Exec(ctx)
	if err != nil {
		return err
	}
	if n == 0 {
		return &NotFoundError{channelmonitordailyrollup.Label}
	}
	return nil
}
// ExecX is like Exec, but panics if an error occurs.
func (d *ChannelMonitorDailyRollupDeleteOne) ExecX(ctx context.Context) {
	err := d.Exec(ctx)
	if err != nil {
		panic(err)
	}
}
This diff is collapsed.
This diff is collapsed.
...@@ -18,6 +18,8 @@ type ChannelMonitorHistory struct { ...@@ -18,6 +18,8 @@ type ChannelMonitorHistory struct {
config `json:"-"` config `json:"-"`
// ID of the ent. // ID of the ent.
ID int64 `json:"id,omitempty"` ID int64 `json:"id,omitempty"`
// DeletedAt holds the value of the "deleted_at" field.
DeletedAt *time.Time `json:"deleted_at,omitempty"`
// MonitorID holds the value of the "monitor_id" field. // MonitorID holds the value of the "monitor_id" field.
MonitorID int64 `json:"monitor_id,omitempty"` MonitorID int64 `json:"monitor_id,omitempty"`
// Model holds the value of the "model" field. // Model holds the value of the "model" field.
...@@ -67,7 +69,7 @@ func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) { ...@@ -67,7 +69,7 @@ func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) {
values[i] = new(sql.NullInt64) values[i] = new(sql.NullInt64)
case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage: case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage:
values[i] = new(sql.NullString) values[i] = new(sql.NullString)
case channelmonitorhistory.FieldCheckedAt: case channelmonitorhistory.FieldDeletedAt, channelmonitorhistory.FieldCheckedAt:
values[i] = new(sql.NullTime) values[i] = new(sql.NullTime)
default: default:
values[i] = new(sql.UnknownType) values[i] = new(sql.UnknownType)
...@@ -90,6 +92,13 @@ func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) er ...@@ -90,6 +92,13 @@ func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) er
return fmt.Errorf("unexpected type %T for field id", value) return fmt.Errorf("unexpected type %T for field id", value)
} }
_m.ID = int64(value.Int64) _m.ID = int64(value.Int64)
case channelmonitorhistory.FieldDeletedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
} else if value.Valid {
_m.DeletedAt = new(time.Time)
*_m.DeletedAt = value.Time
}
case channelmonitorhistory.FieldMonitorID: case channelmonitorhistory.FieldMonitorID:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field monitor_id", values[i]) return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
...@@ -175,6 +184,11 @@ func (_m *ChannelMonitorHistory) String() string { ...@@ -175,6 +184,11 @@ func (_m *ChannelMonitorHistory) String() string {
var builder strings.Builder var builder strings.Builder
builder.WriteString("ChannelMonitorHistory(") builder.WriteString("ChannelMonitorHistory(")
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
if v := _m.DeletedAt; v != nil {
builder.WriteString("deleted_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteString(", ")
builder.WriteString("monitor_id=") builder.WriteString("monitor_id=")
builder.WriteString(fmt.Sprintf("%v", _m.MonitorID)) builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
builder.WriteString(", ") builder.WriteString(", ")
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"time" "time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
) )
...@@ -15,6 +16,8 @@ const ( ...@@ -15,6 +16,8 @@ const (
Label = "channel_monitor_history" Label = "channel_monitor_history"
// FieldID holds the string denoting the id field in the database. // FieldID holds the string denoting the id field in the database.
FieldID = "id" FieldID = "id"
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
FieldDeletedAt = "deleted_at"
// FieldMonitorID holds the string denoting the monitor_id field in the database. // FieldMonitorID holds the string denoting the monitor_id field in the database.
FieldMonitorID = "monitor_id" FieldMonitorID = "monitor_id"
// FieldModel holds the string denoting the model field in the database. // FieldModel holds the string denoting the model field in the database.
...@@ -45,6 +48,7 @@ const ( ...@@ -45,6 +48,7 @@ const (
// Columns holds all SQL columns for channelmonitorhistory fields. // Columns holds all SQL columns for channelmonitorhistory fields.
var Columns = []string{ var Columns = []string{
FieldID, FieldID,
FieldDeletedAt,
FieldMonitorID, FieldMonitorID,
FieldModel, FieldModel,
FieldStatus, FieldStatus,
...@@ -64,7 +68,14 @@ func ValidColumn(column string) bool { ...@@ -64,7 +68,14 @@ func ValidColumn(column string) bool {
return false return false
} }
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
var ( var (
Hooks [1]ent.Hook
Interceptors [1]ent.Interceptor
// ModelValidator is a validator for the "model" field. It is called by the builders before save. // ModelValidator is a validator for the "model" field. It is called by the builders before save.
ModelValidator func(string) error ModelValidator func(string) error
// DefaultMessage holds the default value on creation for the "message" field. // DefaultMessage holds the default value on creation for the "message" field.
...@@ -108,6 +119,11 @@ func ByID(opts ...sql.OrderTermOption) OrderOption { ...@@ -108,6 +119,11 @@ func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc() return sql.OrderByField(FieldID, opts...).ToFunc()
} }
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}
// ByMonitorID orders the results by the monitor_id field. // ByMonitorID orders the results by the monitor_id field.
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption { func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldMonitorID, opts...).ToFunc() return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
......
...@@ -55,6 +55,11 @@ func IDLTE(id int64) predicate.ChannelMonitorHistory { ...@@ -55,6 +55,11 @@ func IDLTE(id int64) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id)) return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id))
} }
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldDeletedAt, v))
}
// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ. // MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
func MonitorID(v int64) predicate.ChannelMonitorHistory { func MonitorID(v int64) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
...@@ -85,6 +90,56 @@ func CheckedAt(v time.Time) predicate.ChannelMonitorHistory { ...@@ -85,6 +90,56 @@ func CheckedAt(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v)) return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v))
} }
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldDeletedAt))
}
// MonitorIDEQ applies the EQ predicate on the "monitor_id" field. // MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory { func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory {
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
......
...@@ -23,6 +23,20 @@ type ChannelMonitorHistoryCreate struct { ...@@ -23,6 +23,20 @@ type ChannelMonitorHistoryCreate struct {
conflict []sql.ConflictOption conflict []sql.ConflictOption
} }
// SetDeletedAt records v as the "deleted_at" value on the create builder's mutation.
func (_c *ChannelMonitorHistoryCreate) SetDeletedAt(v time.Time) *ChannelMonitorHistoryCreate {
	m := _c.mutation
	m.SetDeletedAt(v)
	return _c
}
// SetNillableDeletedAt sets the "deleted_at" field when v is non-nil; a nil v is a no-op.
func (_c *ChannelMonitorHistoryCreate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryCreate {
	if v == nil {
		return _c
	}
	return _c.SetDeletedAt(*v)
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate { func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate {
_c.mutation.SetMonitorID(v) _c.mutation.SetMonitorID(v)
...@@ -109,7 +123,9 @@ func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation ...@@ -109,7 +123,9 @@ func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation
// Save creates the ChannelMonitorHistory in the database. // Save creates the ChannelMonitorHistory in the database.
func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) { func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) {
_c.defaults() if err := _c.defaults(); err != nil {
return nil, err
}
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
} }
...@@ -136,15 +152,19 @@ func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) { ...@@ -136,15 +152,19 @@ func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) {
} }
// defaults sets the default values of the builder before save. // defaults sets the default values of the builder before save.
func (_c *ChannelMonitorHistoryCreate) defaults() { func (_c *ChannelMonitorHistoryCreate) defaults() error {
if _, ok := _c.mutation.Message(); !ok { if _, ok := _c.mutation.Message(); !ok {
v := channelmonitorhistory.DefaultMessage v := channelmonitorhistory.DefaultMessage
_c.mutation.SetMessage(v) _c.mutation.SetMessage(v)
} }
if _, ok := _c.mutation.CheckedAt(); !ok { if _, ok := _c.mutation.CheckedAt(); !ok {
if channelmonitorhistory.DefaultCheckedAt == nil {
return fmt.Errorf("ent: uninitialized channelmonitorhistory.DefaultCheckedAt (forgotten import ent/runtime?)")
}
v := channelmonitorhistory.DefaultCheckedAt() v := channelmonitorhistory.DefaultCheckedAt()
_c.mutation.SetCheckedAt(v) _c.mutation.SetCheckedAt(v)
} }
return nil
} }
// check runs all checks and user-defined validators on the builder. // check runs all checks and user-defined validators on the builder.
...@@ -206,6 +226,10 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq ...@@ -206,6 +226,10 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq
_spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) _spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
) )
_spec.OnConflict = _c.conflict _spec.OnConflict = _c.conflict
if value, ok := _c.mutation.DeletedAt(); ok {
_spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value)
_node.DeletedAt = &value
}
if value, ok := _c.mutation.Model(); ok { if value, ok := _c.mutation.Model(); ok {
_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
_node.Model = value _node.Model = value
...@@ -254,7 +278,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq ...@@ -254,7 +278,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq
// of the `INSERT` statement. For example: // of the `INSERT` statement. For example:
// //
// client.ChannelMonitorHistory.Create(). // client.ChannelMonitorHistory.Create().
// SetMonitorID(v). // SetDeletedAt(v).
// OnConflict( // OnConflict(
// // Update the row with the new values // // Update the row with the new values
// // the was proposed for insertion. // // the was proposed for insertion.
...@@ -263,7 +287,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq ...@@ -263,7 +287,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq
// // Override some of the fields with custom // // Override some of the fields with custom
// // update values. // // update values.
// Update(func(u *ent.ChannelMonitorHistoryUpsert) { // Update(func(u *ent.ChannelMonitorHistoryUpsert) {
// SetMonitorID(v+v). // SetDeletedAt(v+v).
// }). // }).
// Exec(ctx) // Exec(ctx)
func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne { func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne {
...@@ -299,6 +323,24 @@ type ( ...@@ -299,6 +323,24 @@ type (
} }
) )
// SetDeletedAt writes v into the "deleted_at" column for the upsert's UPDATE set.
func (u *ChannelMonitorHistoryUpsert) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsert {
	col := channelmonitorhistory.FieldDeletedAt
	u.Set(col, v)
	return u
}
// UpdateDeletedAt makes the upsert reuse the "deleted_at" value proposed on create (SQL EXCLUDED).
func (u *ChannelMonitorHistoryUpsert) UpdateDeletedAt() *ChannelMonitorHistoryUpsert {
	col := channelmonitorhistory.FieldDeletedAt
	u.SetExcluded(col)
	return u
}
// ClearDeletedAt nulls out the "deleted_at" column in the upsert's UPDATE set.
func (u *ChannelMonitorHistoryUpsert) ClearDeletedAt() *ChannelMonitorHistoryUpsert {
	col := channelmonitorhistory.FieldDeletedAt
	u.SetNull(col)
	return u
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert { func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert {
u.Set(channelmonitorhistory.FieldMonitorID, v) u.Set(channelmonitorhistory.FieldMonitorID, v)
...@@ -453,6 +495,27 @@ func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryU ...@@ -453,6 +495,27 @@ func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryU
return u return u
} }
// SetDeletedAt sets the "deleted_at" field on the single-row upsert builder.
func (u *ChannelMonitorHistoryUpsertOne) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsertOne {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.SetDeletedAt(v) }
	return u.Update(apply)
}
// UpdateDeletedAt tells the single-row upsert to keep the "deleted_at" value proposed on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateDeletedAt() *ChannelMonitorHistoryUpsertOne {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.UpdateDeletedAt() }
	return u.Update(apply)
}
// ClearDeletedAt clears the "deleted_at" field on the single-row upsert builder.
func (u *ChannelMonitorHistoryUpsertOne) ClearDeletedAt() *ChannelMonitorHistoryUpsertOne {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.ClearDeletedAt() }
	return u.Update(apply)
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne { func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne {
return u.Update(func(s *ChannelMonitorHistoryUpsert) { return u.Update(func(s *ChannelMonitorHistoryUpsert) {
...@@ -721,7 +784,7 @@ func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) { ...@@ -721,7 +784,7 @@ func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) {
// // Override some of the fields with custom // // Override some of the fields with custom
// // update values. // // update values.
// Update(func(u *ent.ChannelMonitorHistoryUpsert) { // Update(func(u *ent.ChannelMonitorHistoryUpsert) {
// SetMonitorID(v+v). // SetDeletedAt(v+v).
// }). // }).
// Exec(ctx) // Exec(ctx)
func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk { func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk {
...@@ -790,6 +853,27 @@ func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistory ...@@ -790,6 +853,27 @@ func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistory
return u return u
} }
// SetDeletedAt sets the "deleted_at" field on the bulk upsert builder.
func (u *ChannelMonitorHistoryUpsertBulk) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsertBulk {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.SetDeletedAt(v) }
	return u.Update(apply)
}
// UpdateDeletedAt tells the bulk upsert to keep the "deleted_at" value proposed on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateDeletedAt() *ChannelMonitorHistoryUpsertBulk {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.UpdateDeletedAt() }
	return u.Update(apply)
}
// ClearDeletedAt clears the "deleted_at" field on the bulk upsert builder.
func (u *ChannelMonitorHistoryUpsertBulk) ClearDeletedAt() *ChannelMonitorHistoryUpsertBulk {
	apply := func(w *ChannelMonitorHistoryUpsert) { w.ClearDeletedAt() }
	return u.Update(apply)
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk { func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk {
return u.Update(func(s *ChannelMonitorHistoryUpsert) { return u.Update(func(s *ChannelMonitorHistoryUpsert) {
......
...@@ -300,12 +300,12 @@ func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQu ...@@ -300,12 +300,12 @@ func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQu
// Example: // Example:
// //
// var v []struct { // var v []struct {
// MonitorID int64 `json:"monitor_id,omitempty"` // DeletedAt time.Time `json:"deleted_at,omitempty"`
// Count int `json:"count,omitempty"` // Count int `json:"count,omitempty"`
// } // }
// //
// client.ChannelMonitorHistory.Query(). // client.ChannelMonitorHistory.Query().
// GroupBy(channelmonitorhistory.FieldMonitorID). // GroupBy(channelmonitorhistory.FieldDeletedAt).
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy { func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy {
...@@ -323,11 +323,11 @@ func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *C ...@@ -323,11 +323,11 @@ func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *C
// Example: // Example:
// //
// var v []struct { // var v []struct {
// MonitorID int64 `json:"monitor_id,omitempty"` // DeletedAt time.Time `json:"deleted_at,omitempty"`
// } // }
// //
// client.ChannelMonitorHistory.Query(). // client.ChannelMonitorHistory.Query().
// Select(channelmonitorhistory.FieldMonitorID). // Select(channelmonitorhistory.FieldDeletedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect { func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect {
_q.ctx.Fields = append(_q.ctx.Fields, fields...) _q.ctx.Fields = append(_q.ctx.Fields, fields...)
......
...@@ -29,6 +29,26 @@ func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHisto ...@@ -29,6 +29,26 @@ func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHisto
return _u return _u
} }
// SetDeletedAt records v as the "deleted_at" value on the update builder's mutation.
func (_u *ChannelMonitorHistoryUpdate) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpdate {
	m := _u.mutation
	m.SetDeletedAt(v)
	return _u
}
// SetNillableDeletedAt sets the "deleted_at" field when v is non-nil; a nil v is a no-op.
func (_u *ChannelMonitorHistoryUpdate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryUpdate {
	if v == nil {
		return _u
	}
	return _u.SetDeletedAt(*v)
}
// ClearDeletedAt marks the "deleted_at" field to be set NULL by this update.
func (_u *ChannelMonitorHistoryUpdate) ClearDeletedAt() *ChannelMonitorHistoryUpdate {
	m := _u.mutation
	m.ClearDeletedAt()
	return _u
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate { func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate {
_u.mutation.SetMonitorID(v) _u.mutation.SetMonitorID(v)
...@@ -237,6 +257,12 @@ func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int, ...@@ -237,6 +257,12 @@ func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int,
} }
} }
} }
if value, ok := _u.mutation.DeletedAt(); ok {
_spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value)
}
if _u.mutation.DeletedAtCleared() {
_spec.ClearField(channelmonitorhistory.FieldDeletedAt, field.TypeTime)
}
if value, ok := _u.mutation.Model(); ok { if value, ok := _u.mutation.Model(); ok {
_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
} }
...@@ -319,6 +345,26 @@ type ChannelMonitorHistoryUpdateOne struct { ...@@ -319,6 +345,26 @@ type ChannelMonitorHistoryUpdateOne struct {
mutation *ChannelMonitorHistoryMutation mutation *ChannelMonitorHistoryMutation
} }
// SetDeletedAt records v as the "deleted_at" value on the single-row update builder's mutation.
func (_u *ChannelMonitorHistoryUpdateOne) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpdateOne {
	m := _u.mutation
	m.SetDeletedAt(v)
	return _u
}
// SetNillableDeletedAt sets the "deleted_at" field when v is non-nil; a nil v is a no-op.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryUpdateOne {
	if v == nil {
		return _u
	}
	return _u.SetDeletedAt(*v)
}
// ClearDeletedAt marks the "deleted_at" field to be set NULL by this single-row update.
func (_u *ChannelMonitorHistoryUpdateOne) ClearDeletedAt() *ChannelMonitorHistoryUpdateOne {
	m := _u.mutation
	m.ClearDeletedAt()
	return _u
}
// SetMonitorID sets the "monitor_id" field. // SetMonitorID sets the "monitor_id" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne { func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne {
_u.mutation.SetMonitorID(v) _u.mutation.SetMonitorID(v)
...@@ -557,6 +603,12 @@ func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *C ...@@ -557,6 +603,12 @@ func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *C
} }
} }
} }
if value, ok := _u.mutation.DeletedAt(); ok {
_spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value)
}
if _u.mutation.DeletedAtCleared() {
_spec.ClearField(channelmonitorhistory.FieldDeletedAt, field.TypeTime)
}
if value, ok := _u.mutation.Model(); ok { if value, ok := _u.mutation.Model(); ok {
_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment