satellite/satellitedb/dbx: split accounting.dbx by project and node
Change-Id: Ia3dee52abf98a41ea62d94af0a155f4f09661224
parent 8842985571
commit 4e70ef83fb
satellite/satellitedb/dbx/accounting.dbx
@@ -1,5 +1,3 @@
//--- accounting ---//

// accounting_timestamps just allows us to save the last time/thing that happened
model accounting_timestamps (
    key name
@@ -18,238 +16,3 @@ read scalar (
    select accounting_timestamps.value
    where accounting_timestamps.name = ?
)
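For orientation: the read scalar above is what dbx compiles into the Find_AccountingTimestamps_Value_By_Name method whose hunk headers appear in satellitedb.dbx.go further down. A minimal call sketch, assuming the usual dbx field-constructor and single-column Value_Row conventions (only the method name is confirmed by this diff; the "LastRollup" key is a hypothetical example):

    // Hedged sketch: look up the last recorded time for a named event.
    row, err := db.Find_AccountingTimestamps_Value_By_Name(ctx,
        AccountingTimestamps_Name("LastRollup")) // constructor assumed from dbx conventions
    if err != nil {
        return time.Time{}, err
    }
    return row.Value, nil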

model accounting_rollup (
    key node_id start_time
    index ( fields start_time )

    field node_id blob
    field start_time timestamp
    field put_total int64
    field get_total int64
    field get_audit_total int64
    field get_repair_total int64
    field put_repair_total int64
    field at_rest_total float64
    field interval_end_time timestamp ( updatable, nullable )
)

// --- bucket accounting tables --- //

model bucket_bandwidth_rollup (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup
    where bucket_bandwidth_rollup.interval_start >= ?
)
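Each read paged declaration like this one yields a continuation-based pager; the generated pgx implementation appears verbatim later in this diff. A hedged usage sketch (the method signature is taken from the generated code below; the field constructor, page size, and process helper are assumptions):

    // Stream every rollup with interval_start >= since, 1000 rows per query.
    var next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
    for {
        rows, cursor, err := db.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx,
            BucketBandwidthRollup_IntervalStart(since), 1000, next)
        if err != nil {
            return err
        }
        for _, rollup := range rows {
            process(rollup) // placeholder for caller logic
        }
        if cursor == nil { // the pager leaves next nil once a page comes back empty
            break
        }
        next = cursor
    }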

model bucket_bandwidth_rollup_archive (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_archive_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_archive_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup_archive
    where bucket_bandwidth_rollup_archive.interval_start >= ?
)

model project_bandwidth_daily_rollup (
    key project_id interval_day
    index (
        name project_bandwidth_daily_rollup_interval_day_index
        fields interval_day
    )

    field project_id blob
    field interval_day date
    field egress_allocated uint64 ( updatable )
    field egress_settled uint64 ( updatable )
    field egress_dead uint64 ( updatable, default 0 )
)

model bucket_storage_tally (
    key bucket_name project_id interval_start

    index (
        name bucket_storage_tallies_project_id_interval_start_index
        fields project_id interval_start
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp

    field total_bytes uint64 ( default 0)
    field inline uint64
    field remote uint64

    field total_segments_count uint ( default 0)
    field remote_segments_count uint
    field inline_segments_count uint
    field object_count uint

    field metadata_size uint64
)

read all (
    select bucket_storage_tally
    orderby desc bucket_storage_tally.interval_start
)

read all (
    select bucket_storage_tally
    where bucket_storage_tally.project_id = ?
    where bucket_storage_tally.bucket_name = ?
    where bucket_storage_tally.interval_start >= ?
    where bucket_storage_tally.interval_start <= ?
    orderby desc bucket_storage_tally.interval_start
)

// --- storage node accounting tables --- //

model storagenode_bandwidth_rollup (
    key storagenode_id interval_start action

    index (
        name storagenode_bandwidth_rollups_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

create storagenode_bandwidth_rollup()

read all (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start = ?
)
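This read all with two equality filters is the source of the All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart method that appears as unchanged context in the generated code below. Roughly, at a call site (field constructors assumed from dbx conventions; nodeID and hour are caller-supplied):

    rollups, err := db.All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx,
        StoragenodeBandwidthRollup_StoragenodeId(nodeID.Bytes()),
        StoragenodeBandwidthRollup_IntervalStart(hour))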

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.interval_start >= ?
)

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start >= ?
)

model storagenode_bandwidth_rollup_archive (
    key storagenode_id interval_start action

    index (
        name storagenode_bandwidth_rollup_archives_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_archive
    where storagenode_bandwidth_rollup_archive.interval_start >= ?
)

///////////////////////////////////////
// orders phase2->phase3 rollout table
///////////////////////////////////////

model storagenode_bandwidth_rollup_phase2 (
    table storagenode_bandwidth_rollups_phase2 // make the pluralization consistent

    key storagenode_id interval_start action

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_phase2
    where storagenode_bandwidth_rollup_phase2.storagenode_id = ?
    where storagenode_bandwidth_rollup_phase2.interval_start >= ?
)

model storagenode_storage_tally (
    // this primary key will enforce uniqueness on interval_end_time,node_id
    // and also creates an index on interval_end_time implicitly.
    // the interval_end_time will be the same value for many rows so
    // we put that first so we can use cockroachdb prefix compression.
    // node_id is also used many times but interval_end_time is more
    // repetitive and will benefit more.
    key interval_end_time node_id

    index ( fields node_id )

    field node_id blob
    field interval_end_time timestamp
    field data_total float64
)
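The key-ordering comment above implies roughly the following schema. This is a hedged reconstruction from the model, not copied from the generated migration; the column types and index name are assumptions:

    // Hedged sketch of the table the model above maps to.
    const storagenodeStorageTallySchema = `
    CREATE TABLE storagenode_storage_tallies (
        node_id bytea NOT NULL,
        interval_end_time timestamp with time zone NOT NULL,
        data_total double precision NOT NULL,
        PRIMARY KEY ( interval_end_time, node_id )
    );
    CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );`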

read all (
    select storagenode_storage_tally
)

read all (
    select storagenode_storage_tally
    where storagenode_storage_tally.interval_end_time >= ?
)

satellite/satellitedb/dbx/accounting_node.dbx (new file, 123 lines)
@@ -0,0 +1,123 @@
model accounting_rollup (
    key node_id start_time
    index ( fields start_time )

    field node_id blob
    field start_time timestamp
    field put_total int64
    field get_total int64
    field get_audit_total int64
    field get_repair_total int64
    field put_repair_total int64
    field at_rest_total float64
    field interval_end_time timestamp ( updatable, nullable )
)

// --- storage node accounting tables --- //

model storagenode_bandwidth_rollup (
    key storagenode_id interval_start action

    index (
        name storagenode_bandwidth_rollups_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

create storagenode_bandwidth_rollup()

read all (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start = ?
)

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.interval_start >= ?
)

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start >= ?
)

model storagenode_bandwidth_rollup_archive (
    key storagenode_id interval_start action

    index (
        name storagenode_bandwidth_rollup_archives_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_archive
    where storagenode_bandwidth_rollup_archive.interval_start >= ?
)

///////////////////////////////////////
// orders phase2->phase3 rollout table
///////////////////////////////////////

model storagenode_bandwidth_rollup_phase2 (
    table storagenode_bandwidth_rollups_phase2 // make the pluralization consistent

    key storagenode_id interval_start action

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_phase2
    where storagenode_bandwidth_rollup_phase2.storagenode_id = ?
    where storagenode_bandwidth_rollup_phase2.interval_start >= ?
)

model storagenode_storage_tally (
    // this primary key will enforce uniqueness on interval_end_time,node_id
    // and also creates an index on interval_end_time implicitly.
    // the interval_end_time will be the same value for many rows so
    // we put that first so we can use cockroachdb prefix compression.
    // node_id is also used many times but interval_end_time is more
    // repetitive and will benefit more.
    key interval_end_time node_id

    index ( fields node_id )

    field node_id blob
    field interval_end_time timestamp
    field data_total float64
)

read all (
    select storagenode_storage_tally
)

read all (
    select storagenode_storage_tally
    where storagenode_storage_tally.interval_end_time >= ?
)

satellite/satellitedb/dbx/accounting_project.dbx (new file, 110 lines)
@@ -0,0 +1,110 @@
// --- bucket accounting tables --- //

model bucket_bandwidth_rollup (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup
    where bucket_bandwidth_rollup.interval_start >= ?
)

model bucket_bandwidth_rollup_archive (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_archive_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_archive_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp
    field interval_seconds uint
    field action uint

    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup_archive
    where bucket_bandwidth_rollup_archive.interval_start >= ?
)

model project_bandwidth_daily_rollup (
    key project_id interval_day
    index (
        name project_bandwidth_daily_rollup_interval_day_index
        fields interval_day
    )

    field project_id blob
    field interval_day date
    field egress_allocated uint64 ( updatable )
    field egress_settled uint64 ( updatable )
    field egress_dead uint64 ( updatable, default 0 )
)

model bucket_storage_tally (
    key bucket_name project_id interval_start

    index (
        name bucket_storage_tallies_project_id_interval_start_index
        fields project_id interval_start
    )

    field bucket_name blob
    field project_id blob

    field interval_start timestamp

    field total_bytes uint64 ( default 0)
    field inline uint64
    field remote uint64

    field total_segments_count uint ( default 0)
    field remote_segments_count uint
    field inline_segments_count uint
    field object_count uint

    field metadata_size uint64
)

read all (
    select bucket_storage_tally
    orderby desc bucket_storage_tally.interval_start
)

read all (
    select bucket_storage_tally
    where bucket_storage_tally.project_id = ?
    where bucket_storage_tally.bucket_name = ?
    where bucket_storage_tally.interval_start >= ?
    where bucket_storage_tally.interval_start <= ?
    orderby desc bucket_storage_tally.interval_start
)

satellite/satellitedb/dbx/satellitedb.dbx.go
@@ -13094,219 +13094,6 @@ func (obj *pgxImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
}

func (obj *pgxImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
    bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
    limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
    rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

    var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

    var __values []interface{}
    __values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())

    var __stmt string
    if start != nil && start._set {
        __values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    } else {
        __values = append(__values, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
    }
    obj.logStmt(__stmt, __values...)

    for {
        rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, nil, err
            }
            defer __rows.Close()

            var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
            __continuation._set = true

            for __rows.Next() {
                bucket_bandwidth_rollup := &BucketBandwidthRollup{}
                err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
                if err != nil {
                    return nil, nil, err
                }
                rows = append(rows, bucket_bandwidth_rollup)
                next = &__continuation
            }

            if err := __rows.Err(); err != nil {
                return nil, nil, err
            }

            return rows, next, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, nil, obj.makeErr(err)
        }
        return rows, next, nil
    }

}
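Every Paged_* method moved by this commit has the same generated shape; in outline:

    // Shape shared by the generated pagers (a paraphrase, not generated code):
    //  1. use __embed_first_stmt when no continuation is supplied, otherwise the
    //     statement whose tuple comparison resumes strictly after the last row;
    //  2. scan each row into both the result struct and the continuation, so
    //     next always identifies the last row returned;
    //  3. run the query in a loop that retries while obj.shouldRetry(err) is true.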

func (obj *pgxImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
    bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
    limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
    rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

    var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

    var __values []interface{}
    __values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

    var __stmt string
    if start != nil && start._set {
        __values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    } else {
        __values = append(__values, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
    }
    obj.logStmt(__stmt, __values...)

    for {
        rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, nil, err
            }
            defer __rows.Close()

            var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
            __continuation._set = true

            for __rows.Next() {
                bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
                err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
                if err != nil {
                    return nil, nil, err
                }
                rows = append(rows, bucket_bandwidth_rollup_archive)
                next = &__continuation
            }

            if err := __rows.Err(); err != nil {
                return nil, nil, err
            }

            return rows, next, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, nil, obj.makeErr(err)
        }
        return rows, next, nil
    }

}

func (obj *pgxImpl) All_BucketStorageTally_OrderBy_Desc_IntervalStart(ctx context.Context) (
    rows []*BucketStorageTally, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies ORDER BY bucket_storage_tallies.interval_start DESC")

    var __values []interface{}

    var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    obj.logStmt(__stmt, __values...)

    for {
        rows, err = func() (rows []*BucketStorageTally, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, err
            }
            defer __rows.Close()

            for __rows.Next() {
                bucket_storage_tally := &BucketStorageTally{}
                err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
                if err != nil {
                    return nil, err
                }
                rows = append(rows, bucket_storage_tally)
            }
            if err := __rows.Err(); err != nil {
                return nil, err
            }
            return rows, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, obj.makeErr(err)
        }
        return rows, nil
    }

}

func (obj *pgxImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
    bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
    bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
    bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
    bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
    rows []*BucketStorageTally, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

    var __values []interface{}
    __values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

    var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    obj.logStmt(__stmt, __values...)

    for {
        rows, err = func() (rows []*BucketStorageTally, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, err
            }
            defer __rows.Close()

            for __rows.Next() {
                bucket_storage_tally := &BucketStorageTally{}
                err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
                if err != nil {
                    return nil, err
                }
                rows = append(rows, bucket_storage_tally)
            }
            if err := __rows.Err(); err != nil {
                return nil, err
            }
            return rows, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, obj.makeErr(err)
        }
        return rows, nil
    }

}

func (obj *pgxImpl) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
    storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
    storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
@@ -13687,6 +13474,219 @@ func (obj *pgxImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqua
}

func (obj *pgxImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
    bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
    limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
    rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

    var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

    var __values []interface{}
    __values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())

    var __stmt string
    if start != nil && start._set {
        __values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    } else {
        __values = append(__values, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
    }
    obj.logStmt(__stmt, __values...)

    for {
        rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, nil, err
            }
            defer __rows.Close()

            var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
            __continuation._set = true

            for __rows.Next() {
                bucket_bandwidth_rollup := &BucketBandwidthRollup{}
                err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
                if err != nil {
                    return nil, nil, err
                }
                rows = append(rows, bucket_bandwidth_rollup)
                next = &__continuation
            }

            if err := __rows.Err(); err != nil {
                return nil, nil, err
            }

            return rows, next, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, nil, obj.makeErr(err)
        }
        return rows, next, nil
    }

}

func (obj *pgxImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
    bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
    limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
    rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

    var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

    var __values []interface{}
    __values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

    var __stmt string
    if start != nil && start._set {
        __values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    } else {
        __values = append(__values, limit)
        __stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
    }
    obj.logStmt(__stmt, __values...)

    for {
        rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, nil, err
            }
            defer __rows.Close()

            var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
            __continuation._set = true

            for __rows.Next() {
                bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
                err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
                if err != nil {
                    return nil, nil, err
                }
                rows = append(rows, bucket_bandwidth_rollup_archive)
                next = &__continuation
            }

            if err := __rows.Err(); err != nil {
                return nil, nil, err
            }

            return rows, next, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, nil, obj.makeErr(err)
        }
        return rows, next, nil
    }

}

func (obj *pgxImpl) All_BucketStorageTally_OrderBy_Desc_IntervalStart(ctx context.Context) (
    rows []*BucketStorageTally, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies ORDER BY bucket_storage_tallies.interval_start DESC")

    var __values []interface{}

    var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    obj.logStmt(__stmt, __values...)

    for {
        rows, err = func() (rows []*BucketStorageTally, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, err
            }
            defer __rows.Close()

            for __rows.Next() {
                bucket_storage_tally := &BucketStorageTally{}
                err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
                if err != nil {
                    return nil, err
                }
                rows = append(rows, bucket_storage_tally)
            }
            if err := __rows.Err(); err != nil {
                return nil, err
            }
            return rows, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, obj.makeErr(err)
        }
        return rows, nil
    }

}

func (obj *pgxImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
    bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
    bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
    bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
    bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
    rows []*BucketStorageTally, err error) {
    defer mon.Task()(&ctx)(&err)

    var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

    var __values []interface{}
    __values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

    var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
    obj.logStmt(__stmt, __values...)

    for {
        rows, err = func() (rows []*BucketStorageTally, err error) {
            __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
            if err != nil {
                return nil, err
            }
            defer __rows.Close()

            for __rows.Next() {
                bucket_storage_tally := &BucketStorageTally{}
                err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
                if err != nil {
                    return nil, err
                }
                rows = append(rows, bucket_storage_tally)
            }
            if err := __rows.Err(); err != nil {
                return nil, err
            }
            return rows, nil
        }()
        if err != nil {
            if obj.shouldRetry(err) {
                continue
            }
            return nil, obj.makeErr(err)
        }
        return rows, nil
    }

}

func (obj *pgxImpl) First_ReverificationAudits_By_NodeId_OrderBy_Asc_StreamId_Asc_Position(ctx context.Context,
    reverification_audits_node_id ReverificationAudits_NodeId_Field) (
    reverification_audits *ReverificationAudits, err error) {
@ -20607,219 +20607,6 @@ func (obj *pgxcockroachImpl) Find_AccountingTimestamps_Value_By_Name(ctx context
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
||||||
bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
|
|
||||||
limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
||||||
rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
||||||
defer mon.Task()(&ctx)(&err)
|
|
||||||
|
|
||||||
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")
|
|
||||||
|
|
||||||
var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")
|
|
||||||
|
|
||||||
var __values []interface{}
|
|
||||||
__values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
||||||
|
|
||||||
var __stmt string
|
|
||||||
if start != nil && start._set {
|
|
||||||
__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
|
|
||||||
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
||||||
} else {
|
|
||||||
__values = append(__values, limit)
|
|
||||||
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
||||||
}
|
|
||||||
obj.logStmt(__stmt, __values...)
|
|
||||||
|
|
||||||
for {
|
|
||||||
rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
||||||
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
defer __rows.Close()
|
|
||||||
|
|
||||||
var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
|
||||||
__continuation._set = true
|
|
||||||
|
|
||||||
for __rows.Next() {
|
|
||||||
bucket_bandwidth_rollup := &BucketBandwidthRollup{}
|
|
||||||
err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
rows = append(rows, bucket_bandwidth_rollup)
|
|
||||||
next = &__continuation
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := __rows.Err(); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return rows, next, nil
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
if obj.shouldRetry(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, nil, obj.makeErr(err)
|
|
||||||
}
|
|
||||||
return rows, next, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
||||||
	bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

	var __stmt string
	if start != nil && start._set {
		__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
				err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, bucket_bandwidth_rollup_archive)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}
}
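
// Editor's note -- an illustrative sketch, not generated code. The paged
// method above implements keyset pagination: the continuation token carries
// the last-seen primary key, and the resumed SQL compares the whole key tuple
// `(bucket_name, project_id, interval_start, action) > (?, ?, ?, ?)`, so no
// OFFSET scan is needed. A caller can drain every page as below; the name
// collectArchiveRollups and the page size are hypothetical, and the
// BucketBandwidthRollupArchive_IntervalStart field constructor is assumed
// from the usual dbx naming scheme.
func collectArchiveRollups(ctx context.Context, obj *pgxcockroachImpl, since time.Time) (all []*BucketBandwidthRollupArchive, err error) {
	const pageSize = 1000
	var start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
	for {
		rows, next, err := obj.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx,
			BucketBandwidthRollupArchive_IntervalStart(since), pageSize, start)
		if err != nil {
			return nil, err
		}
		all = append(all, rows...)
		if next == nil { // next stays nil once a page scans no rows
			return all, nil
		}
		start = next
	}
}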

func (obj *pgxcockroachImpl) All_BucketStorageTally_OrderBy_Desc_IntervalStart(ctx context.Context) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies ORDER BY bucket_storage_tallies.interval_start DESC")

	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*BucketStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				bucket_storage_tally := &BucketStorageTally{}
				err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
				if err != nil {
					return nil, err
				}
				rows = append(rows, bucket_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}
}

func (obj *pgxcockroachImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
	bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
	bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*BucketStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				bucket_storage_tally := &BucketStorageTally{}
				err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
				if err != nil {
					return nil, err
				}
				rows = append(rows, bucket_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}
}
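
// Illustrative usage sketch, not generated code: the range variant above is
// the natural query for "one bucket over one billing window". The wrapper
// name monthTallies is hypothetical, and the BucketStorageTally_* field
// constructors are assumed from the usual dbx naming scheme (blob fields
// take []byte, timestamps take time.Time).
func monthTallies(ctx context.Context, obj *pgxcockroachImpl, projectID, bucketName []byte, from, to time.Time) ([]*BucketStorageTally, error) {
	return obj.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx,
		BucketStorageTally_ProjectId(projectID),
		BucketStorageTally_BucketName(bucketName),
		BucketStorageTally_IntervalStart(from),
		BucketStorageTally_IntervalStart(to))
}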

func (obj *pgxcockroachImpl) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
@ -21200,6 +20987,219 @@ func (obj *pgxcockroachImpl) All_StoragenodeStorageTally_By_IntervalEndTime_Grea
}

func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

	var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())

	var __stmt string
	if start != nil && start._set {
		__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				bucket_bandwidth_rollup := &BucketBandwidthRollup{}
				err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, bucket_bandwidth_rollup)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}
}
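
// Editor's note: every generated query method wraps its body in the same
// retry loop -- run the statement once, and if obj.shouldRetry classifies the
// error as transient (for example a CockroachDB serialization retry), re-issue
// the whole statement. Below is a minimal standalone sketch of that pattern;
// withRetry, isTransient, and runQuery are hypothetical names standing in for
// the generated closure and obj.shouldRetry.
func withRetry(ctx context.Context, isTransient func(error) bool, runQuery func(context.Context) error) error {
	for {
		err := runQuery(ctx)
		if err != nil && isTransient(err) {
			continue // transient failure: issue the statement again
		}
		return err
	}
}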

func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

	var __stmt string
	if start != nil && start._set {
		__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
				err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, bucket_bandwidth_rollup_archive)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}
}

func (obj *pgxcockroachImpl) All_BucketStorageTally_OrderBy_Desc_IntervalStart(ctx context.Context) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies ORDER BY bucket_storage_tallies.interval_start DESC")

	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*BucketStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				bucket_storage_tally := &BucketStorageTally{}
				err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
				if err != nil {
					return nil, err
				}
				rows = append(rows, bucket_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}
}

func (obj *pgxcockroachImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
	bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
	bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.total_bytes, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.total_segments_count, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*BucketStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				bucket_storage_tally := &BucketStorageTally{}
				err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.TotalBytes, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.TotalSegmentsCount, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
				if err != nil {
					return nil, err
				}
				rows = append(rows, bucket_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}
}

func (obj *pgxcockroachImpl) First_ReverificationAudits_By_NodeId_OrderBy_Asc_StreamId_Asc_Position(ctx context.Context,
	reverification_audits_node_id ReverificationAudits_NodeId_Field) (
	reverification_audits *ReverificationAudits, err error) {