storj/satellite/satellitedb/dbx/accounting.dbx
Yaroslav Vorobiov 5644fb1a7e satellite/accounting/nodetally: add ranged loop
Add node tally ranged loop observer and partial.
Add node tally ranged observer to the ranged loop peer.
Add config flag to select which loop to use for node tally.
Update satellite core to use the segment or ranged loop based on the flag.
Duplicate the existing node tally test, but using the ranged loop.

Change-Id: I6786f1a16933463fab5f79601bf438203a7a5f9e
2023-01-17 13:50:18 +01:00
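
The observer/partial split described in the commit message can be illustrated with a short sketch: the loop forks one partial per segment range, each partial accumulates at-rest totals per node, and the observer merges the partials on join. This is only a hedged illustration of the pattern; the type names, signatures, and the NodeID/Segment shapes below are assumptions, not the actual storj rangedloop or nodetally API.

package nodetallysketch

import (
	"context"
	"time"
)

// NodeID and Segment are placeholder shapes for this sketch only.
type NodeID [32]byte

type Segment struct {
	Pieces    []NodeID // nodes holding a piece of this segment
	PieceSize int64    // stored bytes per piece
}

// Observer is created once per tally run; Partial is created once per range.
type Observer struct {
	Totals map[NodeID]float64 // node -> total at-rest bytes for this run
}

type Partial struct {
	totals map[NodeID]float64
}

// Start resets the run-wide totals at the beginning of a loop iteration.
func (obs *Observer) Start(ctx context.Context, _ time.Time) error {
	obs.Totals = make(map[NodeID]float64)
	return nil
}

// Fork hands out an independent accumulator so ranges can run in parallel.
func (obs *Observer) Fork(ctx context.Context) (*Partial, error) {
	return &Partial{totals: make(map[NodeID]float64)}, nil
}

// Process tallies the bytes each node stores for the segments in one range.
func (p *Partial) Process(ctx context.Context, segments []Segment) error {
	for _, seg := range segments {
		for _, node := range seg.Pieces {
			p.totals[node] += float64(seg.PieceSize)
		}
	}
	return nil
}

// Join folds a finished range back into the run-wide totals.
func (obs *Observer) Join(ctx context.Context, p *Partial) error {
	for node, total := range p.totals {
		obs.Totals[node] += total
	}
	return nil
}

// Finish is where the run-wide totals would be persisted, e.g. as
// storagenode_storage_tally rows (persistence not shown in this sketch).
func (obs *Observer) Finish(ctx context.Context) error {
	return nil
}

The config flag mentioned in the commit message would only choose whether an observer like this is driven by the segments loop or by the ranged loop; that wiring is not shown here.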

// --- accounting --- //
// accounting_timestamps just allows us to save the last time/thing that happened
model accounting_timestamps (
    key name

    field name text
    field value timestamp ( updatable )
)

create accounting_timestamps ( noreturn, replace )

update accounting_timestamps (
    where accounting_timestamps.name = ?
    noreturn
)

read scalar (
    select accounting_timestamps.value
    where accounting_timestamps.name = ?
)

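As a rough sketch of what the declarations above amount to: the replace-style create is an upsert keyed by name, and the scalar read fetches a single value. The database/sql snippet below assumes a Postgres/CockroachDB backend and uses illustrative helper names; it is not the dbx-generated API, which has its own naming scheme.

package accountingdb

import (
	"context"
	"database/sql"
	"time"
)

// saveTimestamp records the last time something happened for a given name,
// mirroring create accounting_timestamps ( noreturn, replace ).
func saveTimestamp(ctx context.Context, db *sql.DB, name string, value time.Time) error {
	_, err := db.ExecContext(ctx,
		`INSERT INTO accounting_timestamps (name, value)
		 VALUES ($1, $2)
		 ON CONFLICT (name) DO UPDATE SET value = EXCLUDED.value`,
		name, value)
	return err
}

// lastTimestamp mirrors the scalar read of accounting_timestamps.value by name.
func lastTimestamp(ctx context.Context, db *sql.DB, name string) (time.Time, error) {
	var value time.Time
	err := db.QueryRowContext(ctx,
		`SELECT value FROM accounting_timestamps WHERE name = $1`,
		name).Scan(&value)
	return value, err
}
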
model accounting_rollup (
    key node_id start_time
    index ( fields start_time )

    field node_id blob
    field start_time timestamp
    field put_total int64
    field get_total int64
    field get_audit_total int64
    field get_repair_total int64
    field put_repair_total int64
    field at_rest_total float64
    field interval_end_time timestamp ( updatable, nullable )
)

// --- bucket accounting tables --- //
model bucket_bandwidth_rollup (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint
    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup
    where bucket_bandwidth_rollup.interval_start >= ?
)

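A paged read returns rows in fixed-size chunks with a continuation cursor instead of one large result set. The sketch below shows that pattern with plain database/sql and keyset pagination over the rollup primary key; the cursor shape, helper names, and exact pagination strategy are assumptions, not the generated dbx code.

package accountingdb

import (
	"context"
	"database/sql"
	"time"
)

// rollupKey mirrors the primary key of a bucket bandwidth rollup row and
// doubles as the continuation cursor between pages (an assumed cursor shape).
type rollupKey struct {
	BucketName    []byte
	ProjectID     []byte
	IntervalStart time.Time
	Action        int64
}

// pageBucketRollups visits every rollup row with interval_start >= since in
// chunks of pageSize rows, resuming each page after the last key seen.
func pageBucketRollups(ctx context.Context, db *sql.DB, since time.Time, pageSize int,
	visit func(key rollupKey, inline, allocated, settled int64) error) error {

	var cursor *rollupKey
	for {
		var (
			rows *sql.Rows
			err  error
		)
		if cursor == nil {
			rows, err = db.QueryContext(ctx,
				`SELECT bucket_name, project_id, interval_start, action, inline, allocated, settled
				   FROM bucket_bandwidth_rollups
				  WHERE interval_start >= $1
				  ORDER BY bucket_name, project_id, interval_start, action
				  LIMIT $2`,
				since, pageSize)
		} else {
			rows, err = db.QueryContext(ctx,
				`SELECT bucket_name, project_id, interval_start, action, inline, allocated, settled
				   FROM bucket_bandwidth_rollups
				  WHERE interval_start >= $1
				    AND (bucket_name, project_id, interval_start, action) > ($2, $3, $4, $5)
				  ORDER BY bucket_name, project_id, interval_start, action
				  LIMIT $6`,
				since, cursor.BucketName, cursor.ProjectID, cursor.IntervalStart, cursor.Action, pageSize)
		}
		if err != nil {
			return err
		}

		count := 0
		for rows.Next() {
			var key rollupKey
			var inline, allocated, settled int64
			if err := rows.Scan(&key.BucketName, &key.ProjectID, &key.IntervalStart, &key.Action,
				&inline, &allocated, &settled); err != nil {
				_ = rows.Close()
				return err
			}
			if err := visit(key, inline, allocated, settled); err != nil {
				_ = rows.Close()
				return err
			}
			cursor = &key
			count++
		}
		if err := rows.Err(); err != nil {
			_ = rows.Close()
			return err
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if count < pageSize {
			return nil // final (possibly empty) page
		}
	}
}
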
model bucket_bandwidth_rollup_archive (
    key bucket_name project_id interval_start action
    index (
        name bucket_bandwidth_rollups_archive_project_id_action_interval_index
        fields project_id action interval_start
    )
    index (
        name bucket_bandwidth_rollups_archive_action_interval_project_id_index
        fields action interval_start project_id
    )

    field bucket_name blob
    field project_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint
    field inline uint64 ( updatable )
    field allocated uint64 ( updatable )
    field settled uint64 ( updatable )
)

read paged (
    select bucket_bandwidth_rollup_archive
    where bucket_bandwidth_rollup_archive.interval_start >= ?
)

model project_bandwidth_daily_rollup (
    key project_id interval_day

    field project_id blob
    field interval_day date
    field egress_allocated uint64 ( updatable )
    field egress_settled uint64 ( updatable )
    field egress_dead uint64 ( updatable, default 0 )
)

model bucket_storage_tally (
    key bucket_name project_id interval_start
    index (
        name bucket_storage_tallies_project_id_interval_start_index
        fields project_id interval_start
    )

    field bucket_name blob
    field project_id blob
    field interval_start timestamp
    field total_bytes uint64 ( default 0 )
    field inline uint64
    field remote uint64
    field total_segments_count uint ( default 0 )
    field remote_segments_count uint
    field inline_segments_count uint
    field object_count uint
    field metadata_size uint64
)

read all (
    select bucket_storage_tally
    orderby desc bucket_storage_tally.interval_start
)

read all (
    select bucket_storage_tally
    where bucket_storage_tally.project_id = ?
    where bucket_storage_tally.bucket_name = ?
    where bucket_storage_tally.interval_start >= ?
    where bucket_storage_tally.interval_start <= ?
    orderby desc bucket_storage_tally.interval_start
)

// --- storage node accounting tables --- //
model storagenode_bandwidth_rollup (
    key storagenode_id interval_start action
    index (
        name storagenode_bandwidth_rollups_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint
    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

create storagenode_bandwidth_rollup()

read all (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start = ?
)

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.interval_start >= ?
)

read paged (
    select storagenode_bandwidth_rollup
    where storagenode_bandwidth_rollup.storagenode_id = ?
    where storagenode_bandwidth_rollup.interval_start >= ?
)

model storagenode_bandwidth_rollup_archive (
    key storagenode_id interval_start action
    index (
        name storagenode_bandwidth_rollup_archives_interval_start_index
        fields interval_start
    )

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint
    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_archive
    where storagenode_bandwidth_rollup_archive.interval_start >= ?
)

///////////////////////////////////////
// orders phase2->phase3 rollout table
///////////////////////////////////////
model storagenode_bandwidth_rollup_phase2 (
    table storagenode_bandwidth_rollups_phase2 // make the pluralization consistent
    key storagenode_id interval_start action

    field storagenode_id blob
    field interval_start timestamp
    field interval_seconds uint
    field action uint
    field allocated uint64 ( updatable, nullable, default 0 )
    field settled uint64 ( updatable )
)

read paged (
    select storagenode_bandwidth_rollup_phase2
    where storagenode_bandwidth_rollup_phase2.storagenode_id = ?
    where storagenode_bandwidth_rollup_phase2.interval_start >= ?
)

model storagenode_storage_tally (
    // this primary key enforces uniqueness on (interval_end_time, node_id)
    // and also implicitly creates an index on interval_end_time.
    // interval_end_time will be the same value for many rows, so we put it
    // first to take advantage of cockroachdb prefix compression.
    // node_id is also repeated many times, but interval_end_time is more
    // repetitive and benefits more.
    key interval_end_time node_id
    index ( fields node_id )

    field node_id blob
    field interval_end_time timestamp
    field data_total float64
)

read all (
    select storagenode_storage_tally
)

read all (
    select storagenode_storage_tally
    where storagenode_storage_tally.interval_end_time >= ?
)

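To make the key-ordering comment in the storagenode_storage_tally model concrete: leading the primary key with interval_end_time keeps the many rows written by a single tally run adjacent on disk, which is what CockroachDB prefix compression exploits, while the secondary index still serves per-node lookups. Below is an approximate DDL rendering of the model, embedded as a Go constant; the generated table, index, and type names may differ.

package accountingdb

// Approximate schema for the storagenode_storage_tally model above
// (names and exact column types are assumptions, not the generated DDL).
const storagenodeStorageTallySchema = `
CREATE TABLE storagenode_storage_tallies (
    interval_end_time timestamp with time zone NOT NULL,
    node_id           bytea NOT NULL,
    data_total        double precision NOT NULL,
    PRIMARY KEY ( interval_end_time, node_id )
);
CREATE INDEX storagenode_storage_tallies_node_id_index
    ON storagenode_storage_tallies ( node_id );
`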