storj/satellite/satellitedb/dbx/satellitedb.dbx

// dbx.v1 golang satellitedb.dbx .
//--- Value Attribution ---//
model value_attribution (
key project_id bucket_name
field project_id blob
field bucket_name blob
field partner_id blob
field user_agent blob ( nullable )
field last_updated timestamp ( autoinsert, autoupdate )
)
create value_attribution ()
read one (
select value_attribution
where value_attribution.project_id = ?
where value_attribution.bucket_name = ?
)
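// The definitions above are compiled by dbx into Go query code (per the header
// comment). As a rough illustration only, a hand-written database/sql equivalent
// of the "read one" above might look like the sketch below; the pluralized table
// name, the Postgres-style placeholders, and the function name are assumptions,
// not the generated API.

package attribution

import (
	"context"
	"database/sql"
	"time"
)

// ValueAttribution mirrors the value_attribution model above.
type ValueAttribution struct {
	ProjectID   []byte
	BucketName  []byte
	UserAgent   []byte // nullable column
	LastUpdated time.Time
}

// getAttribution approximates the "read one" query; the real dbx-generated
// accessor has a different, generated name and signature.
func getAttribution(ctx context.Context, db *sql.DB, projectID, bucketName []byte) (ValueAttribution, error) {
	const q = `SELECT project_id, bucket_name, user_agent, last_updated
	           FROM value_attributions
	           WHERE project_id = $1 AND bucket_name = $2`
	var va ValueAttribution
	err := db.QueryRowContext(ctx, q, projectID, bucketName).
		Scan(&va.ProjectID, &va.BucketName, &va.UserAgent, &va.LastUpdated)
	return va, err
}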
//--- containment ---//
model segment_pending_audits (
key node_id
field node_id blob
field stream_id blob
field position uint64
field piece_id blob
field stripe_index int64
field share_size int64
field expected_share_hash blob
field reverify_count int64 ( updatable )
)
delete segment_pending_audits ( where segment_pending_audits.node_id = ? )
read one (
select segment_pending_audits
where segment_pending_audits.node_id = ?
)
//--- accounting ---//
// accounting_timestamps records the last time a named accounting event happened (see the sketch after these queries)
model accounting_timestamps (
key name
field name text
field value timestamp ( updatable )
)
create accounting_timestamps ( noreturn )
update accounting_timestamps (
where accounting_timestamps.name = ?
noreturn
)
read scalar (
select accounting_timestamps.value
where accounting_timestamps.name = ?
)
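// accounting_timestamps keeps one row per named event. A minimal sketch of how a
// caller might record and read back a "last run" time is below; it collapses the
// separate create/update definitions above into a single upsert for brevity, and
// the table name and placeholders are assumptions, not the generated queries.

package accountingts

import (
	"context"
	"database/sql"
	"time"
)

// SetLastRun records when a named accounting step last ran. The generated code
// issues separate INSERT and UPDATE statements; an upsert is used here only to
// keep the sketch short.
func SetLastRun(ctx context.Context, db *sql.DB, name string, t time.Time) error {
	_, err := db.ExecContext(ctx,
		`INSERT INTO accounting_timestamps (name, value) VALUES ($1, $2)
		 ON CONFLICT (name) DO UPDATE SET value = EXCLUDED.value`,
		name, t)
	return err
}

// LastRun mirrors the "read scalar" query above.
func LastRun(ctx context.Context, db *sql.DB, name string) (time.Time, error) {
	var t time.Time
	err := db.QueryRowContext(ctx,
		`SELECT value FROM accounting_timestamps WHERE name = $1`, name).Scan(&t)
	return t, err
}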
model accounting_rollup (
key node_id start_time
index ( fields start_time )
field node_id blob
field start_time timestamp
field put_total int64
field get_total int64
field get_audit_total int64
field get_repair_total int64
field put_repair_total int64
field at_rest_total float64
)
//--- overlay cache ---//
model node (
key id
index (
name node_last_ip
fields last_net
)
index (
name nodes_dis_unk_off_exit_fin_last_success_index
fields disqualified unknown_audit_suspended offline_suspended exit_finished_at last_contact_success
)
index (
// N.B. the index name doesn't clarify its purpose because the index
// was created as-is in the production DBs before being added here
name nodes_type_last_cont_success_free_disk_ma_mi_patch_vetted_partial_index
fields type last_contact_success free_disk major minor patch vetted_at
where node.disqualified = null
where node.unknown_audit_suspended = null
where node.exit_initiated_at = null
where node.release = true
where node.last_net != ""
)
index (
// N.B. the index name doesn't clarify its purpose because the index
// was created as-is in the production DBs before being added here
name nodes_dis_unk_aud_exit_init_rel_type_last_cont_success_stored_index
fields disqualified unknown_audit_suspended exit_initiated_at release type last_contact_success
where node.disqualified = null
where node.unknown_audit_suspended = null
where node.exit_initiated_at = null
where node.release = true
)
field id blob
// address is how to contact the node; it can be a hostname or an IP and it includes the port
field address text ( updatable, default "" ) // TODO: use compressed format
// last_net is the /24 subnet of the IP (see the sketch after this model)
field last_net text ( updatable )
field last_ip_port text ( updatable, nullable )
field country_code text ( updatable, nullable )
field protocol int ( updatable, default 0 )
field type int ( updatable, default 0 )
field email text ( updatable )
field wallet text ( updatable ) // TODO: use compressed format
field wallet_features text ( updatable, default "" )
field free_disk int64 ( updatable, default -1 )
field piece_count int64 ( autoinsert, updatable, default 0 )
field major int64 ( updatable, default 0 )
field minor int64 ( updatable, default 0 )
field patch int64 ( updatable, default 0 )
field hash text ( updatable, default "" )
field timestamp timestamp ( updatable, default "0001-01-01 00:00:00+00" )
field release bool ( updatable, default false )
field latency_90 int64 ( updatable, default 0 )
field vetted_at timestamp ( updatable, nullable )
field created_at timestamp ( autoinsert, default current_timestamp )
field updated_at timestamp ( autoinsert, autoupdate, default current_timestamp )
field last_contact_success timestamp ( updatable, default "epoch" )
field last_contact_failure timestamp ( updatable, default "epoch" )
// node is disqualified when it fails too many audits or is offline for too long
field disqualified timestamp ( updatable, nullable )
field disqualification_reason int ( updatable, nullable )
// node is placed under inspection when it has too many unknown-error audits
field suspended timestamp ( updatable, nullable )
// renamed column from suspended
field unknown_audit_suspended timestamp ( updatable, nullable )
// node is considered unhealthy if it is offline for too many audits
field offline_suspended timestamp ( updatable, nullable )
// once a node becomes offline_suspended, mark it as under review so we check it again later
field under_review timestamp ( updatable, nullable )
field exit_initiated_at timestamp ( updatable, nullable )
field exit_loop_completed_at timestamp ( updatable, nullable )
field exit_finished_at timestamp ( updatable, nullable )
field exit_success bool ( updatable, default false )
)
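// The last_net column stores the node's /24 network. A minimal sketch of
// deriving such a value with the standard library is below; it is a
// hypothetical helper (the satellite has its own resolution code) and IPv6 is
// simplified to a /64.

package nodenet

import (
	"fmt"
	"net"
)

// lastNet derives a "/24 subnet" style value like the last_net column.
func lastNet(hostPort string) (string, error) {
	host, _, err := net.SplitHostPort(hostPort)
	if err != nil {
		host = hostPort // assume the input had no port
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return "", fmt.Errorf("not an IP address: %q", host)
	}
	if v4 := ip.To4(); v4 != nil {
		// e.g. lastNet("203.0.113.7:28967") == "203.0.113.0"
		return v4.Mask(net.CIDRMask(24, 32)).String(), nil
	}
	return ip.Mask(net.CIDRMask(64, 128)).String(), nil
}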
update node ( where node.id = ? )
update node (
where node.id = ?
noreturn
)
update node (
where node.id = ?
where node.disqualified = null
where node.exit_finished_at = null
noreturn
)
// "Get" query; fails if node not found
read one (
select node
where node.id = ?
)
read all (
select node.id
)
read paged (
select node
)
read all (
select node.id node.piece_count
where node.piece_count != 0
)
//--- reputation store ---//
model reputation (
key id
field id blob
field audit_success_count int64 ( updatable, default 0 )
field total_audit_count int64 ( updatable, default 0 )
field vetted_at timestamp ( updatable, nullable )
field created_at timestamp ( autoinsert, default current_timestamp )
field updated_at timestamp ( autoinsert, autoupdate, default current_timestamp )
// node is disqualified when it fails too many audits or is offline for too long
field disqualified timestamp ( updatable, nullable )
// node is placed under inspection when it has too many unknown-error audits
field suspended timestamp ( updatable, nullable )
// renamed column from suspended
field unknown_audit_suspended timestamp ( updatable, nullable )
// node is considered unhealthy if it is offline for too many audits
field offline_suspended timestamp ( updatable, nullable )
// once a node becomes offline_suspended, mark it as under review so we check it again later
field under_review timestamp ( updatable, nullable )
field online_score float64 ( updatable, default 1 )
field audit_history blob ( updatable )
// the audit_reputation_* fields track information related to successful vs. failed audits
// (see the update sketch after the reputation queries below)
field audit_reputation_alpha float64 ( updatable, default 1 )
field audit_reputation_beta float64 ( updatable, default 0 )
// unknown_audit_reputation fields track information related to successful vs. unknown error audits
field unknown_audit_reputation_alpha float64 ( updatable, default 1 )
field unknown_audit_reputation_beta float64 ( updatable, default 0 )
)
create reputation ()
update reputation ( where reputation.id = ? )
update reputation (
where reputation.id = ?
where reputation.audit_history = ?
)
update reputation (
where reputation.id = ?
noreturn
)
// "Get" query; fails if reputation not found
read one (
select reputation
where reputation.id = ?
)
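// The audit_reputation_* and unknown_audit_reputation_* alpha/beta pairs are
// commonly described as a forgetting-factor reputation: each audit outcome
// nudges alpha (success weight) and beta (failure weight) under a decay factor,
// and the score is alpha/(alpha+beta). The sketch below illustrates that
// update; lambda and w are assumed constants, not the satellite's tuned
// configuration.

package reputation

// applyAudit updates an (alpha, beta) pair for one audit outcome and returns
// the resulting score alpha/(alpha+beta).
func applyAudit(alpha, beta float64, success bool) (newAlpha, newBeta, score float64) {
	const (
		lambda = 0.95 // forgetting factor (assumed value)
		w      = 1.0  // weight of a single audit (assumed value)
	)
	v := -1.0
	if success {
		v = 1.0
	}
	newAlpha = lambda*alpha + w*(1+v)/2
	newBeta = lambda*beta + w*(1-v)/2
	score = newAlpha / (newAlpha + newBeta)
	return newAlpha, newBeta, score
}

// Starting from the column defaults (alpha=1, beta=0, score=1), a single failed
// audit with these illustrative constants yields a score of 0.95/(0.95+1) ≈ 0.49.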
//--- repairqueue ---//
model repair_queue (
table repair_queue
key stream_id position
field stream_id blob
field position uint64
field attempted_at timestamp (updatable, nullable)
field updated_at timestamp ( updatable, default current_timestamp )
field inserted_at timestamp ( default current_timestamp )
field segment_health float64 (default 1)
index (
fields updated_at
)
index (
name repair_queue_num_healthy_pieces_attempted_at_index
fields segment_health attempted_at
)
)
delete repair_queue ( where repair_queue.updated_at < ? )
//--- satellite console ---//
model user (
key id
field id blob
field email text ( updatable )
field normalized_email text ( updatable )
field full_name text ( updatable )
field short_name text ( updatable, nullable )
field password_hash blob ( updatable )
field status int ( updatable, autoinsert )
field partner_id blob ( nullable )
field user_agent blob ( nullable )
field created_at timestamp ( autoinsert )
field project_limit int ( updatable, default 0 )
field project_bandwidth_limit int64 ( updatable, default 0 )
field project_storage_limit int64 ( updatable, default 0 )
field project_segment_limit int64 ( updatable, default 0 )
field paid_tier bool ( updatable, default false )
field position text ( updatable, nullable )
field company_name text ( updatable, nullable )
field company_size int ( updatable, nullable )
field working_on text ( updatable, nullable )
field is_professional bool ( updatable, default false )
field employee_count text ( updatable, nullable )
field have_sales_contact bool ( updatable, default false )
field mfa_enabled bool ( updatable, default false )
field mfa_secret_key text ( updatable, nullable )
field mfa_recovery_codes text ( updatable, nullable )
field signup_promo_code text ( updatable, nullable )
)
create user ( )
update user ( where user.id = ? )
delete user ( where user.id = ? )
read all (
select user
where user.normalized_email = ?
)
read one (
select user
where user.normalized_email = ?
where user.status != 0
)
read one (
select user
where user.id = ?
)
read one (
select user.project_limit
where user.id = ?
)
read one (
select user.project_storage_limit user.project_bandwidth_limit user.project_segment_limit
where user.id = ?
)
model project (
key id
field id blob
field name text ( updatable )
field description text ( updatable )
field usage_limit int64 ( nullable, updatable )
field bandwidth_limit int64 ( nullable, updatable )
field segment_limit int64 ( nullable, updatable, default 1000000)
field rate_limit int ( nullable, updatable )
field burst_limit int ( nullable, updatable )
field max_buckets int ( nullable, updatable )
field partner_id blob ( nullable )
field user_agent blob ( nullable )
field owner_id blob
field created_at timestamp ( autoinsert )
)
create project ( )
update project ( where project.id = ? )
delete project ( where project.id = ? )
read one (
select project
where project.id = ?
)
read one (
select project.usage_limit
where project.id = ?
)
read one (
select project.bandwidth_limit
where project.id = ?
)
read one (
select project.segment_limit
where project.id = ?
)
read one (
select project.max_buckets
where project.id = ?
)
read one (
select project.bandwidth_limit project.usage_limit project.segment_limit
where project.id = ?
)
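// On the satellite, this combined limits read feeds an in-memory read-through
// cache: limits are fetched from the DB on a miss or once a cached entry
// expires (roughly every 10 minutes by default), so a limit change may take
// that long to become visible to uploads/downloads. A minimal sketch of such a
// cache is below; the Limits struct and all names are illustrative, not the
// production implementation.

package limitcache

import (
	"context"
	"sync"
	"time"
)

// Limits mirrors the three nullable columns selected above.
type Limits struct {
	Bandwidth, Usage, Segments *int64
}

type entry struct {
	limits  Limits
	expires time.Time
}

// Cache is a read-through cache with a TTL.
type Cache struct {
	ttl   time.Duration
	fetch func(ctx context.Context, projectID string) (Limits, error) // DB lookup

	mu      sync.Mutex
	entries map[string]entry
}

func New(ttl time.Duration, fetch func(context.Context, string) (Limits, error)) *Cache {
	return &Cache{ttl: ttl, fetch: fetch, entries: map[string]entry{}}
}

// Get returns cached limits, refreshing them from the database when the entry
// is missing or expired.
func (c *Cache) Get(ctx context.Context, projectID string) (Limits, error) {
	c.mu.Lock()
	e, ok := c.entries[projectID]
	c.mu.Unlock()
	if ok && time.Now().Before(e.expires) {
		return e.limits, nil
	}
	limits, err := c.fetch(ctx, projectID)
	if err != nil {
		return Limits{}, err
	}
	c.mu.Lock()
	c.entries[projectID] = entry{limits: limits, expires: time.Now().Add(c.ttl)}
	c.mu.Unlock()
	return limits, nil
}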
read all (
select project
)
read all (
select project
where project.created_at < ?
orderby asc project.created_at
)
read all (
select project
where project.owner_id = ?
orderby asc project.created_at
)
read all (
select project
join project.id = project_member.project_id
where project_member.member_id = ?
orderby asc project.name
)
read limitoffset (
select project
where project.created_at < ?
orderby asc project.created_at
)
model project_member (
key member_id project_id
field member_id user.id cascade
field project_id project.id cascade
field created_at timestamp ( autoinsert )
)
create project_member ( )
delete project_member (
where project_member.member_id = ?
where project_member.project_id = ?
)
read all (
select project_member
where project_member.member_id = ?
)
model api_key (
key id
unique head
unique name project_id
field id blob
field project_id project.id cascade
field head blob
field name text (updatable)
field secret blob
field partner_id blob (nullable)
field user_agent blob (nullable)
field created_at timestamp (autoinsert)
)
create api_key ( )
update api_key (
where api_key.id = ?
noreturn
)
delete api_key ( where api_key.id = ? )
read one (
select api_key
where api_key.id = ?
)
read one (
select api_key
where api_key.head = ?
)
read one (
select api_key
where api_key.name = ?
where api_key.project_id = ?
)
// --- bucket accounting tables --- //
model bucket_bandwidth_rollup (
key bucket_name project_id interval_start action
index (
name bucket_bandwidth_rollups_project_id_action_interval_index
fields project_id action interval_start
)
index (
name bucket_bandwidth_rollups_action_interval_project_id_index
fields action interval_start project_id
)
field bucket_name blob
field project_id blob
field interval_start timestamp
field interval_seconds uint
field action uint
field inline uint64 ( updatable )
field allocated uint64 ( updatable )
field settled uint64 ( updatable )
)
read paged (
select bucket_bandwidth_rollup
where bucket_bandwidth_rollup.interval_start >= ?
)
model bucket_bandwidth_rollup_archive (
key bucket_name project_id interval_start action
index (
name bucket_bandwidth_rollups_archive_project_id_action_interval_index
fields project_id action interval_start
)
index (
name bucket_bandwidth_rollups_archive_action_interval_project_id_index
fields action interval_start project_id
)
field bucket_name blob
field project_id blob
field interval_start timestamp
field interval_seconds uint
field action uint
field inline uint64 ( updatable )
field allocated uint64 ( updatable )
field settled uint64 ( updatable )
)
read paged (
select bucket_bandwidth_rollup_archive
where bucket_bandwidth_rollup_archive.interval_start >= ?
)
model revocation (
key revoked
field revoked blob
field api_key_id blob
)
create revocation ( noreturn )
model project_bandwidth_rollup (
key project_id interval_month
field project_id blob
field interval_month date
field egress_allocated uint64 ( updatable )
)
model project_bandwidth_daily_rollup (
key project_id interval_day
field project_id blob
field interval_day date
field egress_allocated uint64 ( updatable )
field egress_settled uint64 ( updatable )
field egress_dead uint64 ( updatable, default 0 )
)
model bucket_storage_tally (
key bucket_name project_id interval_start
index (
name bucket_storage_tallies_project_id_interval_start_index
fields project_id interval_start
)
field bucket_name blob
field project_id blob
field interval_start timestamp
field total_bytes uint64 ( default 0)
field inline uint64
field remote uint64
field total_segments_count uint ( default 0)
field remote_segments_count uint
field inline_segments_count uint
field object_count uint
field metadata_size uint64
)
read all (
select bucket_storage_tally
)
read all (
select bucket_storage_tally
where bucket_storage_tally.project_id = ?
where bucket_storage_tally.bucket_name = ?
where bucket_storage_tally.interval_start >= ?
where bucket_storage_tally.interval_start <= ?
orderby desc bucket_storage_tally.interval_start
)
// --- storage node accounting tables --- //
model storagenode_bandwidth_rollup (
key storagenode_id interval_start action
index (
name storagenode_bandwidth_rollups_interval_start_index
fields interval_start
)
field storagenode_id blob
field interval_start timestamp
field interval_seconds uint
field action uint
field allocated uint64 ( updatable, nullable, default 0 )
field settled uint64 ( updatable )
)
create storagenode_bandwidth_rollup()
read all (
select storagenode_bandwidth_rollup
where storagenode_bandwidth_rollup.storagenode_id = ?
where storagenode_bandwidth_rollup.interval_start = ?
)
read paged (
select storagenode_bandwidth_rollup
where storagenode_bandwidth_rollup.interval_start >= ?
)
read paged (
select storagenode_bandwidth_rollup
where storagenode_bandwidth_rollup.storagenode_id = ?
where storagenode_bandwidth_rollup.interval_start >= ?
)
model storagenode_bandwidth_rollup_archive (
key storagenode_id interval_start action
index (
name storagenode_bandwidth_rollup_archives_interval_start_index
fields interval_start
)
field storagenode_id blob
field interval_start timestamp
field interval_seconds uint
field action uint
field allocated uint64 ( updatable, nullable, default 0 )
field settled uint64 ( updatable )
)
read paged (
select storagenode_bandwidth_rollup_archive
where storagenode_bandwidth_rollup_archive.interval_start >= ?
)
///////////////////////////////////////
// orders phase2->phase3 rollout table
///////////////////////////////////////
model storagenode_bandwidth_rollup_phase2 (
table storagenode_bandwidth_rollups_phase2 // make the pluralization consistent
key storagenode_id interval_start action
field storagenode_id blob
field interval_start timestamp
field interval_seconds uint
field action uint
field allocated uint64 ( updatable, nullable, default 0 )
field settled uint64 ( updatable )
)
read paged (
select storagenode_bandwidth_rollup_phase2
where storagenode_bandwidth_rollup_phase2.storagenode_id = ?
where storagenode_bandwidth_rollup_phase2.interval_start >= ?
)
model storagenode_storage_tally (
// this primary key will enforce uniqueness on (interval_end_time, node_id)
// and also creates an index on interval_end_time implicitly.
// the interval_end_time will be the same value for many rows so
// we put that first so we can use cockroachdb prefix compression.
// node_id is also used many times but interval_end_time is more
// repetitive and will benefit more.
key interval_end_time node_id
index ( fields node_id )
field node_id blob
field interval_end_time timestamp
field data_total float64
)
read all (
select storagenode_storage_tally
)
read all (
select storagenode_storage_tally
where storagenode_storage_tally.interval_end_time >= ?
)
// --- storage node payment tables --- //
model storagenode_paystub (
// The (period, node_id) tuple is the primary key. The primary key index
// should serve for quick queries for all paystubs in a given period since
// it comes first but efficient queries for all paystubs with a given
// node_id will require a distinct index.
key period node_id
index ( fields node_id )
field period text // YYYY-MM, e.g. 2020-02
field node_id blob //
field created_at timestamp ( autoinsert ) //
field codes text // colon separated list
field usage_at_rest float64 // byte-hours of data at rest
field usage_get int64 // bytes of bandwidth
field usage_put int64 // bytes of bandwidth
field usage_get_repair int64 // bytes of bandwidth
field usage_put_repair int64 // bytes of bandwidth
field usage_get_audit int64 // bytes of bandwidth
field comp_at_rest int64 // in micro-units of currency
field comp_get int64 // in micro-units of currency
field comp_put int64 // in micro-units of currency
field comp_get_repair int64 // in micro-units of currency
field comp_put_repair int64 // in micro-units of currency
field comp_get_audit int64 // in micro-units of currency
field surge_percent int64 // percentage
field held int64 // in micro-units of currency
field owed int64 // in micro-units of currency
field disposed int64 // in micro-units of currency
field paid int64 // in micro-units of currency
field distributed int64 // in micro-units of currency
)
create storagenode_paystub ( noreturn, replace )
read one (
select storagenode_paystub
where storagenode_paystub.node_id = ?
where storagenode_paystub.period = ?
)
read all (
select storagenode_paystub
where storagenode_paystub.node_id = ?
)
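// Monetary paystub columns are stored in micro-units of currency (10^-6 of a
// unit) and period is a YYYY-MM string. The helpers below sketch those
// conversions; the function names are illustrative.

package paystub

import (
	"fmt"
	"time"
)

// microUnitsToString renders a micro-unit amount as a decimal string,
// e.g. microUnitsToString(1_250_000) == "1.250000".
func microUnitsToString(micro int64) string {
	sign := ""
	if micro < 0 {
		sign, micro = "-", -micro
	}
	return fmt.Sprintf("%s%d.%06d", sign, micro/1_000_000, micro%1_000_000)
}

// periodOf formats a time as the YYYY-MM period string used by the period
// column, e.g. February 2020 becomes "2020-02".
func periodOf(t time.Time) string {
	return t.Format("2006-01")
}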
model storagenode_payment (
key id
index ( fields node_id period )
field id serial64 //
field created_at timestamp ( autoinsert ) //
field node_id blob //
field period text // YYYY-MM, e.g. 2020-02
field amount int64 // in micro-units of currency
field receipt text ( nullable ) //
field notes text ( nullable ) //
)
create storagenode_payment ( noreturn )
read limitoffset (
select storagenode_payment
where storagenode_payment.node_id = ?
where storagenode_payment.period = ?
orderby desc storagenode_payment.id
)
read all (
select storagenode_payment
where storagenode_payment.node_id = ?
)
read all (
select storagenode_payment
where storagenode_payment.node_id = ?
where storagenode_payment.period = ?
)
//--- peer_identity ---//
model peer_identity (
key node_id
field node_id blob
field leaf_serial_number blob (updatable)
field chain blob (updatable) // x509 ASN.1 DER content
field updated_at timestamp ( autoinsert, autoupdate )
)
create peer_identity ( noreturn )
update peer_identity (
where peer_identity.node_id = ?
noreturn
)
read one (
select peer_identity
where peer_identity.node_id = ?
)
read one (
select peer_identity.leaf_serial_number
where peer_identity.node_id = ?
)
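// The chain column holds x509 certificates in ASN.1 DER form. Assuming the blob
// is one or more concatenated DER certificates with the leaf first (an
// assumption about the stored layout), decoding it with the standard library
// looks roughly like this:

package peeridentity

import (
	"crypto/x509"
	"fmt"
	"math/big"
)

// parseChain decodes a blob of concatenated DER certificates.
func parseChain(chain []byte) ([]*x509.Certificate, error) {
	return x509.ParseCertificates(chain)
}

// leafSerial returns the serial number of the first certificate in the chain,
// which should correspond to the leaf_serial_number column (the exact stored
// encoding of that column is not specified here).
func leafSerial(chain []byte) (*big.Int, error) {
	certs, err := parseChain(chain)
	if err != nil {
		return nil, err
	}
	if len(certs) == 0 {
		return nil, fmt.Errorf("empty certificate chain")
	}
	return certs[0].SerialNumber, nil
}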
//--- satellite registration token for Vanguard release (temporary table) ---//
model registration_token (
key secret
unique owner_id
field secret blob
field owner_id blob ( updatable, nullable )
field project_limit int
field created_at timestamp ( autoinsert )
)
create registration_token ( )
read one (
select registration_token
where registration_token.secret = ?
)
read one (
select registration_token
where registration_token.owner_id = ?
)
update registration_token ( where registration_token.secret = ? )
//--- satellite reset password token ---//
model reset_password_token (
key secret
unique owner_id
field secret blob
field owner_id blob ( updatable )
field created_at timestamp ( autoinsert )
)
create reset_password_token ( )
read one (
select reset_password_token
where reset_password_token.secret = ?
)
read one (
select reset_password_token
where reset_password_token.owner_id = ?
)
delete reset_password_token ( where reset_password_token.secret = ? )
//--- offer table ---//
model offer (
key id
field id serial
field name text ( updatable )
field description text ( updatable )
field award_credit_in_cents int ( updatable, default 0 )
field invitee_credit_in_cents int ( updatable, default 0 )
field award_credit_duration_days int ( updatable, nullable )
field invitee_credit_duration_days int ( updatable, nullable )
field redeemable_cap int ( updatable, nullable )
field expires_at timestamp ( updatable )
field created_at timestamp ( autoinsert )
// status corresponds to the values of rewards.OfferStatus
field status int ( updatable )
// type corresponds to the values of rewards.OfferType
field type int ( updatable )
)
//--- user credit table ---//
model user_credit (
key id
unique id offer_id
index (
name credits_earned_user_id_offer_id
fields id offer_id
unique
)
field id serial
field user_id user.id cascade
field offer_id offer.id restrict
field referred_by user.id setnull ( nullable )
field type text
field credits_earned_in_cents int
field credits_used_in_cents int ( updatable, autoinsert )
field expires_at timestamp ( updatable )
field created_at timestamp ( autoinsert )
)
//--- metainfo buckets ---//
model bucket_metainfo (
key id
unique project_id name
field id blob
field project_id project.id restrict
field name blob
field partner_id blob (nullable, updatable)
field user_agent blob (nullable, updatable)
field path_cipher int
field created_at timestamp ( autoinsert )
field default_segment_size int (updatable)
field default_encryption_cipher_suite int (updatable)
field default_encryption_block_size int (updatable)
field default_redundancy_algorithm int (updatable)
field default_redundancy_share_size int (updatable)
field default_redundancy_required_shares int (updatable)
field default_redundancy_repair_shares int (updatable)
field default_redundancy_optimal_shares int (updatable)
field default_redundancy_total_shares int (updatable)
field placement int (nullable, updatable)
)
create bucket_metainfo ()
update bucket_metainfo (
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read one (
select bucket_metainfo
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read one (
select bucket_metainfo.created_at
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read one (
select bucket_metainfo.id
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read one (
select bucket_metainfo.placement
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read has (
select bucket_metainfo
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
delete bucket_metainfo (
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read limitoffset ( // Forward
select bucket_metainfo
where bucket_metainfo.project_id = ?
where bucket_metainfo.name >= ?
orderby asc bucket_metainfo.name
)
read limitoffset ( // After
select bucket_metainfo
where bucket_metainfo.project_id = ?
where bucket_metainfo.name > ?
orderby asc bucket_metainfo.name
)
read count (
select bucket_metainfo.name
where bucket_metainfo.project_id = ?
)
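// The two limitoffset reads above implement cursor-style bucket listing:
// "Forward" starts at a name (>=) and "After" starts just past it (>). A
// hand-written approximation of the "After" variant is sketched below; the
// pluralized table name and placeholders are assumptions, not the generated
// query.

package bucketlist

import (
	"context"
	"database/sql"
)

// listAfter returns up to limit bucket names sorting after the cursor.
func listAfter(ctx context.Context, db *sql.DB, projectID, cursor []byte, limit int) ([][]byte, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT name FROM bucket_metainfos
		 WHERE project_id = $1 AND name > $2
		 ORDER BY name ASC
		 LIMIT $3`,
		projectID, cursor, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names [][]byte
	for rows.Next() {
		var name []byte
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}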
//--- graceful exit progress ---//
model graceful_exit_progress (
table graceful_exit_progress
key node_id
field node_id blob
field bytes_transferred int64 ( updatable )
field pieces_transferred int64 ( autoinsert, updatable, default 0 )
field pieces_failed int64 ( autoinsert, updatable, default 0 )
field updated_at timestamp ( autoinsert, autoupdate )
)
read one (
select graceful_exit_progress
where graceful_exit_progress.node_id = ?
)
//--- graceful exit transfer queue with segment stream_id and position ---//
model graceful_exit_segment_transfer (
table graceful_exit_segment_transfer_queue
key node_id stream_id position piece_num
field node_id blob
field stream_id blob
field position uint64
field piece_num int
field root_piece_id blob ( nullable )
field durability_ratio float64 ( updatable )
field queued_at timestamp ( autoinsert )
field requested_at timestamp ( updatable, nullable )
field last_failed_at timestamp ( updatable, nullable )
field last_failed_code int ( updatable, nullable )
field failed_count int ( updatable, nullable )
field finished_at timestamp ( updatable, nullable )
field order_limit_send_count int ( updatable, default 0 )
index (
name graceful_exit_segment_transfer_nid_dr_qa_fa_lfa_index
fields node_id durability_ratio queued_at finished_at last_failed_at
)
)
update graceful_exit_segment_transfer (
where graceful_exit_segment_transfer.node_id = ?
where graceful_exit_segment_transfer.stream_id = ?
where graceful_exit_segment_transfer.position = ?
where graceful_exit_segment_transfer.piece_num = ?
noreturn
)
delete graceful_exit_segment_transfer (
where graceful_exit_segment_transfer.node_id = ?
)
delete graceful_exit_segment_transfer (
where graceful_exit_segment_transfer.node_id = ?
where graceful_exit_segment_transfer.stream_id = ?
where graceful_exit_segment_transfer.position = ?
where graceful_exit_segment_transfer.piece_num = ?
)
delete graceful_exit_segment_transfer (
where graceful_exit_segment_transfer.node_id = ?
where graceful_exit_segment_transfer.finished_at != null
)
read one (
select graceful_exit_segment_transfer
where graceful_exit_segment_transfer.node_id = ?
where graceful_exit_segment_transfer.stream_id = ?
where graceful_exit_segment_transfer.position = ?
where graceful_exit_segment_transfer.piece_num = ?
)
//--- satellite payments ---//
model stripe_customer (
key user_id
unique customer_id
field user_id blob
field customer_id text
field created_at timestamp ( autoinsert )
)
create stripe_customer ( )
read one (
select stripe_customer.customer_id
where stripe_customer.user_id = ?
)
read limitoffset (
select stripe_customer
where stripe_customer.created_at <= ?
orderby desc stripe_customer.created_at
)
model coinpayments_transaction (
key id
field id text
field user_id blob
field address text
field amount blob
field received blob ( updatable )
field status int ( updatable )
field key text
field timeout int
field created_at timestamp ( autoinsert )
)
create coinpayments_transaction ()
update coinpayments_transaction ( where coinpayments_transaction.id = ? )
read all (
select coinpayments_transaction
where coinpayments_transaction.user_id = ?
orderby desc coinpayments_transaction.created_at
)
model stripecoinpayments_apply_balance_intent (
key tx_id
field tx_id coinpayments_transaction.id cascade
field state int ( updatable )
field created_at timestamp ( autoinsert )
)
model stripecoinpayments_invoice_project_record (
key id
unique project_id period_start period_end
field id blob
field project_id blob
field storage float64
field egress int64
field objects int64 ( nullable )
field segments int64 ( nullable )
field period_start timestamp
field period_end timestamp
field state int ( updatable )
field created_at timestamp ( autoinsert )
)
create stripecoinpayments_invoice_project_record ()
update stripecoinpayments_invoice_project_record (
where stripecoinpayments_invoice_project_record.id = ?
)
read one (
select stripecoinpayments_invoice_project_record
where stripecoinpayments_invoice_project_record.project_id = ?
where stripecoinpayments_invoice_project_record.period_start = ?
where stripecoinpayments_invoice_project_record.period_end = ?
)
read limitoffset (
select stripecoinpayments_invoice_project_record
where stripecoinpayments_invoice_project_record.period_start = ?
where stripecoinpayments_invoice_project_record.period_end = ?
where stripecoinpayments_invoice_project_record.state = ?
)
model stripecoinpayments_tx_conversion_rate (
key tx_id
field tx_id text
field rate blob
field created_at timestamp ( autoinsert )
)
create stripecoinpayments_tx_conversion_rate ()
read one (
select stripecoinpayments_tx_conversion_rate
where stripecoinpayments_tx_conversion_rate.tx_id = ?
)
model coupon_code (
key id
unique name
field id blob
field name text
field amount int64
field description text
field type int
field billing_periods int64 ( nullable )
field created_at timestamp ( autoinsert )
)
create coupon_code ()
delete coupon_code (
where coupon_code.name = ?
)
read one (
select coupon_code
where coupon_code.name = ?
)
model coupon (
key id
field id blob
field user_id blob
field amount int64
field description text
field type int
field status int ( updatable )
field duration int64
field billing_periods int64 ( nullable )
field coupon_code_name text ( nullable )
field created_at timestamp ( autoinsert )
)
create coupon ()
update coupon (
where coupon.id = ?
)
delete coupon (
where coupon.id = ?
)
read one (
select coupon
where coupon.id = ?
)
read all (
select coupon
where coupon.user_id = ?
orderby desc coupon.created_at
)
read all (
select coupon
where coupon.user_id = ?
where coupon.status = ?
orderby desc coupon.created_at
)
read all (
select coupon
where coupon.status = ?
orderby desc coupon.created_at
)
read limitoffset (
select coupon
where coupon.created_at <= ?
where coupon.status = ?
orderby desc coupon.created_at
)
model coupon_usage (
key coupon_id period
field coupon_id blob
field amount int64
field status int ( updatable )
field period timestamp
)
create coupon_usage ()
read limitoffset (
select coupon_usage
where coupon_usage.period = ?
where coupon_usage.status = 0
)
update coupon_usage (
where coupon_usage.coupon_id = ?
where coupon_usage.period = ?
)
// -- node api version -- //
model node_api_version (
key id
field id blob
field api_version int ( updatable )
field created_at timestamp ( autoinsert )
field updated_at timestamp ( autoinsert, autoupdate )
)
create node_api_version (
noreturn
replace
)
read has (
select node_api_version
where node_api_version.id = ?
where node_api_version.api_version >= ?
)
update node_api_version (
where node_api_version.id = ?
where node_api_version.api_version < ?
noreturn
)
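// Taken together, the replace-style create, the "has" read, and the guarded
// update give a monotonic "highest API version seen" record: the update only
// applies while the stored version is lower, so it can never move backwards. A
// hand-written approximation of that guarded update is below; the table name
// and placeholders are assumptions, and a caller would fall back to the
// replace-create when no row exists yet.

package nodeapiversion

import (
	"context"
	"database/sql"
)

// updateIfLower raises the stored api_version for a node only when the new
// value is higher, reporting whether a row changed.
func updateIfLower(ctx context.Context, db *sql.DB, id []byte, version int) (bool, error) {
	res, err := db.ExecContext(ctx,
		`UPDATE node_api_versions
		 SET api_version = $2, updated_at = current_timestamp
		 WHERE id = $1 AND api_version < $2`,
		id, version)
	if err != nil {
		return false, err
	}
	n, err := res.RowsAffected()
	return n > 0, err
}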