satellite/satellitedb: Add nodes_offline_times table for downtime tracking

Change-Id: If6b80fe0a20d88cedacaf4b76b75aa21d0af2465
Moby von Briesen 2019-12-27 17:03:03 -05:00
parent 758fe35aba
commit bb3baf5a4e
10 changed files with 860 additions and 0 deletions

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package downtimetracking

import (
	"context"
	"time"

	"storj.io/common/storj"
)

// NodeOfflineTime represents a record in the nodes_offline_times table.
type NodeOfflineTime struct {
	NodeID      storj.NodeID
	TrackedAt   time.Time
	TimeOffline time.Duration
}

// DB implements basic operations for the downtime tracking service.
//
// architecture: Database
type DB interface {
	// Add adds a record for a particular node ID with the amount of time it has been offline.
	Add(ctx context.Context, nodeID storj.NodeID, trackedTime time.Time, timeOffline time.Duration) error
	// GetOfflineTime gets the total amount of offline time for a node within a certain timeframe.
	// "total offline time" is defined as the sum of all offline time windows that begin inside the provided time window.
	// An offline time window that began before `begin` but that overlaps with the provided time window is not included.
	// An offline time window that begins within the provided time window, but that extends beyond `end` is included.
	GetOfflineTime(ctx context.Context, nodeID storj.NodeID, begin, end time.Time) (time.Duration, error)
}
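
To make the inclusion rule documented on GetOfflineTime concrete, here is a minimal, self-contained sketch (illustrative only, not part of this commit) that applies the same rule to in-memory records. The offlineWindow type and sumOfflineTime helper are hypothetical names used only for this example.

package main

import (
	"fmt"
	"time"
)

// offlineWindow is a hypothetical in-memory stand-in for NodeOfflineTime
// without the node ID: when the downtime was tracked and how long it lasted.
type offlineWindow struct {
	trackedAt   time.Time
	timeOffline time.Duration
}

// sumOfflineTime applies the rule documented on GetOfflineTime: a window is
// counted only when its tracked time falls inside (begin, end].
func sumOfflineTime(windows []offlineWindow, begin, end time.Time) time.Duration {
	var total time.Duration
	for _, w := range windows {
		if w.trackedAt.After(begin) && !w.trackedAt.After(end) {
			total += w.timeOffline
		}
	}
	return total
}

func main() {
	now := time.Now()
	windows := []offlineWindow{
		{trackedAt: now.Add(-30 * time.Minute), timeOffline: time.Hour},  // begins inside (begin, end]: counted
		{trackedAt: now.Add(-2 * time.Hour), timeOffline: 2 * time.Hour}, // begins before `begin`: not counted
	}
	fmt.Println(sumOfflineTime(windows, now.Add(-time.Hour), now)) // 1h0m0s
}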

View File

@@ -0,0 +1,71 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package downtimetracking_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

func TestDowntime(t *testing.T) {
	// test basic downtime functionality
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()

		downtimeDB := db.DowntimeTracking()

		now := time.Now()
		oneYearAgo := now.Add(-time.Hour * 24 * 365)

		// node1 was offline for one hour recently, and offline for two hours a year ago
		nodeID1 := testrand.NodeID()
		err := downtimeDB.Add(ctx, nodeID1, now, time.Hour)
		require.NoError(t, err)
		err = downtimeDB.Add(ctx, nodeID1, oneYearAgo, 2*time.Hour)
		require.NoError(t, err)

		// node2 was offline for two hours a year ago
		nodeID2 := testrand.NodeID()
		err = downtimeDB.Add(ctx, nodeID2, oneYearAgo, 2*time.Hour)
		require.NoError(t, err)

		// if we only check recent history, node1 offline time should be 1 hour
		duration, err := downtimeDB.GetOfflineTime(ctx, nodeID1, now.Add(-time.Hour), now.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, time.Hour, duration)

		// if we only check recent history, node2 should not be offline at all
		duration, err = downtimeDB.GetOfflineTime(ctx, nodeID2, now.Add(-time.Hour), now.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, time.Duration(0), duration)

		// if we only check old history, node1 offline time should be 2 hours
		duration, err = downtimeDB.GetOfflineTime(ctx, nodeID1, oneYearAgo.Add(-time.Hour), oneYearAgo.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, 2*time.Hour, duration)

		// if we only check old history, node2 offline time should be 2 hours
		duration, err = downtimeDB.GetOfflineTime(ctx, nodeID2, oneYearAgo.Add(-time.Hour), oneYearAgo.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, 2*time.Hour, duration)

		// if we check all history (from before oneYearAgo to after now), node1 should be offline for 3 hours
		duration, err = downtimeDB.GetOfflineTime(ctx, nodeID1, oneYearAgo.Add(-time.Hour), now.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, 3*time.Hour, duration)

		// if we check all history (from before oneYearAgo to after now), node2 should be offline for 2 hours
		duration, err = downtimeDB.GetOfflineTime(ctx, nodeID2, oneYearAgo.Add(-time.Hour), now.Add(time.Hour))
		require.NoError(t, err)
		require.Equal(t, 2*time.Hour, duration)
	})
}

View File

@@ -19,6 +19,7 @@ import (
"storj.io/storj/satellite/console/consoleweb"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/dbcleanup"
"storj.io/storj/satellite/downtimetracking"
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/mailservice"
@@ -78,6 +79,8 @@ type DB interface {
GracefulExit() gracefulexit.DB
// StripeCoinPayments returns stripecoinpayments database.
StripeCoinPayments() stripecoinpayments.DB
// DowntimeTracking returns the database for downtime tracking.
DowntimeTracking() downtimetracking.DB
}
// Config is the global config for the satellite

View File

@@ -15,6 +15,7 @@ import (
"storj.io/storj/satellite/attribution"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/downtimetracking"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
@@ -145,3 +146,8 @@ func (db *satelliteDB) GracefulExit() gracefulexit.DB {
func (db *satelliteDB) StripeCoinPayments() stripecoinpayments.DB {
return &stripeCoinPaymentsDB{db: db}
}
// DowntimeTracking returns the database for downtime tracking.
func (db *satelliteDB) DowntimeTracking() downtimetracking.DB {
return &downtimeTrackingDB{db: db}
}

View File

@@ -855,6 +855,29 @@ read one (
where graceful_exit_transfer_queue.piece_num = ?
)
//--- downtime tracking ---//
model nodes_offline_time (
key node_id tracked_at
index (
fields node_id
)
field node_id blob
field tracked_at timestamp
field seconds int
)
create nodes_offline_time ()
read all (
select nodes_offline_time
where nodes_offline_time.node_id = ?
where nodes_offline_time.tracked_at > ?
where nodes_offline_time.tracked_at <= ?
)
//--- satellite payments ---//
model stripe_customer (

View File

@@ -412,6 +412,12 @@ CREATE TABLE nodes (
exit_success boolean NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@@ -610,6 +616,7 @@ CREATE TABLE user_credits (
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
@@ -3194,6 +3201,74 @@ func (f Node_ExitSuccess_Field) value() interface{} {
func (Node_ExitSuccess_Field) _Column() string { return "exit_success" }
type NodesOfflineTime struct {
NodeId []byte
TrackedAt time.Time
Seconds int
}
func (NodesOfflineTime) _Table() string { return "nodes_offline_times" }
type NodesOfflineTime_Update_Fields struct {
}
type NodesOfflineTime_NodeId_Field struct {
_set bool
_null bool
_value []byte
}
func NodesOfflineTime_NodeId(v []byte) NodesOfflineTime_NodeId_Field {
return NodesOfflineTime_NodeId_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_NodeId_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_NodeId_Field) _Column() string { return "node_id" }
type NodesOfflineTime_TrackedAt_Field struct {
_set bool
_null bool
_value time.Time
}
func NodesOfflineTime_TrackedAt(v time.Time) NodesOfflineTime_TrackedAt_Field {
return NodesOfflineTime_TrackedAt_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_TrackedAt_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_TrackedAt_Field) _Column() string { return "tracked_at" }
type NodesOfflineTime_Seconds_Field struct {
_set bool
_null bool
_value int
}
func NodesOfflineTime_Seconds(v int) NodesOfflineTime_Seconds_Field {
return NodesOfflineTime_Seconds_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_Seconds_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_Seconds_Field) _Column() string { return "seconds" }
type Offer struct {
Id int
Name string
@@ -7042,6 +7117,30 @@ func (obj *postgresImpl) CreateNoReturn_GracefulExitTransferQueue(ctx context.Co
}
func (obj *postgresImpl) Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
__node_id_val := nodes_offline_time_node_id.value()
__tracked_at_val := nodes_offline_time_tracked_at.value()
__seconds_val := nodes_offline_time_seconds.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( ?, ?, ? ) RETURNING nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds")
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __node_id_val, __tracked_at_val, __seconds_val)
nodes_offline_time = &NodesOfflineTime{}
err = obj.driver.QueryRow(__stmt, __node_id_val, __tracked_at_val, __seconds_val).Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, obj.makeErr(err)
}
return nodes_offline_time, nil
}
func (obj *postgresImpl) Create_StripeCustomer(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field,
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
@@ -8863,6 +8962,42 @@ func (obj *postgresImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_Pi
}
func (obj *postgresImpl) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds FROM nodes_offline_times WHERE nodes_offline_times.node_id = ? AND nodes_offline_times.tracked_at > ? AND nodes_offline_times.tracked_at <= ?")
var __values []interface{}
__values = append(__values, nodes_offline_time_node_id.value(), nodes_offline_time_tracked_at_greater.value(), nodes_offline_time_tracked_at_less_or_equal.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__rows, err := obj.driver.Query(__stmt, __values...)
if err != nil {
return nil, obj.makeErr(err)
}
defer __rows.Close()
for __rows.Next() {
nodes_offline_time := &NodesOfflineTime{}
err = __rows.Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, obj.makeErr(err)
}
rows = append(rows, nodes_offline_time)
}
if err := __rows.Err(); err != nil {
return nil, obj.makeErr(err)
}
return rows, nil
}
func (obj *postgresImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field) (
row *CustomerId_Row, err error) {
@@ -11156,6 +11291,16 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
}
count += __count
__res, err = obj.driver.Exec("DELETE FROM nodes_offline_times;")
if err != nil {
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
@@ -11418,6 +11563,18 @@ func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.C
return tx.All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx)
}
func (rx *Rx) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at_greater, nodes_offline_time_tracked_at_less_or_equal)
}
func (rx *Rx) All_Offer_OrderBy_Asc_Id(ctx context.Context) (
rows []*Offer, err error) {
var tx *Tx
@@ -11807,6 +11964,19 @@ func (rx *Rx) Create_CouponUsage(ctx context.Context,
}
func (rx *Rx) Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Create_NodesOfflineTime(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at, nodes_offline_time_seconds)
}
func (rx *Rx) Create_Offer(ctx context.Context,
offer_name Offer_Name_Field,
offer_description Offer_Description_Field,
@@ -12914,6 +13084,12 @@ type Methods interface {
All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
rows []*Id_PieceCount_Row, err error)
All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error)
All_Offer_OrderBy_Asc_Id(ctx context.Context) (
rows []*Offer, err error)
@@ -13119,6 +13295,12 @@ type Methods interface {
coupon_usage_interval_end CouponUsage_IntervalEnd_Field) (
coupon_usage *CouponUsage, err error)
Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error)
Create_Offer(ctx context.Context,
offer_name Offer_Name_Field,
offer_description Offer_Description_Field,

View File

@@ -146,6 +146,12 @@ CREATE TABLE nodes (
exit_success boolean NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@@ -344,6 +350,7 @@ CREATE TABLE user_credits (
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );

View File

@@ -0,0 +1,51 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"time"

	"storj.io/common/storj"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

type downtimeTrackingDB struct {
	db *satelliteDB
}

// Add adds a record for a particular node ID with the amount of time it has been offline.
func (db *downtimeTrackingDB) Add(ctx context.Context, nodeID storj.NodeID, trackedTime time.Time, timeOffline time.Duration) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.Create_NodesOfflineTime(ctx,
		dbx.NodesOfflineTime_NodeId(nodeID.Bytes()),
		dbx.NodesOfflineTime_TrackedAt(trackedTime),
		dbx.NodesOfflineTime_Seconds(int(timeOffline.Seconds())),
	)
	return Error.Wrap(err)
}

// GetOfflineTime gets the total amount of offline time for a node within a certain timeframe.
// "total offline time" is defined as the sum of all offline time windows that begin inside the provided time window.
// An offline time window that began before `begin` but that overlaps with the provided time window is not included.
// An offline time window that begins within the provided time window, but that extends beyond `end` is included.
func (db *downtimeTrackingDB) GetOfflineTime(ctx context.Context, nodeID storj.NodeID, begin, end time.Time) (time.Duration, error) {
	offlineEntries, err := db.db.All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx,
		dbx.NodesOfflineTime_NodeId(nodeID.Bytes()),
		dbx.NodesOfflineTime_TrackedAt(begin),
		dbx.NodesOfflineTime_TrackedAt(end),
	)
	if err != nil {
		return time.Duration(0), Error.Wrap(err)
	}

	totalSeconds := 0
	for _, entry := range offlineEntries {
		totalSeconds += entry.Seconds
	}

	duration := time.Duration(totalSeconds) * time.Second
	return duration, nil
}
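
One consequence of the conversion above: Add persists int(timeOffline.Seconds()), so sub-second precision is dropped on write, and GetOfflineTime can only reconstruct whole-second durations. A tiny sketch of that round trip (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	offline := 90*time.Second + 500*time.Millisecond

	// What Add stores: whole seconds only, so the extra 500ms is dropped.
	storedSeconds := int(offline.Seconds()) // 90

	// What GetOfflineTime rebuilds from the stored value.
	recovered := time.Duration(storedSeconds) * time.Second

	fmt.Println(storedSeconds, recovered) // 90 1m30s
}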

View File

@@ -536,6 +536,20 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`ALTER TABLE user_credits ADD UNIQUE (id, offer_id);`,
},
},
{
DB: db.DB,
Description: "Add node downtime tracking table",
Version: 73,
Action: migrate.SQL{
`CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);`,
`CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );`,
},
},
},
}
}
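
For reference, a minimal sketch (not part of this commit) of exercising the table created by migration version 73 directly through database/sql. The Postgres driver import, connection string, and node ID bytes below are placeholders and assumptions made only for illustration.

package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq" // assumed Postgres driver for this sketch
)

func main() {
	// Placeholder connection string; point it at a satellite database that has run migration 73.
	db, err := sql.Open("postgres", "postgres://localhost/satellitedb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	nodeID := []byte{0x01, 0x02, 0x03} // placeholder node ID bytes

	// Record one hour of downtime tracked now, using the columns created above.
	_, err = db.Exec(
		`INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( $1, $2, $3 )`,
		nodeID, time.Now(), 3600)
	if err != nil {
		log.Fatal(err)
	}

	// Sum the seconds tracked inside a window; the index on node_id supports this per-node lookup.
	var total sql.NullInt64
	err = db.QueryRow(
		`SELECT SUM(seconds) FROM nodes_offline_times WHERE node_id = $1 AND tracked_at > $2 AND tracked_at <= $3`,
		nodeID, time.Now().Add(-time.Hour), time.Now()).Scan(&total)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("offline seconds:", total.Int64)
}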

View File

@@ -0,0 +1,472 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE accounting_rollups
(
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE accounting_timestamps
(
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY (name)
);
CREATE TABLE bucket_bandwidth_rollups
(
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY (bucket_name, project_id, interval_start, action)
);
CREATE TABLE bucket_storage_tallies
(
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY (bucket_name, project_id, interval_start)
);
CREATE TABLE injuredsegments
(
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp,
PRIMARY KEY (path)
);
CREATE TABLE irreparabledbs
(
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY (segmentpath)
);
CREATE TABLE nodes
(
id bytea NOT NULL,
address text NOT NULL,
last_net text NOT NULL,
protocol integer NOT NULL,
type integer NOT NULL,
email text NOT NULL,
wallet text NOT NULL,
free_bandwidth bigint NOT NULL,
free_disk bigint NOT NULL,
piece_count bigint NOT NULL,
major bigint NOT NULL,
minor bigint NOT NULL,
patch bigint NOT NULL,
hash text NOT NULL,
timestamp timestamp with time zone NOT NULL,
release boolean NOT NULL,
latency_90 bigint NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
last_contact_success timestamp with time zone NOT NULL,
last_contact_failure timestamp with time zone NOT NULL,
contained boolean NOT NULL,
disqualified timestamp with time zone,
audit_reputation_alpha double precision NOT NULL,
audit_reputation_beta double precision NOT NULL,
uptime_reputation_alpha double precision NOT NULL,
uptime_reputation_beta double precision NOT NULL,
exit_initiated_at timestamp,
exit_loop_completed_at timestamp,
exit_finished_at timestamp,
exit_success boolean NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE offers
(
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL,
invitee_credit_in_cents integer NOT NULL,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE peer_identities
(
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY (node_id)
);
CREATE TABLE pending_audits
(
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY (node_id)
);
CREATE TABLE projects
(
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint NOT NULL,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE registration_tokens
(
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (secret),
UNIQUE (owner_id)
);
CREATE TABLE reset_password_tokens
(
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (secret),
UNIQUE (owner_id)
);
CREATE TABLE serial_numbers
(
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE storagenode_bandwidth_rollups
(
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY (storagenode_id, interval_start, action)
);
CREATE TABLE storagenode_storage_tallies
(
id bigserial NOT NULL,
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions
(
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp NOT NULL,
PRIMARY KEY (project_id, bucket_name)
);
CREATE TABLE api_keys
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id),
UNIQUE (head),
UNIQUE (name, project_id)
);
CREATE TABLE bucket_metainfos
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY (id),
UNIQUE (name, project_id)
);
CREATE TABLE project_invoice_stamps
(
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
invoice_id bytea NOT NULL,
start_date timestamp with time zone NOT NULL,
end_date timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (project_id, start_date, end_date),
UNIQUE (invoice_id)
);
CREATE TABLE project_members
(
member_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (member_id, project_id)
);
CREATE TABLE used_serials
(
serial_number_id integer NOT NULL REFERENCES serial_numbers (id) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY (serial_number_id, storage_node_id)
);
CREATE TABLE user_credits
(
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers (id),
referred_by bytea REFERENCES users (id) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id),
UNIQUE( id, offer_id )
);
CREATE TABLE graceful_exit_progress
(
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL,
pieces_failed bigint NOT NULL,
updated_at timestamp NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue
(
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp NOT NULL,
requested_at timestamp,
last_failed_at timestamp,
last_failed_code integer,
failed_count integer,
finished_at timestamp,
order_limit_send_count integer NOT NULL,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE stripe_customers
(
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE coinpayments_transactions
(
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents
(
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records
(
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates
(
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE coupons
(
id bytea NOT NULL,
project_id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id )
);
CREATE TABLE coupon_usages
(
id bytea NOT NULL,
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
interval_end timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits (id, offer_id);
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 1, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 0, 300, 100, false);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupons" ("id", "project_id", "user_id", "amount", "description", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 111111111121, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("id", "coupon_id", "amount", "interval_end") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, '2019-06-01 09:28:24.267934+00');
-- NEW DATA --
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);