satellitedb/overlay: add database for storing peer identities (#2764)

parent 1f3537d4a9
commit 33aff71959
satellite/overlay/peeridentities.go (new file, 21 lines)
@@ -0,0 +1,21 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"context"

	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/storj"
)

// PeerIdentities stores storagenode peer identities
type PeerIdentities interface {
	// Set adds a peer identity entry for a node
	Set(context.Context, storj.NodeID, *identity.PeerIdentity) error
	// Get gets peer identity
	Get(context.Context, storj.NodeID) (*identity.PeerIdentity, error)
	// BatchGet gets all nodes' peer identities in a transaction
	BatchGet(context.Context, storj.NodeIDList) ([]*identity.PeerIdentity, error)
}
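A minimal usage sketch of the interface above (assumptions: a satellite.DB handle is available; the helper name recordPeerIdentity is hypothetical, only the interface methods come from this commit):

	package example

	import (
		"context"

		"storj.io/storj/pkg/identity"
		"storj.io/storj/pkg/storj"
		"storj.io/storj/satellite"
	)

	// recordPeerIdentity stores a node's peer identity and reads it back.
	// Set inserts the entry, or refreshes it when the leaf certificate's
	// serial number has changed; Get returns the decoded identity.
	func recordPeerIdentity(ctx context.Context, db satellite.DB, nodeID storj.NodeID, peer *identity.PeerIdentity) error {
		idents := db.PeerIdentities()
		if err := idents.Set(ctx, nodeID, peer); err != nil {
			return err
		}
		_, err := idents.Get(ctx, nodeID)
		return err
	}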
satellite/overlay/peeridentities_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

func TestPeerIdentities(t *testing.T) {
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()

		encode := identity.EncodePeerIdentity

		idents := db.PeerIdentities()

		{ // basic tests
			ca, err := testidentity.NewTestCA(ctx)
			require.NoError(t, err)

			leafFirst, err := ca.NewIdentity()
			require.NoError(t, err)

			leafSecond, err := ca.NewIdentity()
			require.NoError(t, err)

			// sanity check: leaves issued by the same CA share the node ID
			require.Equal(t, leafFirst.ID, leafSecond.ID)

			{ // add entry
				err := idents.Set(ctx, leafFirst.ID, leafFirst.PeerIdentity())
				require.NoError(t, err)
			}

			{ // get the entry
				got, err := idents.Get(ctx, leafFirst.ID)
				require.NoError(t, err)
				require.Equal(t, encode(leafFirst.PeerIdentity()), encode(got))
			}

			{ // update entry
				err := idents.Set(ctx, leafSecond.ID, leafSecond.PeerIdentity())
				require.NoError(t, err)
			}

			{ // get the updated entry
				got, err := idents.Get(ctx, leafFirst.ID)
				require.NoError(t, err)
				require.Equal(t, encode(leafSecond.PeerIdentity()), encode(got))
			}
		}

		{ // get multiple
			list := make(map[storj.NodeID]*identity.PeerIdentity)
			var ids []storj.NodeID

			for i := 0; i < 10; i++ {
				ident := testidentity.MustPregeneratedSignedIdentity(i, storj.LatestIDVersion())
				list[ident.ID] = ident.PeerIdentity()

				err := idents.Set(ctx, ident.ID, ident.PeerIdentity())
				require.NoError(t, err)

				ids = append(ids, ident.ID)
			}

			got, err := idents.BatchGet(ctx, ids)
			require.NoError(t, err)
			require.Len(t, got, len(ids))
			for _, gotIdent := range got {
				require.Equal(t, encode(list[gotIdent.ID]), encode(gotIdent))
			}
		}
	})
}
@@ -77,6 +77,8 @@ type DB interface {
	// DropSchema drops the schema
	DropSchema(schema string) error

	// PeerIdentities returns storage for peer identities
	PeerIdentities() overlay.PeerIdentities
	// OverlayCache returns database for caching overlay information
	OverlayCache() overlay.DB
	// Attribution returns database for partner keys information
@@ -98,6 +98,11 @@ func (db *DB) DropSchema(schema string) error {
	return nil
}

// PeerIdentities returns storage for peer identities
func (db *DB) PeerIdentities() overlay.PeerIdentities {
	return &peerIdentities{db: db.db}
}

// Attribution is a getter for value attribution repository
func (db *DB) Attribution() attribution.DB {
	return &attributionDB{db: db.db}
@@ -616,6 +616,31 @@ read all (
	where storagenode_storage_tally.interval_end_time >= ?
)

//--- peer_identity ---//

model peer_identity (
	key node_id

	field node_id            blob
	field leaf_serial_number blob      (updatable)
	field chain              blob      (updatable) // x509 ASN.1 DER content
	field updated_at         timestamp ( autoinsert, autoupdate )
)

create peer_identity ( )
update peer_identity (
	where peer_identity.node_id = ?
)

read one (
	select peer_identity
	where peer_identity.node_id = ?
)
read one (
	select peer_identity.leaf_serial_number
	where peer_identity.node_id = ?
)

//--- satellite registration token for Vanguard release (temporary table) ---//

model registration_token (
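A sketch of how the dbx definitions above surface in Go, using the constructor and method names generated later in this diff (assumptions: tx is an open dbx transaction of type *dbx.Tx, and the wrapper function itself is hypothetical):

	package example

	import (
		"context"

		"storj.io/storj/pkg/storj"
		dbx "storj.io/storj/satellite/satellitedb/dbx"
	)

	// createPeerIdentity maps `create peer_identity ( )` to the generated
	// Create_PeerIdentity call, with one typed constructor per field;
	// updated_at is filled in automatically ( autoinsert, autoupdate ).
	func createPeerIdentity(ctx context.Context, tx *dbx.Tx, nodeID storj.NodeID, serialNumber, chain []byte) error {
		_, err := tx.Create_PeerIdentity(ctx,
			dbx.PeerIdentity_NodeId(nodeID.Bytes()),         // key node_id
			dbx.PeerIdentity_LeafSerialNumber(serialNumber), // updatable
			dbx.PeerIdentity_Chain(chain),                   // x509 ASN.1 DER content
		)
		return err
	}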
@@ -390,6 +390,13 @@ CREATE TABLE offers (
	type integer NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id bytea NOT NULL,
	leaf_serial_number bytea NOT NULL,
	chain bytea NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id bytea NOT NULL,
	piece_id bytea NOT NULL,
@@ -733,6 +740,13 @@ CREATE TABLE offers (
	type INTEGER NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id BLOB NOT NULL,
	leaf_serial_number BLOB NOT NULL,
	chain BLOB NOT NULL,
	updated_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id BLOB NOT NULL,
	piece_id BLOB NOT NULL,
@@ -2982,6 +2996,96 @@ func (f Offer_Type_Field) value() interface{} {

func (Offer_Type_Field) _Column() string { return "type" }

type PeerIdentity struct {
	NodeId           []byte
	LeafSerialNumber []byte
	Chain            []byte
	UpdatedAt        time.Time
}

func (PeerIdentity) _Table() string { return "peer_identities" }

type PeerIdentity_Update_Fields struct {
	LeafSerialNumber PeerIdentity_LeafSerialNumber_Field
	Chain            PeerIdentity_Chain_Field
}

type PeerIdentity_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func PeerIdentity_NodeId(v []byte) PeerIdentity_NodeId_Field {
	return PeerIdentity_NodeId_Field{_set: true, _value: v}
}

func (f PeerIdentity_NodeId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (PeerIdentity_NodeId_Field) _Column() string { return "node_id" }

type PeerIdentity_LeafSerialNumber_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func PeerIdentity_LeafSerialNumber(v []byte) PeerIdentity_LeafSerialNumber_Field {
	return PeerIdentity_LeafSerialNumber_Field{_set: true, _value: v}
}

func (f PeerIdentity_LeafSerialNumber_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (PeerIdentity_LeafSerialNumber_Field) _Column() string { return "leaf_serial_number" }

type PeerIdentity_Chain_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func PeerIdentity_Chain(v []byte) PeerIdentity_Chain_Field {
	return PeerIdentity_Chain_Field{_set: true, _value: v}
}

func (f PeerIdentity_Chain_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (PeerIdentity_Chain_Field) _Column() string { return "chain" }

type PeerIdentity_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func PeerIdentity_UpdatedAt(v time.Time) PeerIdentity_UpdatedAt_Field {
	return PeerIdentity_UpdatedAt_Field{_set: true, _value: v}
}

func (f PeerIdentity_UpdatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (PeerIdentity_UpdatedAt_Field) _Column() string { return "updated_at" }

type PendingAudits struct {
	NodeId  []byte
	PieceId []byte
@@ -5444,6 +5548,10 @@ type Id_Row struct {
	Id []byte
}

type LeafSerialNumber_Row struct {
	LeafSerialNumber []byte
}

type Value_Row struct {
	Value time.Time
}
@@ -6008,6 +6116,32 @@ func (obj *postgresImpl) Create_StoragenodeStorageTally(ctx context.Context,

}

func (obj *postgresImpl) Create_PeerIdentity(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
	peer_identity_chain PeerIdentity_Chain_Field) (
	peer_identity *PeerIdentity, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := peer_identity_node_id.value()
	__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
	__chain_val := peer_identity_chain.value()
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? ) RETURNING peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil

}

func (obj *postgresImpl) Create_RegistrationToken(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	registration_token_project_limit RegistrationToken_ProjectLimit_Field,
@@ -7418,6 +7552,48 @@ func (obj *postgresImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterO

}

func (obj *postgresImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil

}

func (obj *postgresImpl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &LeafSerialNumber_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.LeafSerialNumber)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return row, nil

}

func (obj *postgresImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {
@@ -8216,6 +8392,52 @@ func (obj *postgresImpl) Update_ApiKey_By_Id(ctx context.Context,
	return api_key, nil
}

func (obj *postgresImpl) Update_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	peer_identity *PeerIdentity, err error) {
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ? RETURNING peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	if update.LeafSerialNumber._set {
		__values = append(__values, update.LeafSerialNumber.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
	}

	if update.Chain._set {
		__values = append(__values, update.Chain.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
	}

	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, peer_identity_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil
}

func (obj *postgresImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
@@ -8998,6 +9220,16 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM peer_identities;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
@@ -9712,6 +9944,35 @@ func (obj *sqlite3Impl) Create_StoragenodeStorageTally(ctx context.Context,

}

func (obj *sqlite3Impl) Create_PeerIdentity(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
	peer_identity_chain PeerIdentity_Chain_Field) (
	peer_identity *PeerIdentity, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := peer_identity_node_id.value()
	__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
	__chain_val := peer_identity_chain.value()
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)

	__res, err := obj.driver.Exec(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastPeerIdentity(ctx, __pk)

}

func (obj *sqlite3Impl) Create_RegistrationToken(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	registration_token_project_limit RegistrationToken_ProjectLimit_Field,
@@ -11137,6 +11398,48 @@ func (obj *sqlite3Impl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOr

}

func (obj *sqlite3Impl) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil

}

func (obj *sqlite3Impl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &LeafSerialNumber_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.LeafSerialNumber)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return row, nil

}

func (obj *sqlite3Impl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {
@@ -12015,6 +12318,62 @@ func (obj *sqlite3Impl) Update_ApiKey_By_Id(ctx context.Context,
	return api_key, nil
}

func (obj *sqlite3Impl) Update_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	peer_identity *PeerIdentity, err error) {
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	if update.LeafSerialNumber._set {
		__values = append(__values, update.LeafSerialNumber.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
	}

	if update.Chain._set {
		__values = append(__values, update.Chain.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
	}

	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, peer_identity_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	var __embed_stmt_get = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
	obj.logStmt("(IMPLIED) "+__stmt_get, __args...)

	err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil
}

func (obj *sqlite3Impl) Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
@@ -12973,6 +13332,24 @@ func (obj *sqlite3Impl) getLastStoragenodeStorageTally(ctx context.Context,

}

func (obj *sqlite3Impl) getLastPeerIdentity(ctx context.Context,
	pk int64) (
	peer_identity *PeerIdentity, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return peer_identity, nil

}

func (obj *sqlite3Impl) getLastRegistrationToken(ctx context.Context,
	pk int64) (
	registration_token *RegistrationToken, err error) {
@@ -13246,6 +13623,16 @@ func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error)
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM peer_identities;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
@@ -13740,6 +14127,19 @@ func (rx *Rx) Create_Offer(ctx context.Context,

}

func (rx *Rx) Create_PeerIdentity(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
	peer_identity_chain PeerIdentity_Chain_Field) (
	peer_identity *PeerIdentity, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Create_PeerIdentity(ctx, peer_identity_node_id, peer_identity_leaf_serial_number, peer_identity_chain)

}

func (rx *Rx) Create_PendingAudits(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	pending_audits_piece_id PendingAudits_PieceId_Field,
@@ -14224,6 +14624,26 @@ func (rx *Rx) Get_Offer_By_Id(ctx context.Context,
	return tx.Get_Offer_By_Id(ctx, offer_id)
}

func (rx *Rx) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Get_PeerIdentity_By_NodeId(ctx, peer_identity_node_id)
}

func (rx *Rx) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx, peer_identity_node_id)
}

func (rx *Rx) Get_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	pending_audits *PendingAudits, err error) {
@@ -14527,6 +14947,17 @@ func (rx *Rx) Update_Offer_By_Id(ctx context.Context,
	return tx.Update_Offer_By_Id(ctx, offer_id, update)
}

func (rx *Rx) Update_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	peer_identity *PeerIdentity, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Update_PeerIdentity_By_NodeId(ctx, peer_identity_node_id, update)
}

func (rx *Rx) Update_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	update PendingAudits_Update_Fields) (
@@ -14770,6 +15201,12 @@ type Methods interface {
		optional Offer_Create_Fields) (
		offer *Offer, err error)

	Create_PeerIdentity(ctx context.Context,
		peer_identity_node_id PeerIdentity_NodeId_Field,
		peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
		peer_identity_chain PeerIdentity_Chain_Field) (
		peer_identity *PeerIdentity, err error)

	Create_PendingAudits(ctx context.Context,
		pending_audits_node_id PendingAudits_NodeId_Field,
		pending_audits_piece_id PendingAudits_PieceId_Field,
@@ -14987,6 +15424,14 @@ type Methods interface {
		offer_id Offer_Id_Field) (
		offer *Offer, err error)

	Get_PeerIdentity_By_NodeId(ctx context.Context,
		peer_identity_node_id PeerIdentity_NodeId_Field) (
		peer_identity *PeerIdentity, err error)

	Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
		peer_identity_node_id PeerIdentity_NodeId_Field) (
		row *LeafSerialNumber_Row, err error)

	Get_PendingAudits_By_NodeId(ctx context.Context,
		pending_audits_node_id PendingAudits_NodeId_Field) (
		pending_audits *PendingAudits, err error)
@@ -15122,6 +15567,11 @@ type Methods interface {
		update Offer_Update_Fields) (
		offer *Offer, err error)

	Update_PeerIdentity_By_NodeId(ctx context.Context,
		peer_identity_node_id PeerIdentity_NodeId_Field,
		update PeerIdentity_Update_Fields) (
		peer_identity *PeerIdentity, err error)

	Update_PendingAudits_By_NodeId(ctx context.Context,
		pending_audits_node_id PendingAudits_NodeId_Field,
		update PendingAudits_Update_Fields) (
@@ -118,6 +118,13 @@ CREATE TABLE offers (
	type integer NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id bytea NOT NULL,
	leaf_serial_number bytea NOT NULL,
	chain bytea NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id bytea NOT NULL,
	piece_id bytea NOT NULL,
@@ -118,6 +118,13 @@ CREATE TABLE offers (
	type INTEGER NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id BLOB NOT NULL,
	leaf_serial_number BLOB NOT NULL,
	chain BLOB NOT NULL,
	updated_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id BLOB NOT NULL,
	piece_id BLOB NOT NULL,
@@ -13,6 +13,7 @@ import (
	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
@@ -939,6 +940,40 @@ func (m *lockedOverlayCache) UpdateUptime(ctx context.Context, nodeID storj.Node
	return m.db.UpdateUptime(ctx, nodeID, isUp, lambda, weight, uptimeDQ)
}

// PeerIdentities returns storage for peer identities
func (m *locked) PeerIdentities() overlay.PeerIdentities {
	m.Lock()
	defer m.Unlock()
	return &lockedPeerIdentities{m.Locker, m.db.PeerIdentities()}
}

// lockedPeerIdentities implements locking wrapper for overlay.PeerIdentities
type lockedPeerIdentities struct {
	sync.Locker
	db overlay.PeerIdentities
}

// BatchGet gets all nodes' peer identities in a transaction
func (m *lockedPeerIdentities) BatchGet(ctx context.Context, a1 storj.NodeIDList) (_ []*identity.PeerIdentity, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.BatchGet(ctx, a1)
}

// Get gets peer identity
func (m *lockedPeerIdentities) Get(ctx context.Context, a1 storj.NodeID) (*identity.PeerIdentity, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, a1)
}

// Set adds a peer identity entry for a node
func (m *lockedPeerIdentities) Set(ctx context.Context, a1 storj.NodeID, a2 *identity.PeerIdentity) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Set(ctx, a1, a2)
}

// ProjectAccounting returns database for storing information about project data use
func (m *locked) ProjectAccounting() accounting.ProjectAccounting {
	m.Lock()
@@ -1128,6 +1128,19 @@ func (db *DB) PostgresMigration() *migrate.Migration {
				`ALTER TABLE nodes ADD piece_count BIGINT NOT NULL DEFAULT 0;`,
			},
		},
		{
			Description: "Add Peer Identities table",
			Version:     54,
			Action: migrate.SQL{
				`CREATE TABLE peer_identities (
					node_id bytea NOT NULL,
					leaf_serial_number bytea NOT NULL,
					chain bytea NOT NULL,
					updated_at timestamp with time zone NOT NULL,
					PRIMARY KEY ( node_id )
				);`,
			},
		},
	},
}
}
satellite/satellitedb/peeridentities.go (new file, 120 lines)
@@ -0,0 +1,120 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"bytes"
	"context"
	"database/sql"
	"strings"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/storj"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

type peerIdentities struct {
	db *dbx.DB
}

// Set adds a peer identity entry
func (idents *peerIdentities) Set(ctx context.Context, nodeID storj.NodeID, ident *identity.PeerIdentity) (err error) {
	defer mon.Task()(&ctx)(&err)

	if ident == nil {
		return Error.New("identity is nil")
	}

	tx, err := idents.db.Open(ctx)
	if err != nil {
		return Error.Wrap(err)
	}

	defer func() {
		if err == nil {
			err = tx.Commit()
		} else {
			err = errs.Combine(err, tx.Rollback())
		}
	}()

	serial, err := tx.Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx, dbx.PeerIdentity_NodeId(nodeID.Bytes()))
	if serial == nil || err != nil {
		// no entry yet for this node: insert a fresh identity
		if serial == nil || err == sql.ErrNoRows {
			_, err = tx.Create_PeerIdentity(ctx,
				dbx.PeerIdentity_NodeId(nodeID.Bytes()),
				dbx.PeerIdentity_LeafSerialNumber(ident.Leaf.SerialNumber.Bytes()),
				dbx.PeerIdentity_Chain(identity.EncodePeerIdentity(ident)),
			)
			return Error.Wrap(err)
		}
		return Error.Wrap(err)
	}
	// refresh the stored chain only when the leaf serial number changed
	if !bytes.Equal(serial.LeafSerialNumber, ident.Leaf.SerialNumber.Bytes()) {
		_, err = tx.Update_PeerIdentity_By_NodeId(ctx,
			dbx.PeerIdentity_NodeId(nodeID.Bytes()),
			dbx.PeerIdentity_Update_Fields{
				LeafSerialNumber: dbx.PeerIdentity_LeafSerialNumber(ident.Leaf.SerialNumber.Bytes()),
				Chain:            dbx.PeerIdentity_Chain(identity.EncodePeerIdentity(ident)),
			},
		)
	}

	return Error.Wrap(err)
}

// Get gets the peer identity based on the certificate's nodeID
func (idents *peerIdentities) Get(ctx context.Context, nodeID storj.NodeID) (_ *identity.PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxIdent, err := idents.db.Get_PeerIdentity_By_NodeId(ctx, dbx.PeerIdentity_NodeId(nodeID.Bytes()))
	if err != nil {
		return nil, Error.Wrap(err)
	}
	if dbxIdent == nil {
		return nil, Error.New("missing node id: %v", nodeID)
	}

	ident, err := identity.DecodePeerIdentity(ctx, dbxIdent.Chain)
	return ident, Error.Wrap(err)
}

// BatchGet gets the peer identities based on the certificates' nodeIDs
func (idents *peerIdentities) BatchGet(ctx context.Context, nodeIDs storj.NodeIDList) (peerIdents []*identity.PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)
	if len(nodeIDs) == 0 {
		return nil, nil
	}

	args := make([]interface{}, 0, nodeIDs.Len())
	for _, nodeID := range nodeIDs {
		args = append(args, nodeID)
	}

	// TODO: optimize using arrays like overlay

	rows, err := idents.db.Query(idents.db.Rebind(`
		SELECT chain FROM peer_identities WHERE node_id IN (?`+strings.Repeat(", ?", len(nodeIDs)-1)+`)`), args...)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	defer func() {
		err = errs.Combine(err, rows.Close())
	}()

	for rows.Next() {
		var peerChain []byte
		err := rows.Scan(&peerChain)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		ident, err := identity.DecodePeerIdentity(ctx, peerChain)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		peerIdents = append(peerIdents, ident)
	}
	return peerIdents, nil
}
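BatchGet builds its IN clause by repeating the bind placeholder, one per node ID. A standalone sketch of that expansion (values assumed for illustration; runnable on its own):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// For n IDs the query gets one leading "?" plus n-1 ", ?" repeats,
		// matching the args slice that BatchGet builds from nodeIDs.
		n := 3
		query := `SELECT chain FROM peer_identities WHERE node_id IN (?` +
			strings.Repeat(", ?", n-1) + `)`
		fmt.Println(query)
		// prints: SELECT chain FROM peer_identities WHERE node_id IN (?, ?, ?)
	}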
satellite/satellitedb/testdata/postgres.v54.sql (new vendored file, 355 lines)
@@ -0,0 +1,355 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
	id bigserial NOT NULL,
	node_id bytea NOT NULL,
	start_time timestamp with time zone NOT NULL,
	put_total bigint NOT NULL,
	get_total bigint NOT NULL,
	get_audit_total bigint NOT NULL,
	get_repair_total bigint NOT NULL,
	put_repair_total bigint NOT NULL,
	at_rest_total double precision NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE accounting_timestamps (
	name text NOT NULL,
	value timestamp with time zone NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
	bucket_name bytea NOT NULL,
	project_id bytea NOT NULL,
	interval_start timestamp NOT NULL,
	interval_seconds integer NOT NULL,
	action integer NOT NULL,
	inline bigint NOT NULL,
	allocated bigint NOT NULL,
	settled bigint NOT NULL,
	PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
	bucket_name bytea NOT NULL,
	project_id bytea NOT NULL,
	interval_start timestamp NOT NULL,
	inline bigint NOT NULL,
	remote bigint NOT NULL,
	remote_segments_count integer NOT NULL,
	inline_segments_count integer NOT NULL,
	object_count integer NOT NULL,
	metadata_size bigint NOT NULL,
	PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE bucket_usages (
	id bytea NOT NULL,
	bucket_id bytea NOT NULL,
	rollup_end_time timestamp with time zone NOT NULL,
	remote_stored_data bigint NOT NULL,
	inline_stored_data bigint NOT NULL,
	remote_segments integer NOT NULL,
	inline_segments integer NOT NULL,
	objects integer NOT NULL,
	metadata_size bigint NOT NULL,
	repair_egress bigint NOT NULL,
	get_egress bigint NOT NULL,
	audit_egress bigint NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE injuredsegments (
	path bytea NOT NULL,
	data bytea NOT NULL,
	attempted timestamp,
	PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
	segmentpath bytea NOT NULL,
	segmentdetail bytea NOT NULL,
	pieces_lost_count bigint NOT NULL,
	seg_damaged_unix_sec bigint NOT NULL,
	repair_attempt_count bigint NOT NULL,
	PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
	id bytea NOT NULL,
	address text NOT NULL,
	last_net text NOT NULL,
	protocol integer NOT NULL,
	type integer NOT NULL,
	email text NOT NULL,
	wallet text NOT NULL,
	free_bandwidth bigint NOT NULL,
	free_disk bigint NOT NULL,
	piece_count bigint NOT NULL,
	major bigint NOT NULL,
	minor bigint NOT NULL,
	patch bigint NOT NULL,
	hash text NOT NULL,
	timestamp timestamp with time zone NOT NULL,
	release boolean NOT NULL,
	latency_90 bigint NOT NULL,
	audit_success_count bigint NOT NULL,
	total_audit_count bigint NOT NULL,
	uptime_success_count bigint NOT NULL,
	total_uptime_count bigint NOT NULL,
	created_at timestamp with time zone NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	last_contact_success timestamp with time zone NOT NULL,
	last_contact_failure timestamp with time zone NOT NULL,
	contained boolean NOT NULL,
	disqualified timestamp with time zone,
	audit_reputation_alpha double precision NOT NULL,
	audit_reputation_beta double precision NOT NULL,
	uptime_reputation_alpha double precision NOT NULL,
	uptime_reputation_beta double precision NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE offers (
	id serial NOT NULL,
	name text NOT NULL,
	description text NOT NULL,
	award_credit_in_cents integer NOT NULL,
	invitee_credit_in_cents integer NOT NULL,
	award_credit_duration_days integer,
	invitee_credit_duration_days integer,
	redeemable_cap integer,
	expires_at timestamp with time zone NOT NULL,
	created_at timestamp with time zone NOT NULL,
	status integer NOT NULL,
	type integer NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id bytea NOT NULL,
	leaf_serial_number bytea NOT NULL,
	chain bytea NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id bytea NOT NULL,
	piece_id bytea NOT NULL,
	stripe_index bigint NOT NULL,
	share_size bigint NOT NULL,
	expected_share_hash bytea NOT NULL,
	reverify_count bigint NOT NULL,
	path bytea NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE projects (
	id bytea NOT NULL,
	name text NOT NULL,
	description text NOT NULL,
	usage_limit bigint NOT NULL,
	partner_id bytea,
	owner_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE registration_tokens (
	secret bytea NOT NULL,
	owner_id bytea,
	project_limit integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( secret ),
	UNIQUE ( owner_id )
);
CREATE TABLE reset_password_tokens (
	secret bytea NOT NULL,
	owner_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( secret ),
	UNIQUE ( owner_id )
);
CREATE TABLE serial_numbers (
	id serial NOT NULL,
	serial_number bytea NOT NULL,
	bucket_id bytea NOT NULL,
	expires_at timestamp NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
	storagenode_id bytea NOT NULL,
	interval_start timestamp NOT NULL,
	interval_seconds integer NOT NULL,
	action integer NOT NULL,
	allocated bigint NOT NULL,
	settled bigint NOT NULL,
	PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_storage_tallies (
	id bigserial NOT NULL,
	node_id bytea NOT NULL,
	interval_end_time timestamp with time zone NOT NULL,
	data_total double precision NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE users (
	id bytea NOT NULL,
	email text NOT NULL,
	full_name text NOT NULL,
	short_name text,
	password_hash bytea NOT NULL,
	status integer NOT NULL,
	partner_id bytea,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
	project_id bytea NOT NULL,
	bucket_name bytea NOT NULL,
	partner_id bytea NOT NULL,
	last_updated timestamp NOT NULL,
	PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
	id bytea NOT NULL,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	head bytea NOT NULL,
	name text NOT NULL,
	secret bytea NOT NULL,
	partner_id bytea,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( head ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
	id bytea NOT NULL,
	project_id bytea NOT NULL REFERENCES projects( id ),
	name bytea NOT NULL,
	partner_id bytea,
	path_cipher integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	default_segment_size integer NOT NULL,
	default_encryption_cipher_suite integer NOT NULL,
	default_encryption_block_size integer NOT NULL,
	default_redundancy_algorithm integer NOT NULL,
	default_redundancy_share_size integer NOT NULL,
	default_redundancy_required_shares integer NOT NULL,
	default_redundancy_repair_shares integer NOT NULL,
	default_redundancy_optimal_shares integer NOT NULL,
	default_redundancy_total_shares integer NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( name, project_id )
);
CREATE TABLE project_invoice_stamps (
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	invoice_id bytea NOT NULL,
	start_date timestamp with time zone NOT NULL,
	end_date timestamp with time zone NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( project_id, start_date, end_date ),
	UNIQUE ( invoice_id )
);
CREATE TABLE project_members (
	member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE used_serials (
	serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
	storage_node_id bytea NOT NULL,
	PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
	id serial NOT NULL,
	user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	offer_id integer NOT NULL REFERENCES offers( id ),
	referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
	type text NOT NULL,
	credits_earned_in_cents integer NOT NULL,
	credits_used_in_cents integer NOT NULL,
	expires_at timestamp with time zone NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE user_payments (
	user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	customer_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( user_id ),
	UNIQUE ( customer_id )
);
CREATE TABLE project_payments (
	id bytea NOT NULL,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	payer_id bytea NOT NULL REFERENCES user_payments( user_id ) ON DELETE CASCADE,
	payment_method_id bytea NOT NULL,
	is_default boolean NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );

CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits (id, offer_id) WHERE credits_earned_in_cents=0;

INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);

INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');

INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100);

INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');

INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');

INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);

INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');

INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18);

INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');

INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');

INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);

INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);

INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');

INSERT INTO "offers" ("name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "status", "type") VALUES ('testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0);
INSERT INTO "offers" ("name","description","award_credit_in_cents","award_credit_duration_days", "invitee_credit_in_cents","invitee_credit_duration_days", "expires_at","created_at","status","type") VALUES ('Default free credit offer','Is active when no active free credit offer',0, NULL,300, 14, '2119-03-14 08:28:24.636949+00','2019-07-14 08:28:24.636949+00',1,1);

INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');

INSERT INTO "user_payments" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');

INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');

INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');

INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);

INSERT INTO "project_payments" ("id", "project_id", "payer_id", "payment_method_id", "is_default","created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, true, '2019-06-01 08:28:24.267934+00');

INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');

-- NEW DATA --

INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');