Updating the certdb to support storage of multiple public keys for same node ID (#2692)
* updating the certdb to support storage of multiple public keys for same node ID
parent 1b051ef3c3
commit 65932ad692
@@ -14,6 +14,8 @@ import (
 type DB interface {
 	// SavePublicKey adds a new bandwidth agreement.
 	SavePublicKey(context.Context, storj.NodeID, crypto.PublicKey) error
-	// GetPublicKey gets the public key of uplink corresponding to uplink id
+	// GetPublicKey gets one latest public key of a node
 	GetPublicKey(context.Context, storj.NodeID) (crypto.PublicKey, error)
+	// GetPublicKeys gets all the public keys of a node
+	GetPublicKeys(context.Context, storj.NodeID) ([]crypto.PublicKey, error)
 }
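For orientation, here is a minimal sketch of how a caller would use the widened interface. The wired-up certdb.DB value and the node ID are assumptions for illustration (imports elided); this is not code from the commit:

func latestAndHistory(ctx context.Context, db certdb.DB, nodeID storj.NodeID) (crypto.PublicKey, []crypto.PublicKey, error) {
	latest, err := db.GetPublicKey(ctx, nodeID) // newest key only
	if err != nil {
		return nil, nil, err
	}
	keys, err := db.GetPublicKeys(ctx, nodeID) // all stored keys, newest first
	if err != nil {
		return nil, nil, err
	}
	return latest, keys, nil // latest is the same key as keys[0]
}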
@@ -28,27 +28,78 @@ func TestCertDB(t *testing.T) {
 }
 
 func testDatabase(ctx context.Context, t *testing.T, upldb certdb.DB) {
-	//testing variables
-	upID, err := testidentity.NewTestIdentity(ctx)
-	require.NoError(t, err)
-	upIDpubbytes, err := x509.MarshalPKIXPublicKey(upID.Leaf.PublicKey)
-	require.NoError(t, err)
+	{ //uplink testing variables
+		upID, err := testidentity.NewTestIdentity(ctx)
+		require.NoError(t, err)
+		upIDpubbytes, err := x509.MarshalPKIXPublicKey(upID.Leaf.PublicKey)
+		require.NoError(t, err)
 
-	{ // New entry
-		err := upldb.SavePublicKey(ctx, upID.ID, upID.Leaf.PublicKey)
-		assert.NoError(t, err)
-	}
-
-	{ // New entry
-		err := upldb.SavePublicKey(ctx, upID.ID, upID.Leaf.PublicKey)
-		assert.NoError(t, err)
-	}
-
-	{ // Get the corresponding Public key for the serialnum
-		pubkey, err := upldb.GetPublicKey(ctx, upID.ID)
-		assert.NoError(t, err)
-		pubbytes, err := x509.MarshalPKIXPublicKey(pubkey)
-		assert.NoError(t, err)
-		assert.EqualValues(t, upIDpubbytes, pubbytes)
+		{ // New entry
+			err := upldb.SavePublicKey(ctx, upID.ID, upID.Leaf.PublicKey)
+			assert.NoError(t, err)
+		}
+
+		{ // Get the corresponding Public key for the serialnum
+			pubkey, err := upldb.GetPublicKey(ctx, upID.ID)
+			assert.NoError(t, err)
+			pubbytes, err := x509.MarshalPKIXPublicKey(pubkey)
+			assert.NoError(t, err)
+			assert.EqualValues(t, upIDpubbytes, pubbytes)
+		}
+
+		{ // Get the corresponding Public key for the serialnum
+			pubkey, err := upldb.GetPublicKeys(ctx, upID.ID)
+			assert.NoError(t, err)
+			pubbytes, err := x509.MarshalPKIXPublicKey(pubkey[0])
+			assert.NoError(t, err)
+			assert.EqualValues(t, upIDpubbytes, pubbytes)
+		}
+	}
+
+	{ //storagenode testing variables
+		sn1ID, err := testidentity.NewTestIdentity(ctx)
+		require.NoError(t, err)
+		sn1IDpubbytes, err := x509.MarshalPKIXPublicKey(sn1ID.Leaf.PublicKey)
+		require.NoError(t, err)
+
+		{ // New entry
+			err := upldb.SavePublicKey(ctx, sn1ID.ID, sn1ID.Leaf.PublicKey)
+			assert.NoError(t, err)
+		}
+
+		sn2ID, err := testidentity.NewTestIdentity(ctx)
+		require.NoError(t, err)
+		sn2IDpubbytes, err := x509.MarshalPKIXPublicKey(sn2ID.Leaf.PublicKey)
+		require.NoError(t, err)
+
+		{ // add another key for the same storagenode ID
+			err := upldb.SavePublicKey(ctx, sn1ID.ID, sn2ID.Leaf.PublicKey)
+			assert.NoError(t, err)
+		}
+
+		{ // add another key for the same storagenode ID, this the latest key
+			err := upldb.SavePublicKey(ctx, sn1ID.ID, sn2ID.Leaf.PublicKey)
+			assert.NoError(t, err)
+		}
+
+		{ // Get the corresponding Public key for the serialnum
+			// test to return one key but the latest of the keys
+			pkey, err := upldb.GetPublicKey(ctx, sn1ID.ID)
+			assert.NoError(t, err)
+			pbytes, err := x509.MarshalPKIXPublicKey(pkey)
+			assert.NoError(t, err)
+			assert.EqualValues(t, sn2IDpubbytes, pbytes)
+
+			// test all the keys for a given ID
+			pubkey, err := upldb.GetPublicKeys(ctx, sn1ID.ID)
+			assert.NoError(t, err)
+			assert.Equal(t, 2, len(pubkey))
+			pubbytes, err := x509.MarshalPKIXPublicKey(pubkey[0])
+			assert.NoError(t, err)
+			assert.EqualValues(t, sn2IDpubbytes, pubbytes)
+			pubbytes, err = x509.MarshalPKIXPublicKey(pubkey[1])
+			assert.NoError(t, err)
+			assert.EqualValues(t, sn1IDpubbytes, pubbytes)
+		}
 	}
 }
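Worth spelling out why the storagenode block ends up with exactly two keys after three saves: deduplication is by public key, so the repeated save of sn2's key is a no-op. A condensed restatement (a sketch, not part of the test):

// three saves, two distinct keys: the duplicate is skipped
_ = upldb.SavePublicKey(ctx, sn1ID.ID, sn1ID.Leaf.PublicKey) // key A: stored
_ = upldb.SavePublicKey(ctx, sn1ID.ID, sn2ID.Leaf.PublicKey) // key B: stored
_ = upldb.SavePublicKey(ctx, sn1ID.ID, sn2ID.Leaf.PublicKey) // key B again: no-op
// GetPublicKeys(ctx, sn1ID.ID) then yields {B, A}, newest first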
@@ -21,15 +21,20 @@ type certDB struct {
 
 func (certs *certDB) SavePublicKey(ctx context.Context, nodeID storj.NodeID, publicKey crypto.PublicKey) (err error) {
 	defer mon.Task()(&ctx)(&err)
-	_, err = certs.db.Get_CertRecord_By_Id(ctx, dbx.CertRecord_Id(nodeID.Bytes()))
-	if err == sql.ErrNoRows {
-		return certs.tryAddPublicKey(ctx, nodeID, publicKey)
-	}
+	pubbytes, err := pkcrypto.PublicKeyToPKIX(publicKey)
 	if err != nil {
 		return Error.Wrap(err)
 	}
 
-	// nodeID entry already exists, just return
+	_, err = certs.db.Get_CertRecord_By_Publickey(ctx, dbx.CertRecord_Publickey(pubbytes))
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return certs.tryAddPublicKey(ctx, nodeID, publicKey)
+		}
+		return Error.Wrap(err)
+	}
+
+	// publickey for the nodeID entry already exists, just return
 	return nil
 }
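The save path now deduplicates on the PKIX encoding of the key rather than on the node ID, which is what makes several rows per node possible. A condensed restatement of the flow (hypothetical helper signatures, not the commit's code):

// insert only when these exact key bytes are not yet stored
func saveSketch(keyExists func(pkix []byte) (bool, error), insert func() error, pkix []byte) error {
	exists, err := keyExists(pkix) // Get_CertRecord_By_Publickey in the real code
	if err != nil {
		return err
	}
	if exists {
		return nil // duplicate key: a repeated save is a no-op
	}
	return insert() // tryAddPublicKey in the real code
}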
@@ -56,16 +61,40 @@ func (certs *certDB) tryAddPublicKey(ctx context.Context, nodeID storj.NodeID, publicKey crypto.PublicKey) (err error) {
 	return nil
 }
 
+// GetPublicKey gets the public key of uplink corresponding to uplink id
 func (certs *certDB) GetPublicKey(ctx context.Context, nodeID storj.NodeID) (_ crypto.PublicKey, err error) {
 	defer mon.Task()(&ctx)(&err)
-	dbxInfo, err := certs.db.Get_CertRecord_By_Id(ctx, dbx.CertRecord_Id(nodeID.Bytes()))
+	dbxInfo, err := certs.db.All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx, dbx.CertRecord_Id(nodeID.Bytes()))
 	if err != nil {
 		return nil, err
 	}
 
-	pubkey, err := pkcrypto.PublicKeyFromPKIX(dbxInfo.Publickey)
+	// the first index always holds the latest of the keys
+	pubkey, err := pkcrypto.PublicKeyFromPKIX(dbxInfo[0].Publickey)
 	if err != nil {
 		return nil, Error.New("Failed to extract Public Key from Order: %+v", err)
 	}
 	return pubkey, nil
 }
+
+// GetPublicKeys gets the public keys of a storagenode corresponding to storagenode id
+func (certs *certDB) GetPublicKeys(ctx context.Context, nodeID storj.NodeID) (pubkeys []crypto.PublicKey, err error) {
+	defer mon.Task()(&ctx)(&err)
+	dbxInfo, err := certs.db.All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx, dbx.CertRecord_Id(nodeID.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(dbxInfo) == 0 {
+		return nil, Error.New("Failed to extract Public Key from ID: %+v", nodeID.String())
+	}
+
+	for _, v := range dbxInfo {
+		pubkey, err := pkcrypto.PublicKeyFromPKIX(v.Publickey)
+		if err != nil {
+			return nil, Error.New("Failed to extract Public Key from Order: %+v", err)
+		}
+		pubkeys = append(pubkeys, pubkey)
+	}
+	return pubkeys, nil
+}
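One detail for reviewers: unlike the old Get_CertRecord_By_Id, the generated All_... query does not report sql.ErrNoRows for an unknown node; it returns an empty slice. GetPublicKeys guards with a length check, while GetPublicKey indexes dbxInfo[0] directly and so assumes at least one row exists. A defensive variant would look like this (a sketch using the file's names, not the commit's code):

func latestKeyOf(records []*dbx.CertRecord) (crypto.PublicKey, error) {
	if len(records) == 0 {
		return nil, Error.New("no public key stored for this node")
	}
	// records are ordered by update_at DESC, so index 0 is the newest key
	return pkcrypto.PublicKeyFromPKIX(records[0].Publickey)
}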
@@ -617,7 +617,12 @@ read all (
 //--- certRecord ---//
 
 model certRecord (
-	key id
+	key publickey
 
+	index (
+		name certrecord_id_update_at
+		fields id update_at
+	)
+
 	field publickey blob //--uplink public key--//
 	field id blob //--uplink node id --//
@@ -626,13 +631,23 @@ model certRecord (
 
 create certRecord ( )
 delete certRecord ( where certRecord.id = ? )
-update certRecord ( where certRecord.id = ? )
 
 read one (
 	select certRecord
 	where certRecord.id = ?
 )
+
+read one (
+	select certRecord
+	where certRecord.publickey = ?
+)
+
+read all (
+	select certRecord
+	where certRecord.id = ?
+	orderby desc certRecord.update_at
+)
 
 //--- satellite registration token for Vanguard release (temporary table) ---//
 
 model registration_token (
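For readers unfamiliar with dbx: each read definition above produces a correspondingly named method in the generated code further down. Roughly (a sketch; ctx, certs, pubbytes, and nodeID as in the certdb file above):

// from the new `read one ... where certRecord.publickey = ?`
rec, err := certs.db.Get_CertRecord_By_Publickey(ctx, dbx.CertRecord_Publickey(pubbytes))
// from the new `read all ... orderby desc certRecord.update_at`
recs, err := certs.db.All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx, dbx.CertRecord_Id(nodeID.Bytes()))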
@@ -331,7 +331,7 @@ CREATE TABLE certRecords (
 	publickey bytea NOT NULL,
 	id bytea NOT NULL,
 	update_at timestamp with time zone NOT NULL,
-	PRIMARY KEY ( id )
+	PRIMARY KEY ( publickey )
 );
 CREATE TABLE injuredsegments (
 	path bytea NOT NULL,
@@ -551,6 +551,7 @@ CREATE TABLE project_payments (
 );
 CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
+CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );
 CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
 CREATE INDEX node_last_ip ON nodes ( last_net );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
@@ -678,7 +679,7 @@ CREATE TABLE certRecords (
 	publickey BLOB NOT NULL,
 	id BLOB NOT NULL,
 	update_at TIMESTAMP NOT NULL,
-	PRIMARY KEY ( id )
+	PRIMARY KEY ( publickey )
 );
 CREATE TABLE injuredsegments (
 	path BLOB NOT NULL,
@@ -898,6 +899,7 @@ CREATE TABLE project_payments (
 );
 CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
+CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );
 CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
 CREATE INDEX node_last_ip ON nodes ( last_net );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
@@ -7478,7 +7480,7 @@ func (obj *postgresImpl) Get_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
 	certRecord *CertRecord, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ? LIMIT 2")
 
 	var __values []interface{}
 	__values = append(__values, certRecord_id.value())
@@ -7486,6 +7488,49 @@ func (obj *postgresImpl) Get_CertRecord_By_Id(ctx context.Context,
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	if !__rows.Next() {
+		if err := __rows.Err(); err != nil {
+			return nil, obj.makeErr(err)
+		}
+		return nil, makeErr(sql.ErrNoRows)
+	}
+
+	certRecord = &CertRecord{}
+	err = __rows.Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+
+	if __rows.Next() {
+		return nil, tooManyRows("CertRecord_By_Id")
+	}
+
+	if err := __rows.Err(); err != nil {
+		return nil, obj.makeErr(err)
+	}
+
+	return certRecord, nil
+
+}
+
+func (obj *postgresImpl) Get_CertRecord_By_Publickey(ctx context.Context,
+	certRecord_publickey CertRecord_Publickey_Field) (
+	certRecord *CertRecord, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.publickey = ?")
+
+	var __values []interface{}
+	__values = append(__values, certRecord_publickey.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
 	certRecord = &CertRecord{}
 	err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
 	if err != nil {
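Why the LIMIT 2: now that id is no longer the primary key, fetching at most two rows is the cheapest way for the generated Get to tell "exactly one" apart from "ambiguous". The shape of that check in miniature (illustration only; imports elided):

func exactlyOne(rows []*CertRecord) (*CertRecord, error) {
	switch len(rows) {
	case 0:
		return nil, sql.ErrNoRows // no match
	case 1:
		return rows[0], nil // the unambiguous result
	default:
		return nil, errors.New("too many rows") // tooManyRows(...) in the generated code
	}
}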
@@ -7495,6 +7540,39 @@ func (obj *postgresImpl) Get_CertRecord_By_Id(ctx context.Context,
 
 }
 
+func (obj *postgresImpl) All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx context.Context,
+	certRecord_id CertRecord_Id_Field) (
+	rows []*CertRecord, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ? ORDER BY certRecords.update_at DESC")
+
+	var __values []interface{}
+	__values = append(__values, certRecord_id.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	for __rows.Next() {
+		certRecord := &CertRecord{}
+		err = __rows.Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
+		if err != nil {
+			return nil, obj.makeErr(err)
+		}
+		rows = append(rows, certRecord)
+	}
+	if err := __rows.Err(); err != nil {
+		return nil, obj.makeErr(err)
+	}
+	return rows, nil
+
+}
+
 func (obj *postgresImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
 	registration_token_secret RegistrationToken_Secret_Field) (
 	registration_token *RegistrationToken, err error) {
@@ -8288,42 +8366,6 @@ func (obj *postgresImpl) Update_ApiKey_By_Id(ctx context.Context,
 	return api_key, nil
 }
 
-func (obj *postgresImpl) Update_CertRecord_By_Id(ctx context.Context,
-	certRecord_id CertRecord_Id_Field,
-	update CertRecord_Update_Fields) (
-	certRecord *CertRecord, err error) {
-	var __sets = &__sqlbundle_Hole{}
-
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE certRecords SET "), __sets, __sqlbundle_Literal(" WHERE certRecords.id = ? RETURNING certRecords.publickey, certRecords.id, certRecords.update_at")}}
-
-	__sets_sql := __sqlbundle_Literals{Join: ", "}
-	var __values []interface{}
-	var __args []interface{}
-
-	__now := obj.db.Hooks.Now().UTC()
-
-	__values = append(__values, __now)
-	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("update_at = ?"))
-
-	__args = append(__args, certRecord_id.value())
-
-	__values = append(__values, __args...)
-	__sets.SQL = __sets_sql
-
-	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __values...)
-
-	certRecord = &CertRecord{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
-	if err == sql.ErrNoRows {
-		return nil, nil
-	}
-	if err != nil {
-		return nil, obj.makeErr(err)
-	}
-	return certRecord, nil
-}
-
 func (obj *postgresImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
 	registration_token_secret RegistrationToken_Secret_Field,
 	update RegistrationToken_Update_Fields) (
@@ -8877,7 +8919,7 @@ func (obj *postgresImpl) Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
 
 func (obj *postgresImpl) Delete_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
-	deleted bool, err error) {
+	count int64, err error) {
 
 	var __embed_stmt = __sqlbundle_Literal("DELETE FROM certRecords WHERE certRecords.id = ?")
 
@@ -8889,15 +8931,15 @@ func (obj *postgresImpl) Delete_CertRecord_By_Id(ctx context.Context,
 
 	__res, err := obj.driver.Exec(__stmt, __values...)
 	if err != nil {
-		return false, obj.makeErr(err)
+		return 0, obj.makeErr(err)
 	}
 
-	__count, err := __res.RowsAffected()
+	count, err = __res.RowsAffected()
 	if err != nil {
-		return false, obj.makeErr(err)
+		return 0, obj.makeErr(err)
 	}
 
-	return __count > 0, nil
+	return count, nil
 
 }
 
@@ -11309,7 +11351,7 @@ func (obj *sqlite3Impl) Get_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
 	certRecord *CertRecord, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ? LIMIT 2")
 
 	var __values []interface{}
 	__values = append(__values, certRecord_id.value())
@@ -11317,6 +11359,49 @@ func (obj *sqlite3Impl) Get_CertRecord_By_Id(ctx context.Context,
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	if !__rows.Next() {
+		if err := __rows.Err(); err != nil {
+			return nil, obj.makeErr(err)
+		}
+		return nil, makeErr(sql.ErrNoRows)
+	}
+
+	certRecord = &CertRecord{}
+	err = __rows.Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+
+	if __rows.Next() {
+		return nil, tooManyRows("CertRecord_By_Id")
+	}
+
+	if err := __rows.Err(); err != nil {
+		return nil, obj.makeErr(err)
+	}
+
+	return certRecord, nil
+
+}
+
+func (obj *sqlite3Impl) Get_CertRecord_By_Publickey(ctx context.Context,
+	certRecord_publickey CertRecord_Publickey_Field) (
+	certRecord *CertRecord, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.publickey = ?")
+
+	var __values []interface{}
+	__values = append(__values, certRecord_publickey.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
 	certRecord = &CertRecord{}
 	err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
 	if err != nil {
@@ -11326,6 +11411,39 @@ func (obj *sqlite3Impl) Get_CertRecord_By_Id(ctx context.Context,
 
 }
 
+func (obj *sqlite3Impl) All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx context.Context,
+	certRecord_id CertRecord_Id_Field) (
+	rows []*CertRecord, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ? ORDER BY certRecords.update_at DESC")
+
+	var __values []interface{}
+	__values = append(__values, certRecord_id.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	for __rows.Next() {
+		certRecord := &CertRecord{}
+		err = __rows.Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
+		if err != nil {
+			return nil, obj.makeErr(err)
+		}
+		rows = append(rows, certRecord)
+	}
+	if err := __rows.Err(); err != nil {
+		return nil, obj.makeErr(err)
+	}
+	return rows, nil
+
+}
+
 func (obj *sqlite3Impl) Get_RegistrationToken_By_Secret(ctx context.Context,
 	registration_token_secret RegistrationToken_Secret_Field) (
 	registration_token *RegistrationToken, err error) {
@@ -12199,52 +12317,6 @@ func (obj *sqlite3Impl) Update_ApiKey_By_Id(ctx context.Context,
 	return api_key, nil
 }
 
-func (obj *sqlite3Impl) Update_CertRecord_By_Id(ctx context.Context,
-	certRecord_id CertRecord_Id_Field,
-	update CertRecord_Update_Fields) (
-	certRecord *CertRecord, err error) {
-	var __sets = &__sqlbundle_Hole{}
-
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE certRecords SET "), __sets, __sqlbundle_Literal(" WHERE certRecords.id = ?")}}
-
-	__sets_sql := __sqlbundle_Literals{Join: ", "}
-	var __values []interface{}
-	var __args []interface{}
-
-	__now := obj.db.Hooks.Now().UTC()
-
-	__values = append(__values, __now)
-	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("update_at = ?"))
-
-	__args = append(__args, certRecord_id.value())
-
-	__values = append(__values, __args...)
-	__sets.SQL = __sets_sql
-
-	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __values...)
-
-	certRecord = &CertRecord{}
-	_, err = obj.driver.Exec(__stmt, __values...)
-	if err != nil {
-		return nil, obj.makeErr(err)
-	}
-
-	var __embed_stmt_get = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")
-
-	var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
-	obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
-
-	err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
-	if err == sql.ErrNoRows {
-		return nil, nil
-	}
-	if err != nil {
-		return nil, obj.makeErr(err)
-	}
-	return certRecord, nil
-}
-
 func (obj *sqlite3Impl) Update_RegistrationToken_By_Secret(ctx context.Context,
 	registration_token_secret RegistrationToken_Secret_Field,
 	update RegistrationToken_Update_Fields) (
@@ -12828,7 +12900,7 @@ func (obj *sqlite3Impl) Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
 
 func (obj *sqlite3Impl) Delete_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
-	deleted bool, err error) {
+	count int64, err error) {
 
 	var __embed_stmt = __sqlbundle_Literal("DELETE FROM certRecords WHERE certRecords.id = ?")
 
@@ -12840,15 +12912,15 @@ func (obj *sqlite3Impl) Delete_CertRecord_By_Id(ctx context.Context,
 
 	__res, err := obj.driver.Exec(__stmt, __values...)
 	if err != nil {
-		return false, obj.makeErr(err)
+		return 0, obj.makeErr(err)
 	}
 
-	__count, err := __res.RowsAffected()
+	count, err = __res.RowsAffected()
 	if err != nil {
-		return false, obj.makeErr(err)
+		return 0, obj.makeErr(err)
 	}
 
-	return __count > 0, nil
+	return count, nil
 
 }
 
@@ -13705,6 +13777,16 @@ func (rx *Rx) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(
 	return tx.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id, bucket_storage_tally_bucket_name, bucket_storage_tally_interval_start_greater_or_equal, bucket_storage_tally_interval_start_less_or_equal)
 }
 
+func (rx *Rx) All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx context.Context,
+	certRecord_id CertRecord_Id_Field) (
+	rows []*CertRecord, err error) {
+	var tx *Tx
+	if tx, err = rx.getTx(ctx); err != nil {
+		return
+	}
+	return tx.All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx, certRecord_id)
+}
+
 func (rx *Rx) All_Node_Id(ctx context.Context) (
 	rows []*Id_Row, err error) {
 	var tx *Tx
@@ -14272,12 +14354,13 @@ func (rx *Rx) Delete_BucketUsage_By_Id(ctx context.Context,
 
 func (rx *Rx) Delete_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
-	deleted bool, err error) {
+	count int64, err error) {
 	var tx *Tx
 	if tx, err = rx.getTx(ctx); err != nil {
 		return
 	}
 	return tx.Delete_CertRecord_By_Id(ctx, certRecord_id)
+
 }
 
 func (rx *Rx) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
@@ -14509,6 +14592,16 @@ func (rx *Rx) Get_CertRecord_By_Id(ctx context.Context,
 	return tx.Get_CertRecord_By_Id(ctx, certRecord_id)
 }
 
+func (rx *Rx) Get_CertRecord_By_Publickey(ctx context.Context,
+	certRecord_publickey CertRecord_Publickey_Field) (
+	certRecord *CertRecord, err error) {
+	var tx *Tx
+	if tx, err = rx.getTx(ctx); err != nil {
+		return
+	}
+	return tx.Get_CertRecord_By_Publickey(ctx, certRecord_publickey)
+}
+
 func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
 	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
 	irreparabledb *Irreparabledb, err error) {
@@ -14809,17 +14902,6 @@ func (rx *Rx) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
 	return tx.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name, update)
 }
 
-func (rx *Rx) Update_CertRecord_By_Id(ctx context.Context,
-	certRecord_id CertRecord_Id_Field,
-	update CertRecord_Update_Fields) (
-	certRecord *CertRecord, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Update_CertRecord_By_Id(ctx, certRecord_id, update)
-}
-
 func (rx *Rx) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
 	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
 	update Irreparabledb_Update_Fields) (
||||||
@ -14924,6 +15006,10 @@ type Methods interface {
|
|||||||
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
||||||
rows []*BucketStorageTally, err error)
|
rows []*BucketStorageTally, err error)
|
||||||
|
|
||||||
|
All_CertRecord_By_Id_OrderBy_Desc_UpdateAt(ctx context.Context,
|
||||||
|
certRecord_id CertRecord_Id_Field) (
|
||||||
|
rows []*CertRecord, err error)
|
||||||
|
|
||||||
All_Node_Id(ctx context.Context) (
|
All_Node_Id(ctx context.Context) (
|
||||||
rows []*Id_Row, err error)
|
rows []*Id_Row, err error)
|
||||||
|
|
||||||
@@ -15215,7 +15301,7 @@ type Methods interface {
 
 	Delete_CertRecord_By_Id(ctx context.Context,
 		certRecord_id CertRecord_Id_Field) (
-		deleted bool, err error)
+		count int64, err error)
 
 	Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
 		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
@@ -15313,6 +15399,10 @@ type Methods interface {
 		certRecord_id CertRecord_Id_Field) (
 		certRecord *CertRecord, err error)
 
+	Get_CertRecord_By_Publickey(ctx context.Context,
+		certRecord_publickey CertRecord_Publickey_Field) (
+		certRecord *CertRecord, err error)
+
 	Get_Irreparabledb_By_Segmentpath(ctx context.Context,
 		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
 		irreparabledb *Irreparabledb, err error)
@@ -15445,11 +15535,6 @@ type Methods interface {
 		update BucketMetainfo_Update_Fields) (
 		bucket_metainfo *BucketMetainfo, err error)
 
-	Update_CertRecord_By_Id(ctx context.Context,
-		certRecord_id CertRecord_Id_Field,
-		update CertRecord_Update_Fields) (
-		certRecord *CertRecord, err error)
-
 	Update_Irreparabledb_By_Segmentpath(ctx context.Context,
 		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
 		update Irreparabledb_Update_Fields) (
@@ -59,7 +59,7 @@ CREATE TABLE certRecords (
 	publickey bytea NOT NULL,
 	id bytea NOT NULL,
 	update_at timestamp with time zone NOT NULL,
-	PRIMARY KEY ( id )
+	PRIMARY KEY ( publickey )
 );
 CREATE TABLE injuredsegments (
 	path bytea NOT NULL,
@@ -279,6 +279,7 @@ CREATE TABLE project_payments (
 );
 CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
+CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );
 CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
 CREATE INDEX node_last_ip ON nodes ( last_net );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
@@ -59,7 +59,7 @@ CREATE TABLE certRecords (
 	publickey BLOB NOT NULL,
 	id BLOB NOT NULL,
 	update_at TIMESTAMP NOT NULL,
-	PRIMARY KEY ( id )
+	PRIMARY KEY ( publickey )
 );
 CREATE TABLE injuredsegments (
 	path BLOB NOT NULL,
@@ -279,6 +279,7 @@ CREATE TABLE project_payments (
 );
 CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
+CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );
 CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
 CREATE INDEX node_last_ip ON nodes ( last_net );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
@@ -137,13 +137,20 @@ type lockedCertDB struct {
 	db certdb.DB
 }
 
-// GetPublicKey gets the public key of uplink corresponding to uplink id
+// GetPublicKey gets one latest public key of a node
 func (m *lockedCertDB) GetPublicKey(ctx context.Context, a1 storj.NodeID) (crypto.PublicKey, error) {
 	m.Lock()
 	defer m.Unlock()
 	return m.db.GetPublicKey(ctx, a1)
 }
 
+// GetPublicKeys gets all the public keys of a node
+func (m *lockedCertDB) GetPublicKeys(ctx context.Context, a1 storj.NodeID) ([]crypto.PublicKey, error) {
+	m.Lock()
+	defer m.Unlock()
+	return m.db.GetPublicKeys(ctx, a1)
+}
+
 // SavePublicKey adds a new bandwidth agreement.
 func (m *lockedCertDB) SavePublicKey(ctx context.Context, a1 storj.NodeID, a2 crypto.PublicKey) error {
 	m.Lock()
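The locked wrappers in this file all follow the same decorator shape: take the coarse lock, delegate, release. Distilled (a sketch of the pattern, not the generated code; imports elided):

type lockedKeys struct {
	sync.Locker
	db certdb.DB
}

func (m *lockedKeys) GetPublicKeys(ctx context.Context, id storj.NodeID) ([]crypto.PublicKey, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetPublicKeys(ctx, id)
}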
@@ -864,6 +871,13 @@ type lockedOverlayCache struct {
 	db overlay.DB
 }
 
+// BatchUpdateStats updates multiple storagenode's stats in one transaction
+func (m *lockedOverlayCache) BatchUpdateStats(ctx context.Context, updateRequests []*overlay.UpdateRequest, batchSize int) (failed storj.NodeIDList, err error) {
+	m.Lock()
+	defer m.Unlock()
+	return m.db.BatchUpdateStats(ctx, updateRequests, batchSize)
+}
+
 // Get looks up the node by nodeID
 func (m *lockedOverlayCache) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error) {
 	m.Lock()
@@ -941,13 +955,6 @@ func (m *lockedOverlayCache) UpdateNodeInfo(ctx context.Context, node storj.NodeID,
 	return m.db.UpdateNodeInfo(ctx, node, nodeInfo)
 }
 
-// BatchUpdateStats updates multiple storagenode's stats in one transaction
-func (m *lockedOverlayCache) BatchUpdateStats(ctx context.Context, request []*overlay.UpdateRequest, batchSize int) (failed storj.NodeIDList, err error) {
-	m.Lock()
-	defer m.Unlock()
-	return m.db.BatchUpdateStats(ctx, request, batchSize)
-}
-
 // UpdateStats all parts of single storagenode's stats.
 func (m *lockedOverlayCache) UpdateStats(ctx context.Context, request *overlay.UpdateRequest) (stats *overlay.NodeStats, err error) {
 	m.Lock()
@@ -1023,6 +1030,13 @@ type lockedRepairQueue struct {
 	db queue.RepairQueue
 }
 
+// Count counts the number of segments in the repair queue.
+func (m *lockedRepairQueue) Count(ctx context.Context) (count int, err error) {
+	m.Lock()
+	defer m.Unlock()
+	return m.db.Count(ctx)
+}
+
 // Delete removes an injured segment.
 func (m *lockedRepairQueue) Delete(ctx context.Context, s *pb.InjuredSegment) error {
 	m.Lock()
@@ -1051,13 +1065,6 @@ func (m *lockedRepairQueue) SelectN(ctx context.Context, limit int) ([]pb.InjuredSegment, error) {
 	return m.db.SelectN(ctx, limit)
 }
 
-// Count counts the number of segments in the repair queue.
-func (m *lockedRepairQueue) Count(ctx context.Context) (int, error) {
-	m.Lock()
-	defer m.Unlock()
-	return m.db.Count(ctx)
-}
-
 // returns database for marketing admin GUI
 func (m *locked) Rewards() rewards.DB {
 	m.Lock()
@@ -1072,6 +1072,15 @@ func (db *DB) PostgresMigration() *migrate.Migration {
 				ALTER TABLE user_credits ALTER COLUMN type SET NOT NULL;`,
 			},
 		},
+		{
+			Description: "Changing the primary key constraint",
+			Version:     50,
+			Action: migrate.SQL{
+				`ALTER TABLE certRecords DROP CONSTRAINT certrecords_pkey;
+				ALTER TABLE certRecords ADD CONSTRAINT certrecords_pkey PRIMARY KEY (publickey);
+				CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );`,
+			},
+		},
 		},
 	}
 }
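A practical consequence of moving the primary key to publickey, stated as an invariant (the certRow type and helper below are hypothetical, for illustration only): a given PKIX-encoded key can occupy at most one row table-wide, while one node ID may now own many rows; the new (id, update_at) index is what keeps the newest-first reads cheap.

type certRow struct{ nodeID, publickeyPKIX []byte } // hypothetical mirror of the certRecords columns

// after migration v50 this holds for any snapshot of the table
func uniqueKeyInvariant(rows []certRow) bool {
	seen := map[string]bool{}
	for _, r := range rows {
		if seen[string(r.publickeyPKIX)] {
			return false // would violate PRIMARY KEY ( publickey )
		}
		seen[string(r.publickeyPKIX)] = true
	}
	return true
}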
satellite/satellitedb/testdata/postgres.v50.sql (new file, vendored, 353 lines)
@@ -0,0 +1,353 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
|
||||||
|
-- DO NOT EDIT
|
||||||
|
CREATE TABLE accounting_rollups (
|
||||||
|
id bigserial NOT NULL,
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
start_time timestamp with time zone NOT NULL,
|
||||||
|
put_total bigint NOT NULL,
|
||||||
|
get_total bigint NOT NULL,
|
||||||
|
get_audit_total bigint NOT NULL,
|
||||||
|
get_repair_total bigint NOT NULL,
|
||||||
|
put_repair_total bigint NOT NULL,
|
||||||
|
at_rest_total double precision NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE accounting_timestamps (
|
||||||
|
name text NOT NULL,
|
||||||
|
value timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( name )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_bandwidth_rollups (
|
||||||
|
bucket_name bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
interval_start timestamp NOT NULL,
|
||||||
|
interval_seconds integer NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
inline bigint NOT NULL,
|
||||||
|
allocated bigint NOT NULL,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_storage_tallies (
|
||||||
|
bucket_name bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
interval_start timestamp NOT NULL,
|
||||||
|
inline bigint NOT NULL,
|
||||||
|
remote bigint NOT NULL,
|
||||||
|
remote_segments_count integer NOT NULL,
|
||||||
|
inline_segments_count integer NOT NULL,
|
||||||
|
object_count integer NOT NULL,
|
||||||
|
metadata_size bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_usages (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
bucket_id bytea NOT NULL,
|
||||||
|
rollup_end_time timestamp with time zone NOT NULL,
|
||||||
|
remote_stored_data bigint NOT NULL,
|
||||||
|
inline_stored_data bigint NOT NULL,
|
||||||
|
remote_segments integer NOT NULL,
|
||||||
|
inline_segments integer NOT NULL,
|
||||||
|
objects integer NOT NULL,
|
||||||
|
metadata_size bigint NOT NULL,
|
||||||
|
repair_egress bigint NOT NULL,
|
||||||
|
get_egress bigint NOT NULL,
|
||||||
|
audit_egress bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE certRecords (
|
||||||
|
publickey bytea NOT NULL,
|
||||||
|
id bytea NOT NULL,
|
||||||
|
update_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( publickey )
|
||||||
|
);
|
||||||
|
CREATE INDEX certrecord_id_update_at ON certRecords ( id, update_at );
|
||||||
|
CREATE TABLE injuredsegments (
|
||||||
|
path bytea NOT NULL,
|
||||||
|
data bytea NOT NULL,
|
||||||
|
attempted timestamp,
|
||||||
|
PRIMARY KEY ( path )
|
||||||
|
);
|
||||||
|
CREATE TABLE irreparabledbs (
|
||||||
|
segmentpath bytea NOT NULL,
|
||||||
|
segmentdetail bytea NOT NULL,
|
||||||
|
pieces_lost_count bigint NOT NULL,
|
||||||
|
seg_damaged_unix_sec bigint NOT NULL,
|
||||||
|
repair_attempt_count bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( segmentpath )
|
||||||
|
);
|
||||||
|
CREATE TABLE nodes (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
address text NOT NULL,
|
||||||
|
last_net text NOT NULL,
|
||||||
|
protocol integer NOT NULL,
|
||||||
|
type integer NOT NULL,
|
||||||
|
email text NOT NULL,
|
||||||
|
wallet text NOT NULL,
|
||||||
|
free_bandwidth bigint NOT NULL,
|
||||||
|
free_disk bigint NOT NULL,
|
||||||
|
major bigint NOT NULL,
|
||||||
|
minor bigint NOT NULL,
|
||||||
|
patch bigint NOT NULL,
|
||||||
|
hash text NOT NULL,
|
||||||
|
timestamp timestamp with time zone NOT NULL,
|
||||||
|
release boolean NOT NULL,
|
||||||
|
latency_90 bigint NOT NULL,
|
||||||
|
audit_success_count bigint NOT NULL,
|
||||||
|
total_audit_count bigint NOT NULL,
|
||||||
|
uptime_success_count bigint NOT NULL,
|
||||||
|
total_uptime_count bigint NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
updated_at timestamp with time zone NOT NULL,
|
||||||
|
last_contact_success timestamp with time zone NOT NULL,
|
||||||
|
last_contact_failure timestamp with time zone NOT NULL,
|
||||||
|
contained boolean NOT NULL,
|
||||||
|
disqualified timestamp with time zone,
|
||||||
|
audit_reputation_alpha double precision NOT NULL,
|
||||||
|
audit_reputation_beta double precision NOT NULL,
|
||||||
|
uptime_reputation_alpha double precision NOT NULL,
|
||||||
|
uptime_reputation_beta double precision NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE offers (
|
||||||
|
id serial NOT NULL,
|
||||||
|
name text NOT NULL,
|
||||||
|
description text NOT NULL,
|
||||||
|
award_credit_in_cents integer NOT NULL,
|
||||||
|
invitee_credit_in_cents integer NOT NULL,
|
||||||
|
award_credit_duration_days integer,
|
||||||
|
invitee_credit_duration_days integer,
|
||||||
|
redeemable_cap integer,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
type integer NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE pending_audits (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
piece_id bytea NOT NULL,
|
||||||
|
stripe_index bigint NOT NULL,
|
||||||
|
share_size bigint NOT NULL,
|
||||||
|
expected_share_hash bytea NOT NULL,
|
||||||
|
reverify_count bigint NOT NULL,
|
||||||
|
path bytea NOT NULL,
|
||||||
|
PRIMARY KEY ( node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE projects (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
name text NOT NULL,
|
||||||
|
description text NOT NULL,
|
||||||
|
usage_limit bigint NOT NULL,
|
||||||
|
partner_id bytea,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE registration_tokens (
|
||||||
|
secret bytea NOT NULL,
|
||||||
|
owner_id bytea,
|
||||||
|
project_limit integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( secret ),
|
||||||
|
UNIQUE ( owner_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE reset_password_tokens (
|
||||||
|
secret bytea NOT NULL,
|
||||||
|
owner_id bytea NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( secret ),
|
||||||
|
UNIQUE ( owner_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE serial_numbers (
|
||||||
|
id serial NOT NULL,
|
||||||
|
serial_number bytea NOT NULL,
|
||||||
|
bucket_id bytea NOT NULL,
|
||||||
|
expires_at timestamp NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_bandwidth_rollups (
|
||||||
|
storagenode_id bytea NOT NULL,
|
||||||
|
interval_start timestamp NOT NULL,
|
||||||
|
interval_seconds integer NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
allocated bigint NOT NULL,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_storage_tallies (
|
||||||
|
id bigserial NOT NULL,
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
interval_end_time timestamp with time zone NOT NULL,
|
||||||
|
data_total double precision NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE users (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
email text NOT NULL,
|
||||||
|
full_name text NOT NULL,
|
||||||
|
short_name text,
    password_hash bytea NOT NULL,
    status integer NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
    project_id bytea NOT NULL,
    bucket_name bytea NOT NULL,
    partner_id bytea NOT NULL,
    last_updated timestamp NOT NULL,
    PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    head bytea NOT NULL,
    name text NOT NULL,
    secret bytea NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( head ),
    UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ),
    name bytea NOT NULL,
    partner_id bytea,
    path_cipher integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    default_segment_size integer NOT NULL,
    default_encryption_cipher_suite integer NOT NULL,
    default_encryption_block_size integer NOT NULL,
    default_redundancy_algorithm integer NOT NULL,
    default_redundancy_share_size integer NOT NULL,
    default_redundancy_required_shares integer NOT NULL,
    default_redundancy_repair_shares integer NOT NULL,
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( name, project_id )
);
CREATE TABLE project_invoice_stamps (
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    invoice_id bytea NOT NULL,
    start_date timestamp with time zone NOT NULL,
    end_date timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( project_id, start_date, end_date ),
    UNIQUE ( invoice_id )
);
CREATE TABLE project_members (
    member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE used_serials (
    serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
    storage_node_id bytea NOT NULL,
    PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
    id serial NOT NULL,
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    offer_id integer NOT NULL REFERENCES offers( id ),
    referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
    type text NOT NULL,
    credits_earned_in_cents integer NOT NULL,
    credits_used_in_cents integer NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE user_payments (
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    customer_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( user_id ),
    UNIQUE ( customer_id )
);
CREATE TABLE project_payments (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    payer_id bytea NOT NULL REFERENCES user_payments( user_id ) ON DELETE CASCADE,
    payment_method_id bytea NOT NULL,
    is_default boolean NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
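-- Indexes over the tables defined above.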
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits (id, offer_id) WHERE credits_earned_in_cents=0;
---
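-- Test fixture rows covering the schema above; the byte strings and timestamps are arbitrary sample values.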
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, '2019-02-14 08:28:24.254934+00');
INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00');
INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "status", "type") VALUES ('testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0);
INSERT INTO "offers" ("name","description","award_credit_in_cents","award_credit_duration_days", "invitee_credit_in_cents","invitee_credit_duration_days", "expires_at","created_at","status","type") VALUES ('Default free credit offer','Is active when no active free credit offer',0, NULL,300, 14, '2119-03-14 08:28:24.636949+00','2019-07-14 08:28:24.636949+00',1,1);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "user_payments" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "project_payments" ("id", "project_id", "payer_id", "payment_method_id", "is_default","created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, true, '2019-06-01 08:28:24.267934+00');
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
-- NEW DATA --