Drop SN certificates table from DB (#2498)

This commit is contained in:
Michal Niewrzal 2019-07-09 23:33:45 +02:00 committed by littleskunk
parent d616be8ae0
commit bbc25a2bf7
15 changed files with 164 additions and 234 deletions

View File

@@ -74,7 +74,6 @@ func TestOrders(t *testing.T) {
info := &orders.Info{
Limit: limit,
Order: order,
Uplink: uplink.PeerIdentity(),
}
// basic add
@@ -122,7 +121,6 @@ func TestOrders(t *testing.T) {
{
Limit: limit,
Order: order,
Uplink: uplink.PeerIdentity(),
Status: orders.StatusAccepted,
ArchivedAt: archived[0].ArchivedAt,

View File

@@ -14,7 +14,6 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
@@ -32,14 +31,12 @@ var (
type Info struct {
Limit *pb.OrderLimit
Order *pb.Order
Uplink *identity.PeerIdentity
}
// ArchivedInfo contains full information about an archived order.
type ArchivedInfo struct {
Limit *pb.OrderLimit
Order *pb.Order
Uplink *identity.PeerIdentity
Status Status
ArchivedAt time.Time
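With the Uplink identity removed from both structs, an order now travels as just its limit and order. A minimal sketch of constructing them after this change (illustrative only; limit and order are placeholders for the *pb.OrderLimit and *pb.Order exchanged with the satellite and uplink):
// Sketch: no *identity.PeerIdentity is attached to orders anymore.
info := &orders.Info{
	Limit: limit,
	Order: order,
}
archived := &orders.ArchivedInfo{
	Limit:      info.Limit,
	Order:      info.Order,
	Status:     orders.StatusAccepted,
	ArchivedAt: time.Now(),
}
_ = archived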

View File

@@ -54,7 +54,6 @@ type DB interface {
Orders() orders.DB
PieceInfo() pieces.DB
CertDB() trust.CertDB
Bandwidth() bandwidth.DB
UsedSerials() piecestore.UsedSerials
Vouchers() vouchers.DB

View File

@@ -57,7 +57,6 @@ func TestPieceInfo(t *testing.T) {
PieceExpiration: &now,
UplinkPieceHash: piecehash0,
Uplink: uplink0.PeerIdentity(),
}
piecehash1, err := signing.SignPieceHash(ctx,
@@ -77,7 +76,6 @@ func TestPieceInfo(t *testing.T) {
PieceExpiration: &now,
UplinkPieceHash: piecehash1,
Uplink: uplink1.PeerIdentity(),
}
piecehash2, err := signing.SignPieceHash(ctx,
@@ -97,7 +95,6 @@ func TestPieceInfo(t *testing.T) {
PieceExpiration: &now,
UplinkPieceHash: piecehash2,
Uplink: uplink2.PeerIdentity(),
}
_, err = pieceinfos.Get(ctx, info0.SatelliteID, info0.PieceID)

View File

@@ -540,7 +540,6 @@ func (endpoint *Endpoint) SaveOrder(ctx context.Context, limit *pb.OrderLimit, o
err = endpoint.orders.Enqueue(ctx, &orders.Info{
Limit: limit,
Order: order,
Uplink: uplink,
})
if err != nil {
endpoint.log.Error("failed to add order", zap.Error(err))

View File

@@ -1,105 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package storagenodedb
import (
"context"
"crypto/x509"
"encoding/asn1"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/dbutil/sqliteutil"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pkcrypto"
"storj.io/storj/storagenode/trust"
)
var (
mon = monkit.Package()
)
type certdb struct {
*InfoDB
}
// CertDB returns certificate database.
func (db *DB) CertDB() trust.CertDB { return db.info.CertDB() }
// CertDB returns certificate database.
func (db *InfoDB) CertDB() trust.CertDB { return &certdb{db} }
// Include includes the certificate in the table and returns a unique id.
func (db *certdb) Include(ctx context.Context, pi *identity.PeerIdentity) (certid int64, err error) {
defer mon.Task()(&ctx)(&err)
chain := encodePeerIdentity(pi)
result, err := db.db.Exec(`INSERT INTO certificate(node_id, peer_identity) VALUES(?, ?)`, pi.ID, chain)
if err != nil && sqliteutil.IsConstraintError(err) {
err = db.db.QueryRow(`SELECT cert_id FROM certificate WHERE peer_identity = ?`, chain).Scan(&certid)
return certid, ErrInfo.Wrap(err)
} else if err != nil {
return -1, ErrInfo.Wrap(err)
}
certid, err = result.LastInsertId()
return certid, ErrInfo.Wrap(err)
}
// LookupByCertID finds certificate by the certid returned by Include.
func (db *certdb) LookupByCertID(ctx context.Context, id int64) (_ *identity.PeerIdentity, err error) {
defer mon.Task()(&ctx)(&err)
var pem *[]byte
err = db.db.QueryRow(`SELECT peer_identity FROM certificate WHERE cert_id = ?`, id).Scan(&pem)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
if pem == nil {
return nil, ErrInfo.New("did not find certificate")
}
peer, err := decodePeerIdentity(ctx, *pem)
return peer, ErrInfo.Wrap(err)
}
// TODO: move into pkcrypto
func encodePeerIdentity(pi *identity.PeerIdentity) []byte {
var chain []byte
chain = append(chain, pi.Leaf.Raw...)
chain = append(chain, pi.CA.Raw...)
for _, cert := range pi.RestChain {
chain = append(chain, cert.Raw...)
}
return chain
}
func decodePeerIdentity(ctx context.Context, chain []byte) (_ *identity.PeerIdentity, err error) {
defer mon.Task()(&ctx)(&err)
var certs []*x509.Certificate
for len(chain) > 0 {
var raw asn1.RawValue
var err error
chain, err = asn1.Unmarshal(chain, &raw)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
cert, err := pkcrypto.CertFromDER(raw.FullBytes)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
certs = append(certs, cert)
}
if len(certs) < 2 {
return nil, ErrInfo.New("not enough certificates")
}
return identity.PeerIdentityFromChain(certs)
}
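For context on the deleted helpers: the encoding worked because each raw DER certificate is a single self-delimiting ASN.1 SEQUENCE, so a plain concatenation can be split again by repeated asn1.Unmarshal calls. A self-contained sketch of that round trip, substituting the standard library's x509.ParseCertificate for pkcrypto.CertFromDER and a throwaway self-signed certificate for a real peer identity (all names below are illustrative, not part of this commit):
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
	"log"
	"math/big"
	"time"
)

// selfSignedDER builds a throwaway certificate so the sketch is runnable.
func selfSignedDER() []byte {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "sketch"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	return der
}

func main() {
	// Encode: plain concatenation of raw DER, as encodePeerIdentity did.
	chain := append(selfSignedDER(), selfSignedDER()...)

	// Decode: asn1.Unmarshal peels exactly one certificate off the front
	// and returns the remainder, mirroring the deleted decodePeerIdentity loop.
	var certs []*x509.Certificate
	for len(chain) > 0 {
		var raw asn1.RawValue
		rest, err := asn1.Unmarshal(chain, &raw)
		if err != nil {
			log.Fatal(err)
		}
		cert, err := x509.ParseCertificate(raw.FullBytes)
		if err != nil {
			log.Fatal(err)
		}
		certs = append(certs, cert)
		chain = rest
	}
	fmt.Println("decoded certificates:", len(certs))
}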

View File

@@ -4,8 +4,10 @@
package storagenodedb
import (
_ "github.com/mattn/go-sqlite3" // used indirectly
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/storage"
@@ -15,6 +17,10 @@ import (
"storj.io/storj/storagenode"
)
var (
mon = monkit.Package()
)
var _ storagenode.DB = (*DB)(nil)
// Config configures storage node database

View File

@@ -236,6 +236,14 @@ func (db *InfoDB) Migration() *migrate.Migration {
`ALTER TABLE pieceinfo ADD COLUMN piece_creation TIMESTAMP NOT NULL DEFAULT 'epoch'`,
},
},
{
Description: "Drop certificate table.",
Version: 7,
Action: migrate.SQL{
`DROP TABLE certificate`,
`CREATE TABLE certificate (cert_id INTEGER)`,
},
},
},
}
}
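The step deletes the data but immediately recreates certificate as a one-column stub, plausibly so the FOREIGN KEY(uplink_cert_id) clauses on pieceinfo, unsent_order, and order_archive still have a table to reference (an inference; the commit does not say). A hedged sketch of what this step executes, run by hand against a plain *sql.DB with the mattn/go-sqlite3 driver the package already imports (the database path is a placeholder):
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "info.db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The two statements of migration version 7, applied atomically.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	for _, stmt := range []string{
		`DROP TABLE certificate`,
		`CREATE TABLE certificate (cert_id INTEGER)`,
	} {
		if _, err := tx.Exec(stmt); err != nil {
			_ = tx.Rollback()
			log.Fatal(err)
		}
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}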

View File

@@ -29,13 +29,6 @@ func (db *InfoDB) Orders() orders.DB { return &ordersdb{db} }
func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error) {
defer mon.Task()(&ctx)(&err)
certdb := db.CertDB()
uplinkCertID, err := certdb.Include(ctx, info.Uplink)
if err != nil {
return ErrInfo.Wrap(err)
}
limitSerialized, err := proto.Marshal(info.Limit)
if err != nil {
return ErrInfo.Wrap(err)
@@ -51,13 +44,14 @@ func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error)
return ErrInfo.Wrap(err)
}
// TODO remove `uplink_cert_id` from DB
_, err = db.db.Exec(`
INSERT INTO unsent_order(
satellite_id, serial_number,
order_limit_serialized, order_serialized, order_limit_expiration,
uplink_cert_id
) VALUES (?,?, ?,?,?, ?)
`, info.Limit.SatelliteId, info.Limit.SerialNumber, limitSerialized, orderSerialized, expirationTime, uplinkCertID)
`, info.Limit.SatelliteId, info.Limit.SerialNumber, limitSerialized, orderSerialized, expirationTime, 0)
return ErrInfo.Wrap(err)
}
@@ -67,9 +61,8 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info
defer mon.Task()(&ctx)(&err)
rows, err := db.db.Query(`
SELECT order_limit_serialized, order_serialized, certificate.peer_identity
SELECT order_limit_serialized, order_serialized
FROM unsent_order
INNER JOIN certificate on unsent_order.uplink_cert_id = certificate.cert_id
LIMIT ?
`, limit)
if err != nil {
@@ -84,9 +77,8 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info
for rows.Next() {
var limitSerialized []byte
var orderSerialized []byte
var uplinkIdentity []byte
err := rows.Scan(&limitSerialized, &orderSerialized, &uplinkIdentity)
err := rows.Scan(&limitSerialized, &orderSerialized)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
@@ -105,11 +97,6 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info
return nil, ErrInfo.Wrap(err)
}
info.Uplink, err = decodePeerIdentity(ctx, uplinkIdentity)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
infos = append(infos, &info)
}
@@ -205,10 +192,8 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar
defer mon.Task()(&ctx)(&err)
rows, err := db.db.Query(`
SELECT order_limit_serialized, order_serialized, certificate.peer_identity,
status, archived_at
SELECT order_limit_serialized, order_serialized, status, archived_at
FROM order_archive
INNER JOIN certificate on order_archive.uplink_cert_id = certificate.cert_id
LIMIT ?
`, limit)
if err != nil {
@@ -223,12 +208,11 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar
for rows.Next() {
var limitSerialized []byte
var orderSerialized []byte
var uplinkIdentity []byte
var status int
var archivedAt time.Time
err := rows.Scan(&limitSerialized, &orderSerialized, &uplinkIdentity, &status, &archivedAt)
err := rows.Scan(&limitSerialized, &orderSerialized, &status, &archivedAt)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
@@ -250,11 +234,6 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar
return nil, ErrInfo.Wrap(err)
}
info.Uplink, err = decodePeerIdentity(ctx, uplinkIdentity)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
infos = append(infos, &info)
}
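Taken together, the ordersdb changes reduce the enqueue/list round trip to the two serialized protobufs, with the still-present NOT NULL uplink_cert_id column fed a constant 0 until the TODO above removes it. A hedged usage sketch; ordersDB, ctx, limit, and order are assumed to exist:
// Sketch: enqueue an order and read it back without any identity lookup.
if err := ordersDB.Enqueue(ctx, &orders.Info{Limit: limit, Order: order}); err != nil {
	return err
}
unsent, err := ordersDB.ListUnsent(ctx, 10)
if err != nil {
	return err
}
for _, info := range unsent {
	_ = info.Limit.SerialNumber // only limit and order come back; no identity
}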

View File

@@ -38,22 +38,17 @@ func (db *InfoDB) PieceInfo() pieces.DB { return &db.pieceinfo }
func (db *pieceinfo) Add(ctx context.Context, info *pieces.Info) (err error) {
defer mon.Task()(&ctx)(&err)
certdb := db.CertDB()
certid, err := certdb.Include(ctx, info.Uplink)
if err != nil {
return ErrInfo.Wrap(err)
}
uplinkPieceHash, err := proto.Marshal(info.UplinkPieceHash)
if err != nil {
return ErrInfo.Wrap(err)
}
// TODO remove `uplink_cert_id` from DB
_, err = db.db.ExecContext(ctx, db.Rebind(`
INSERT INTO
pieceinfo(satellite_id, piece_id, piece_size, piece_creation, piece_expiration, uplink_piece_hash, uplink_cert_id)
VALUES (?,?,?,?,?,?,?)
`), info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation, info.PieceExpiration, uplinkPieceHash, certid)
`), info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation, info.PieceExpiration, uplinkPieceHash, 0)
if err == nil {
db.loadSpaceUsed(ctx)
@@ -70,14 +65,12 @@ func (db *pieceinfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceID
info.PieceID = pieceID
var uplinkPieceHash []byte
var uplinkIdentity []byte
err = db.db.QueryRowContext(ctx, db.Rebind(`
SELECT piece_size, piece_creation, piece_expiration, uplink_piece_hash, certificate.peer_identity
SELECT piece_size, piece_creation, piece_expiration, uplink_piece_hash
FROM pieceinfo
INNER JOIN certificate ON pieceinfo.uplink_cert_id = certificate.cert_id
WHERE satellite_id = ? AND piece_id = ?
`), satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &info.PieceExpiration, &uplinkPieceHash, &uplinkIdentity)
`), satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &info.PieceExpiration, &uplinkPieceHash)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
@@ -88,11 +81,6 @@ func (db *pieceinfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceID
return nil, ErrInfo.Wrap(err)
}
info.Uplink, err = decodePeerIdentity(ctx, uplinkIdentity)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
return info, nil
}
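The pieceinfo store follows the same pattern: Add writes a constant 0 into uplink_cert_id and Get no longer joins against certificate, so piece metadata round-trips without any identity decoding. A hedged usage sketch; pieceDB, ctx, satelliteID, pieceID, and signedHash are assumed:
// Sketch: store and fetch piece metadata after this change.
now := time.Now()
err := pieceDB.Add(ctx, &pieces.Info{
	SatelliteID:     satelliteID, // storj.NodeID, assumed
	PieceID:         pieceID,     // storj.PieceID, assumed
	PieceSize:       123,
	PieceCreation:   now,
	PieceExpiration: &now,
	UplinkPieceHash: signedHash, // *pb.PieceHash, still persisted
})
if err != nil {
	return err
}
got, err := pieceDB.Get(ctx, satelliteID, pieceID)
if err != nil {
	return err
}
_ = got.UplinkPieceHash // the uplink identity is no longer loaded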

View File

@@ -163,6 +163,5 @@ func createOrder(t *testing.T, ctx *testcontext.Context) (info *orders.Info) {
return &orders.Info{
Limit: limit,
Order: order,
Uplink: uplink.PeerIdentity(),
}
}

View File

@@ -0,0 +1,126 @@
-- table for keeping serials that need to be verified against
CREATE TABLE used_serial (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
expiration TIMESTAMP NOT NULL
);
-- primary key on satellite id and serial number
CREATE UNIQUE INDEX pk_used_serial ON used_serial(satellite_id, serial_number);
-- expiration index to allow fast deletion
CREATE INDEX idx_used_serial ON used_serial(expiration);
-- certificate table for storing uplink/satellite certificates
CREATE TABLE certificate (
cert_id INTEGER
);
-- table for storing piece meta info
CREATE TABLE pieceinfo (
satellite_id BLOB NOT NULL,
piece_id BLOB NOT NULL,
piece_size BIGINT NOT NULL,
piece_expiration TIMESTAMP,
uplink_piece_hash BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
deletion_failed_at TIMESTAMP,
piece_creation TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
-- primary key by satellite id and piece id
CREATE UNIQUE INDEX pk_pieceinfo ON pieceinfo(satellite_id, piece_id);
-- table for storing bandwidth usage
CREATE TABLE bandwidth_usage (
satellite_id BLOB NOT NULL,
action INTEGER NOT NULL,
amount BIGINT NOT NULL,
created_at TIMESTAMP NOT NULL
);
CREATE INDEX idx_bandwidth_usage_satellite ON bandwidth_usage(satellite_id);
CREATE INDEX idx_bandwidth_usage_created ON bandwidth_usage(created_at);
-- table for storing all unsent orders
CREATE TABLE unsent_order (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
order_limit_expiration TIMESTAMP NOT NULL,
uplink_cert_id INTEGER NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE UNIQUE INDEX idx_orders ON unsent_order(satellite_id, serial_number);
-- table for storing all sent orders
CREATE TABLE order_archive (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
status INTEGER NOT NULL,
archived_at TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE INDEX idx_order_archive_satellite ON order_archive(satellite_id);
CREATE INDEX idx_order_archive_status ON order_archive(status);
-- table for storing vouchers
CREATE TABLE vouchers (
satellite_id BLOB PRIMARY KEY NOT NULL,
voucher_serialized BLOB NOT NULL,
expiration TIMESTAMP NOT NULL
);
INSERT INTO used_serial VALUES(X'0693a8529105f5ff763e30b6f58ead3fe7a4f93f32b4b298073c01b2b39fa76e',X'18283dd3cec0a5abf6112e903549bdff','2019-04-01 18:58:53.3169599+03:00');
INSERT INTO used_serial VALUES(X'976a6bbcfcec9d96d847f8642c377d5f23c118187fb0ca21e9e1c5a9fbafa5f7',X'18283dd3cec0a5abf6112e903549bdff','2019-04-01 18:58:53.3169599+03:00');
INSERT INTO unsent_order VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'1eddef484b4c03f01332279032796972',X'0a101eddef484b4c03f0133227903279697212202b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf410001a201968996e7ef170a402fdfd88b6753df792c063c07c555905ffac9cd3cbd1c00022200ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac30002a20d00cf14f3c68b56321ace04902dec0484eb6f9098b22b31c6b3f82db249f191630643802420c08dfeb88e50510a8c1a5b9034a0c08dfeb88e50510a8c1a5b9035246304402204df59dc6f5d1bb7217105efbc9b3604d19189af37a81efbf16258e5d7db5549e02203bb4ead16e6e7f10f658558c22b59c3339911841e8dbaae6e2dea821f7326894',X'0a101eddef484b4c03f0133227903279697210321a47304502206d4c106ddec88140414bac5979c95bdea7de2e0ecc5be766e08f7d5ea36641a7022100e932ff858f15885ffa52d07e260c2c25d3861810ea6157956c1793ad0c906284','2019-04-01 16:01:35.9254586+00:00',1);
INSERT INTO pieceinfo VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',123,'2019-05-09 00:00:00.000000+00:00',X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a47304502201c16d76ecd9b208f7ad9f1edf66ce73dce50da6bde6bbd7d278415099a727421022100ca730450e7f6506c2647516f6e20d0641e47c8270f58dde2bb07d1f5a3a45673',1,NULL,'epoch');
INSERT INTO pieceinfo VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',123,'2019-05-09 00:00:00.000000+00:00',X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a483046022100e623cf4705046e2c04d5b42d5edbecb81f000459713ad460c691b3361817adbf022100993da2a5298bb88de6c35b2e54009d1bf306cda5d441c228aa9eaf981ceb0f3d',2,NULL,'epoch');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',0,0,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',0,0,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO order_archive VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'62180593328b8ff3c9f97565fdfd305d',X'0a1062180593328b8ff3c9f97565fdfd305d12202b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf410001a201968996e7ef170a402fdfd88b6753df792c063c07c555905ffac9cd3cbd1c00022200ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac30002a2077003db64dfd50c5bdc84daf28bcef97f140d302c3e5bfd002bcc7ac04e1273430643802420c08fce688e50510a0ffe7ff014a0c08fce688e50510a0ffe7ff0152473045022100943d90068a1b1e6879b16a6ed8cdf0237005de09f61cddab884933fefd9692bf0220417a74f2e59523d962e800a1b06618f0113039d584e28aae37737e4a71555966',X'0a1062180593328b8ff3c9f97565fdfd305d10321a47304502200f4d97f03ad2d87501f68bfcf0525ec518aebf817cf56aa5eeaea53d01b153a102210096e60cf4b594837b43b5c841d283e4b72c9a09207d64bdd4665c700dc2e0a4a2',1,1,'2019-04-01 18:51:24.5374893+03:00');
INSERT INTO vouchers VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000', X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b', '2019-07-04 00:00:00.000000+00:00');
CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration);
CREATE INDEX idx_pieceinfo_deletion_failed ON pieceinfo(deletion_failed_at);
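This v7 snapshot keeps the certificate stub and the now-inert uplink_cert_id columns so older rows remain loadable. A self-contained sketch that opens such a database and lists the stub's columns via SQLite's table_info pragma (the path is a placeholder; expected output is a single cert_id column):
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "v7-snapshot.db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// PRAGMA table_info returns one row per column:
	// cid, name, type, notnull, dflt_value, pk.
	rows, err := db.Query(`PRAGMA table_info(certificate)`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var cid, notnull, pk int
		var name, typ string
		var dflt sql.NullString
		if err := rows.Scan(&cid, &name, &typ, &notnull, &dflt, &pk); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("column %q type %q\n", name, typ)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}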

View File

@@ -17,10 +17,10 @@ type usedSerials struct {
*InfoDB
}
// UsedSerials returns certificate database.
// UsedSerials returns used serials database.
func (db *DB) UsedSerials() piecestore.UsedSerials { return db.info.UsedSerials() }
// UsedSerials returns certificate database.
// UsedSerials returns used serials database.
func (db *InfoDB) UsedSerials() piecestore.UsedSerials { return &usedSerials{db} }
// Add adds a serial to the database.

View File

@@ -1,16 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package trust
import (
"context"
"storj.io/storj/pkg/identity"
)
// CertDB is a database of peer identities.
type CertDB interface {
Include(ctx context.Context, pi *identity.PeerIdentity) (certid int64, err error)
LookupByCertID(ctx context.Context, id int64) (*identity.PeerIdentity, error)
}

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package trust_test
import (
"testing"
"github.com/stretchr/testify/require"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testidentity"
"storj.io/storj/pkg/storj"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)
func TestCertDB(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
certdb := db.CertDB()
node0 := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
node1 := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())
certid0, err := certdb.Include(ctx, node0.PeerIdentity())
require.NoError(t, err)
certid1, err := certdb.Include(ctx, node1.PeerIdentity())
require.NoError(t, err)
certid0duplicate, err := certdb.Include(ctx, node0.PeerIdentity())
require.NoError(t, err)
require.Equal(t, certid0, certid0duplicate, "insert duplicate")
require.NotEqual(t, certid0, certid1, "insert non-duplicate")
identity, err := certdb.LookupByCertID(ctx, certid0)
require.NoError(t, err, "lookup by id")
require.Equal(t, node0.PeerIdentity(), identity)
})
}