storagenode/vouchers: remove storagenode vouchers (#2873)

Cameron 2019-08-26 12:35:19 -04:00 committed by Egon Elbre
parent 462640a9fe
commit 1f3537d4a9
8 changed files with 190 additions and 654 deletions

View File

@@ -30,7 +30,6 @@ import (
"storj.io/storj/storagenode/piecestore"
"storj.io/storj/storagenode/retain"
"storj.io/storj/storagenode/storagenodedb"
"storj.io/storj/storagenode/vouchers"
)
// newStorageNodes initializes storage nodes
@@ -121,9 +120,6 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatellites storj.Nod
RetainStatus: retain.Enabled,
MaxConcurrentRetain: 5,
},
Vouchers: vouchers.Config{
Interval: time.Hour,
},
Version: planet.NewVersionConfig(),
Bandwidth: bandwidth.Config{
Interval: time.Hour,

View File

@@ -6,7 +6,6 @@ package storagenode
import (
"context"
"net"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
@@ -40,7 +39,6 @@ import (
"storj.io/storj/storagenode/retain"
"storj.io/storj/storagenode/storageusage"
"storj.io/storj/storagenode/trust"
"storj.io/storj/storagenode/vouchers"
)
var (
@@ -62,7 +60,6 @@ type DB interface {
PieceSpaceUsedDB() pieces.PieceSpaceUsedDB
Bandwidth() bandwidth.DB
UsedSerials() piecestore.UsedSerials
Vouchers() vouchers.DB
Console() console.DB
Reputation() reputation.DB
StorageUsage() storageusage.DB
@@ -85,8 +82,6 @@ type Config struct {
Retain retain.Config
Vouchers vouchers.Config
Nodestats nodestats.Config
Console consoleserver.Config
@@ -136,8 +131,6 @@ type Peer struct {
Orders *orders.Service
}
Vouchers *vouchers.Service
Collector *collector.Service
NodeStats struct {
@@ -323,13 +316,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Storage2.Trust)
}
{ // setup vouchers
interval := config.Vouchers.Interval
buffer := interval + time.Hour
peer.Vouchers = vouchers.NewService(peer.Log.Named("vouchers"), peer.Transport, peer.DB.Vouchers(),
peer.Storage2.Trust, interval, buffer)
}
{ // setup storage node operator dashboard
peer.Console.Service, err = console.NewService(
peer.Log.Named("console:service"),
@@ -414,9 +400,6 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
group.Go(func() error {
return errs2.IgnoreCanceled(peer.Storage2.RetainService.Run(ctx))
})
group.Go(func() error {
return errs2.IgnoreCanceled(peer.Vouchers.Run(ctx))
})
group.Go(func() error {
return errs2.IgnoreCanceled(peer.Bandwidth.Run(ctx))
@@ -457,9 +440,6 @@ func (peer *Peer) Close() error {
if peer.Bandwidth != nil {
errlist.Add(peer.Bandwidth.Close())
}
if peer.Vouchers != nil {
errlist.Add(peer.Vouchers.Close())
}
if peer.Storage2.Monitor != nil {
errlist.Add(peer.Storage2.Monitor.Close())
}

View File

@@ -33,7 +33,6 @@ import (
"storj.io/storj/storagenode/piecestore"
"storj.io/storj/storagenode/reputation"
"storj.io/storj/storagenode/storageusage"
"storj.io/storj/storagenode/vouchers"
)
var (
@@ -100,7 +99,6 @@ type DB struct {
reputationDB *reputationDB
storageUsageDB *storageusageDB
usedSerialsDB *usedSerialsDB
vouchersDB *vouchersDB
kdb, ndb, adb storage.KeyValueStore
}
@@ -143,7 +141,6 @@ func New(log *zap.Logger, config Config) (*DB, error) {
reputationDB: newReputationDB(versionsDB, versionsPath),
storageUsageDB: newStorageusageDB(versionsDB, versionsPath),
usedSerialsDB: newUsedSerialsDB(versionsDB, versionsPath),
vouchersDB: newVouchersDB(versionsDB, versionsPath),
}
return db, nil
@@ -182,7 +179,6 @@ func NewTest(log *zap.Logger, storageDir string) (*DB, error) {
reputationDB: newReputationDB(versionsDB, versionsPath),
storageUsageDB: newStorageusageDB(versionsDB, versionsPath),
usedSerialsDB: newUsedSerialsDB(versionsDB, versionsPath),
vouchersDB: newVouchersDB(versionsDB, versionsPath),
}
return db, nil
}
@@ -247,7 +243,6 @@ func (db *DB) Close() error {
db.reputationDB.Close(),
db.storageUsageDB.Close(),
db.usedSerialsDB.Close(),
db.vouchersDB.Close(),
)
}
@@ -311,11 +306,6 @@ func (db *DB) UsedSerials() piecestore.UsedSerials {
return db.usedSerialsDB
}
// Vouchers returns the instance of the Vouchers database.
func (db *DB) Vouchers() vouchers.DB {
return db.vouchersDB
}
// RoutingTable returns kademlia routing table
func (db *DB) RoutingTable() (kdb, ndb, adb storage.KeyValueStore) {
return db.kdb, db.ndb, db.adb
@@ -653,6 +643,13 @@ func (db *DB) Migration() *migrate.Migration {
`INSERT INTO piece_space_used (total) select ifnull(sum(piece_size), 0) from pieceinfo_`,
},
},
{
Description: "Drop vouchers table",
Version: 18,
Action: migrate.SQL{
`DROP TABLE vouchers`,
},
},
},
}
}
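Since migration version 18 only drops the vouchers table, a quick way to confirm the step was applied is to check that the table is gone from the storage node's SQLite database. The query below is a hedged sketch, not part of this commit; the database file name and path depend on the node's configuration and are not shown in this diff.

-- should return no rows once migration version 18 has run
SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'vouchers';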

View File

@@ -0,0 +1,183 @@
-- table for keeping serials that need to be verified against
CREATE TABLE used_serial_ (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
expiration TIMESTAMP NOT NULL
);
-- primary key on satellite id and serial number
CREATE UNIQUE INDEX pk_used_serial_ ON used_serial_(satellite_id, serial_number);
-- expiration index to allow fast deletion
CREATE INDEX idx_used_serial_ ON used_serial_(expiration);
-- certificate table for storing uplink/satellite certificates
CREATE TABLE certificate (
cert_id INTEGER
);
-- table for storing piece meta info
CREATE TABLE pieceinfo_ (
satellite_id BLOB NOT NULL,
piece_id BLOB NOT NULL,
piece_size BIGINT NOT NULL,
piece_expiration TIMESTAMP,
order_limit BLOB NOT NULL,
uplink_piece_hash BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
deletion_failed_at TIMESTAMP,
piece_creation TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
-- primary key by satellite id and piece id
CREATE UNIQUE INDEX pk_pieceinfo_ ON pieceinfo_(satellite_id, piece_id);
-- fast queries for expiration for pieces that have one
CREATE INDEX idx_pieceinfo__expiration ON pieceinfo_(piece_expiration) WHERE piece_expiration IS NOT NULL;
-- table for storing bandwidth usage
CREATE TABLE bandwidth_usage (
satellite_id BLOB NOT NULL,
action INTEGER NOT NULL,
amount BIGINT NOT NULL,
created_at TIMESTAMP NOT NULL
);
CREATE INDEX idx_bandwidth_usage_satellite ON bandwidth_usage(satellite_id);
CREATE INDEX idx_bandwidth_usage_created ON bandwidth_usage(created_at);
-- table for storing all unsent orders
CREATE TABLE unsent_order (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
order_limit_expiration TIMESTAMP NOT NULL,
uplink_cert_id INTEGER NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE UNIQUE INDEX idx_orders ON unsent_order(satellite_id, serial_number);
-- table for storing all sent orders
CREATE TABLE order_archive_ (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
status INTEGER NOT NULL,
archived_at TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE TABLE bandwidth_usage_rollups (
interval_start TIMESTAMP NOT NULL,
satellite_id BLOB NOT NULL,
action INTEGER NOT NULL,
amount BIGINT NOT NULL,
PRIMARY KEY ( interval_start, satellite_id, action )
);
-- table to hold expiration data (and only expirations. no other pieceinfo)
CREATE TABLE piece_expirations (
satellite_id BLOB NOT NULL,
piece_id BLOB NOT NULL,
piece_expiration TIMESTAMP NOT NULL, -- date when it can be deleted
deletion_failed_at TIMESTAMP,
PRIMARY KEY ( satellite_id, piece_id )
);
CREATE INDEX idx_piece_expirations_piece_expiration ON piece_expirations(piece_expiration);
CREATE INDEX idx_piece_expirations_deletion_failed_at ON piece_expirations(deletion_failed_at);
-- tables to store nodestats cache
CREATE TABLE reputation (
satellite_id BLOB NOT NULL,
uptime_success_count INTEGER NOT NULL,
uptime_total_count INTEGER NOT NULL,
uptime_reputation_alpha REAL NOT NULL,
uptime_reputation_beta REAL NOT NULL,
uptime_reputation_score REAL NOT NULL,
audit_success_count INTEGER NOT NULL,
audit_total_count INTEGER NOT NULL,
audit_reputation_alpha REAL NOT NULL,
audit_reputation_beta REAL NOT NULL,
audit_reputation_score REAL NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY (satellite_id)
);
CREATE TABLE storage_usage (
satellite_id BLOB NOT NULL,
at_rest_total REAL NOT NULL,
timestamp TIMESTAMP NOT NULL,
PRIMARY KEY (satellite_id, timestamp)
);
CREATE TABLE piece_space_used (
total INTEGER NOT NULL,
satellite_id BLOB
);
CREATE UNIQUE INDEX idx_piece_space_used_satellite_id ON piece_space_used(satellite_id);
INSERT INTO unsent_order VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'1eddef484b4c03f01332279032796972',X'0a101eddef484b4c03f0133227903279697212202b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf410001a201968996e7ef170a402fdfd88b6753df792c063c07c555905ffac9cd3cbd1c00022200ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac30002a20d00cf14f3c68b56321ace04902dec0484eb6f9098b22b31c6b3f82db249f191630643802420c08dfeb88e50510a8c1a5b9034a0c08dfeb88e50510a8c1a5b9035246304402204df59dc6f5d1bb7217105efbc9b3604d19189af37a81efbf16258e5d7db5549e02203bb4ead16e6e7f10f658558c22b59c3339911841e8dbaae6e2dea821f7326894',X'0a101eddef484b4c03f0133227903279697210321a47304502206d4c106ddec88140414bac5979c95bdea7de2e0ecc5be766e08f7d5ea36641a7022100e932ff858f15885ffa52d07e260c2c25d3861810ea6157956c1793ad0c906284','2019-04-01 16:01:35.9254586+00:00',1);
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',0,0,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',0,0,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+00:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+00:00');
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',0,0);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',0,0);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 18:00:00+00:00',X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6);
INSERT INTO bandwidth_usage_rollups VALUES('2019-07-12 20:00:00+00:00',X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6);
INSERT INTO reputation VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,1.0,1.0,1.0,1,1,1.0,1.0,1.0,'2019-07-19 20:00:00+00:00');
INSERT INTO storage_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5.0,'2019-07-19 20:00:00+00:00');
INSERT INTO pieceinfo_ VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',1000,'2019-05-09 00:00:00.000000+00:00', X'', X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a47304502201c16d76ecd9b208f7ad9f1edf66ce73dce50da6bde6bbd7d278415099a727421022100ca730450e7f6506c2647516f6e20d0641e47c8270f58dde2bb07d1f5a3a45673',1,NULL,'epoch');
INSERT INTO pieceinfo_ VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',337,'2019-05-09 00:00:00.000000+00:00', X'', X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a483046022100e623cf4705046e2c04d5b42d5edbecb81f000459713ad460c691b3361817adbf022100993da2a5298bb88de6c35b2e54009d1bf306cda5d441c228aa9eaf981ceb0f3d',2,NULL,'epoch');
INSERT INTO piece_space_used (total) VALUES (1337);
INSERT INTO piece_space_used (total, satellite_id) VALUES (1337, X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000');
-- NEW DATA --

View File

@@ -1,112 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package storagenodedb
import (
"context"
"database/sql"
"time"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// ErrVouchers represents errors from the vouchers database.
var ErrVouchers = errs.Class("vouchersdb error")
type vouchersDB struct {
location string
SQLDB
}
// newVouchersDB returns a new instance of vouchersdb initialized with the specified database.
func newVouchersDB(db SQLDB, location string) *vouchersDB {
return &vouchersDB{
location: location,
SQLDB: db,
}
}
// Put inserts or updates a voucher from a satellite
func (db *vouchersDB) Put(ctx context.Context, voucher *pb.Voucher) (err error) {
defer mon.Task()(&ctx)(&err)
voucherSerialized, err := proto.Marshal(voucher)
if err != nil {
return ErrVouchers.Wrap(err)
}
_, err = db.Exec(`
INSERT INTO vouchers(
satellite_id,
voucher_serialized,
expiration
) VALUES (?, ?, ?)
ON CONFLICT(satellite_id) DO UPDATE SET
voucher_serialized = ?,
expiration = ?
`, voucher.SatelliteId, voucherSerialized, voucher.Expiration.UTC(), voucherSerialized, voucher.Expiration.UTC())
return err
}
// NeedVoucher returns true if a voucher from a particular satellite is expired, about to expire, or does not exist
func (db *vouchersDB) NeedVoucher(ctx context.Context, satelliteID storj.NodeID, expirationBuffer time.Duration) (need bool, err error) {
defer mon.Task()(&ctx)(&err)
expiresBefore := time.Now().Add(expirationBuffer)
// query returns row if voucher is good. If not, it is either expiring or does not exist
row := db.QueryRow(`
SELECT satellite_id
FROM vouchers
WHERE satellite_id = ? AND expiration >= ?
`, satelliteID, expiresBefore.UTC())
var bytes []byte
err = row.Scan(&bytes)
if err != nil {
if err == sql.ErrNoRows {
return true, nil
}
return false, ErrVouchers.Wrap(err)
}
return false, nil
}
// GetAll returns all vouchers in the table
func (db *vouchersDB) GetAll(ctx context.Context) (vouchers []*pb.Voucher, err error) {
defer mon.Task()(&ctx)(&err)
rows, err := db.Query(`
SELECT voucher_serialized
FROM vouchers
`)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrVouchers.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var voucherSerialized []byte
err := rows.Scan(&voucherSerialized)
if err != nil {
return nil, ErrVouchers.Wrap(err)
}
voucher := &pb.Voucher{}
err = proto.Unmarshal(voucherSerialized, voucher)
if err != nil {
return nil, ErrVouchers.Wrap(err)
}
vouchers = append(vouchers, voucher)
}
return vouchers, nil
}

View File

@@ -1,173 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package vouchers
import (
"context"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
"storj.io/storj/storagenode/trust"
)
var (
// VoucherError represents errors with vouchers
VoucherError = errs.Class("voucher")
mon = monkit.Package()
)
// DB implements storing and retrieving vouchers
type DB interface {
// Put inserts or updates a voucher from a satellite
Put(context.Context, *pb.Voucher) error
// GetAll returns all vouchers in the table
GetAll(context.Context) ([]*pb.Voucher, error)
// NeedVoucher returns true if a voucher from a particular satellite is expired, about to expire, or does not exist
NeedVoucher(context.Context, storj.NodeID, time.Duration) (bool, error)
}
// Config defines configuration for requesting vouchers.
type Config struct {
Interval time.Duration `help:"interval between voucher service iterations" default:"168h0m0s"`
}
// Service is a service for requesting vouchers
type Service struct {
log *zap.Logger
transport transport.Client
vouchersdb DB
trust *trust.Pool
expirationBuffer time.Duration
Loop sync2.Cycle
}
// NewService creates a new voucher service
func NewService(log *zap.Logger, transport transport.Client, vouchersdb DB, trust *trust.Pool, interval, expirationBuffer time.Duration) *Service {
return &Service{
log: log,
transport: transport,
vouchersdb: vouchersdb,
trust: trust,
expirationBuffer: expirationBuffer,
Loop: *sync2.NewCycle(interval),
}
}
// Run sends requests to satellites for vouchers
func (service *Service) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return service.Loop.Run(ctx, service.RunOnce)
}
// RunOnce runs one iteration of the voucher request service
func (service *Service) RunOnce(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
service.log.Info("Checking vouchers")
trustedSatellites := service.trust.GetSatellites(ctx)
if len(trustedSatellites) == 0 {
service.log.Debug("No trusted satellites configured. No vouchers to request")
return nil
}
var group errgroup.Group
ctx, cancel := context.WithTimeout(ctx, time.Hour)
defer cancel()
for _, satellite := range trustedSatellites {
satellite := satellite
needVoucher, err := service.vouchersdb.NeedVoucher(ctx, satellite, service.expirationBuffer)
if err != nil {
service.log.Error("getting voucher status", zap.Error(err))
return nil
}
if needVoucher {
group.Go(func() error {
service.Request(ctx, satellite)
return nil
})
}
}
_ = group.Wait() // doesn't return errors
return nil
}
// Request makes a voucher request to a satellite
func (service *Service) Request(ctx context.Context, satelliteID storj.NodeID) {
service.log.Info("Requesting voucher", zap.String("satellite", satelliteID.String()))
err := service.request(ctx, satelliteID)
if err != nil {
service.log.Error("Error requesting voucher", zap.String("satellite", satelliteID.String()), zap.Error(err))
}
}
func (service *Service) request(ctx context.Context, satelliteID storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
address, err := service.trust.GetAddress(ctx, satelliteID)
if err != nil {
return err
}
conn, err := service.transport.DialNode(ctx, &pb.Node{
Id: satelliteID,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: address,
},
})
if err != nil {
return VoucherError.New("unable to connect to the satellite: %v", err)
}
defer func() {
if cerr := conn.Close(); cerr != nil {
err = errs.Combine(err, VoucherError.New("failed to close connection: %v", cerr))
}
}()
resp, err := pb.NewVouchersClient(conn).Request(ctx, &pb.VoucherRequest{})
if err != nil {
return VoucherError.New("failed to start request: %v", err)
}
switch resp.GetStatus() {
case pb.VoucherResponse_REJECTED:
service.log.Info("Voucher request denied. Vetting process not yet complete")
case pb.VoucherResponse_ACCEPTED:
voucher := resp.GetVoucher()
if err := service.VerifyVoucher(ctx, satelliteID, voucher); err != nil {
return err
}
err = service.vouchersdb.Put(ctx, voucher)
if err != nil {
return err
}
service.log.Info("Voucher received", zap.String("satellite", voucher.SatelliteId.String()))
default:
service.log.Warn("Unknown voucher response status")
}
return err
}
// Close stops the voucher service
func (service *Service) Close() error {
service.Loop.Close()
return nil
}

View File

@@ -1,52 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package vouchers
import (
"context"
"time"
"github.com/zeebo/errs"
"storj.io/storj/internal/errs2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/signing"
"storj.io/storj/pkg/storj"
)
var (
// ErrVerify is returned when voucher fields are not valid.
ErrVerify = errs.Class("verification")
)
// VerifyVoucher verifies that the signature and the information contained in a voucher are valid
func (service *Service) VerifyVoucher(ctx context.Context, satellite storj.NodeID, voucher *pb.Voucher) (err error) {
defer mon.Task()(&ctx)(&err)
if self := service.transport.Identity().ID; voucher.StorageNodeId != self {
return ErrVerify.New("Storage node ID does not match expected: (%v) (%v)", voucher.StorageNodeId, self)
}
if voucher.SatelliteId != satellite {
return ErrVerify.New("Satellite ID does not match expected: (%v) (%v)", voucher.SatelliteId, satellite)
}
if voucher.Expiration.Before(time.Now()) {
return ErrVerify.New("Voucher is already expired")
}
signee, err := service.trust.GetSignee(ctx, voucher.SatelliteId)
if err != nil {
if errs2.IsCanceled(err) {
return err
}
return ErrVerify.New("unable to get signee: %v", err) // TODO: report grpc status bad message
}
if err := signing.VerifyVoucher(ctx, signee, voucher); err != nil {
return ErrVerify.New("invalid voucher signature: %v", err) // TODO: report grpc bad message
}
return nil
}

View File

@@ -1,283 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package vouchers_test
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testidentity"
"storj.io/storj/internal/testplanet"
"storj.io/storj/internal/testrand"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/signing"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)
func TestDB(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
vdb := db.Vouchers()
satellite0 := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
satellite1 := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())
storagenode := testidentity.MustPregeneratedSignedIdentity(2, storj.LatestIDVersion())
voucher := &pb.Voucher{
SatelliteId: satellite0.ID,
StorageNodeId: storagenode.ID,
Expiration: time.Now().Add(24 * time.Hour),
}
// Test GetAll returns nil result and nil error when table is empty
results, err := vdb.GetAll(ctx)
require.NoError(t, err)
assert.Nil(t, results)
// Test Put returns no error
err = vdb.Put(ctx, voucher)
require.NoError(t, err)
// Test GetAll returns accurate voucher
results, err = vdb.GetAll(ctx)
require.NoError(t, err)
for _, res := range results {
require.Equal(t, voucher.SatelliteId, res.SatelliteId)
require.Equal(t, voucher.StorageNodeId, res.StorageNodeId)
require.True(t, voucher.Expiration.Equal(res.Expiration))
}
// test NeedVoucher returns true if voucher expiration falls within expirationBuffer period
// voucher expiration is 24 hours from now
expirationBuffer := 48 * time.Hour
need, err := vdb.NeedVoucher(ctx, satellite0.ID, expirationBuffer)
require.NoError(t, err)
require.True(t, need)
// test NeedVoucher returns true if satellite ID does not exist in table
need, err = vdb.NeedVoucher(ctx, teststorj.NodeIDFromString("testnodeID"), expirationBuffer)
require.NoError(t, err)
require.True(t, need)
// test NeedVoucher returns false if satellite ID exists and expiration does not fall within expirationBuffer period
// voucher expiration is 24 hours from now
expirationBuffer = 1 * time.Hour
need, err = vdb.NeedVoucher(ctx, satellite0.ID, expirationBuffer)
require.NoError(t, err)
require.False(t, need)
// Test Put with duplicate satellite id updates voucher info
voucher.Expiration = time.Now().Add(48 * time.Hour)
err = vdb.Put(ctx, voucher)
require.NoError(t, err)
results, err = vdb.GetAll(ctx)
require.NoError(t, err)
for _, res := range results {
require.True(t, voucher.Expiration.Equal(res.Expiration))
}
// test GetAll returns more than one
voucher = &pb.Voucher{
SatelliteId: satellite1.ID,
StorageNodeId: storagenode.ID,
Expiration: time.Now().Add(24 * time.Hour),
}
err = vdb.Put(ctx, voucher)
require.NoError(t, err)
results, err = vdb.GetAll(ctx)
require.NoError(t, err)
require.Len(t, results, 2)
})
}
func TestVouchersService(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 5, StorageNodeCount: 1, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Vouchers.Expiration = time.Hour
config.Overlay.Node.AuditCount = 1
config.Audit.Interval = time.Hour
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
node := planet.StorageNodes[0]
node.Vouchers.Loop.Stop()
// node type needs to be set to receive vouchers
for _, sat := range planet.Satellites {
_, err := sat.Overlay.Service.UpdateNodeInfo(ctx, node.ID(), &pb.InfoResponse{Type: pb.NodeType_STORAGE})
require.NoError(t, err)
}
// run service and assert no vouchers (does not meet audit requirement)
err := node.Vouchers.RunOnce(ctx)
require.NoError(t, err)
vouchers, err := node.DB.Vouchers().GetAll(ctx)
require.NoError(t, err)
assert.Nil(t, vouchers)
// update node's audit count above reputable threshold on each satellite
for _, sat := range planet.Satellites {
_, err := sat.Overlay.Service.UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: node.ID(),
IsUp: true,
AuditSuccess: true,
AuditLambda: 1,
AuditWeight: 1,
AuditDQ: 0.5,
UptimeLambda: 1,
UptimeWeight: 1,
UptimeDQ: 0.5,
})
require.NoError(t, err)
}
// Node is now vetted. Run service and check vouchers have been received
err = node.Vouchers.RunOnce(ctx)
require.NoError(t, err)
vouchers, err = node.DB.Vouchers().GetAll(ctx)
require.NoError(t, err)
assert.Len(t, vouchers, len(planet.Satellites))
// Check expiration is updated
oldVoucher := vouchers[0]
// Run service and get new voucher with new expiration
err = node.Vouchers.RunOnce(ctx)
require.NoError(t, err)
newVouchers, err := node.DB.Vouchers().GetAll(ctx)
require.NoError(t, err)
// assert old expiration is before new expiration
assert.True(t, oldVoucher.Expiration.Before(newVouchers[0].Expiration))
})
}
func TestVerifyVoucher(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 2, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
service := planet.StorageNodes[0].Vouchers
service.Loop.Pause()
satellite0 := planet.Satellites[0]
satellite1 := planet.Satellites[1]
storagenode := planet.StorageNodes[0]
tests := []struct {
satelliteID storj.NodeID
storagenodeID storj.NodeID
expiration time.Time
invalidSignature bool
err string
}{
{ // passing
satelliteID: satellite0.ID(),
storagenodeID: storagenode.ID(),
expiration: time.Now().Add(24 * time.Hour),
invalidSignature: false,
err: "",
},
{ // incorrect satellite ID
satelliteID: teststorj.NodeIDFromString("satellite"),
storagenodeID: storagenode.ID(),
expiration: time.Now().Add(24 * time.Hour),
invalidSignature: false,
err: fmt.Sprintf("verification: Satellite ID does not match expected: (%v) (%v)", teststorj.NodeIDFromString("satellite"), satellite0.ID()),
},
{ // incorrect storagenode ID
satelliteID: satellite0.ID(),
storagenodeID: teststorj.NodeIDFromString("storagenode"),
expiration: time.Now().Add(24 * time.Hour),
invalidSignature: false,
err: fmt.Sprintf("verification: Storage node ID does not match expected: (%v) (%v)", teststorj.NodeIDFromString("storagenode"), storagenode.ID()),
},
{ // expired voucher
satelliteID: satellite0.ID(),
storagenodeID: storagenode.ID(),
expiration: time.Now().Add(-24 * time.Hour),
invalidSignature: false,
err: "verification: Voucher is already expired",
},
{ // invalid signature
satelliteID: satellite0.ID(),
storagenodeID: storagenode.ID(),
expiration: time.Now().Add(24 * time.Hour),
invalidSignature: true,
err: "verification: invalid voucher signature: signature verification error: signature is not valid",
},
}
for _, tt := range tests {
var signer signing.Signer
if tt.invalidSignature {
signer = signing.SignerFromFullIdentity(satellite1.Identity)
} else {
signer = signing.SignerFromFullIdentity(satellite0.Identity)
}
voucher, err := signing.SignVoucher(ctx, signer, &pb.Voucher{
SatelliteId: tt.satelliteID,
StorageNodeId: tt.storagenodeID,
Expiration: tt.expiration,
})
require.NoError(t, err)
err = service.VerifyVoucher(ctx, satellite0.ID(), voucher)
if tt.err != "" {
require.Equal(t, tt.err, err.Error())
} else {
require.NoError(t, err)
}
}
})
}
func TestDB_Trivial(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
satelliteID := testrand.NodeID()
{ // Ensure Put works at all
err := db.Vouchers().Put(ctx, &pb.Voucher{
SatelliteId: satelliteID,
Expiration: time.Now(),
})
require.NoError(t, err)
}
{ // Ensure NeedVoucher works at all
_, err := db.Vouchers().NeedVoucher(ctx, satelliteID, time.Hour)
require.NoError(t, err)
}
{ // Ensure GetAll works at all
_, err := db.Vouchers().GetAll(ctx)
require.NoError(t, err)
}
})
}