storagenode/storagenodedb: refactor SQLite3 database connection initialization. (#2732)
* Rebasing changes against master. * Added back withTx(). * Fix using new error type. * Moved database initialization back into the struct. * Fix failing migration tests. * Fix linting errors. * Renamed database object names to be consistent. * Fixing linting error in imports. * Adding missing change from merge. * Fix error name.
parent 87ef5e3398
commit 476fbf919a
@ -16,25 +16,31 @@ import (
|
||||
"storj.io/storj/storagenode/bandwidth"
|
||||
)
|
||||
|
||||
type bandwidthdb struct {
|
||||
// ErrBandwidth represents errors from the bandwidthdb database.
|
||||
var ErrBandwidth = errs.Class("bandwidthdb error")
|
||||
|
||||
type bandwidthDB struct {
|
||||
// Moved to top of struct to resolve alignment issue with atomic operations on ARM
|
||||
usedSpace int64
|
||||
usedMu sync.RWMutex
|
||||
usedSince time.Time
|
||||
|
||||
*InfoDB
|
||||
location string
|
||||
SQLDB
|
||||
}
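The comment about field ordering refers to a real Go constraint: the 64-bit functions in sync/atomic require their operand to be 8-byte aligned, and on 32-bit platforms (including the ARM boards many storage nodes run on) the only placement the runtime guarantees to be aligned is the first word of an allocated struct. A minimal illustration with hypothetical types, not taken from this codebase:

import "sync/atomic"

// unsafeLayout: atomic.AddInt64(&v.used, 1) may panic with
// "unaligned 64-bit atomic operation" on 32-bit ARM, because the leading
// bool can leave the int64 at a 4-byte offset.
type unsafeLayout struct {
    closed bool
    used   int64
}

// safeLayout mirrors bandwidthDB above: the int64 comes first, so it is
// guaranteed to be 64-bit aligned whenever the struct is allocated.
type safeLayout struct {
    used   int64
    closed bool
}

func addUsed(v *safeLayout, n int64) { atomic.AddInt64(&v.used, n) }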
|
||||
|
||||
// Bandwidth returns table for storing bandwidth usage.
|
||||
func (db *DB) Bandwidth() bandwidth.DB { return db.info.Bandwidth() }
|
||||
|
||||
// Bandwidth returns table for storing bandwidth usage.
|
||||
func (db *InfoDB) Bandwidth() bandwidth.DB { return &db.bandwidthdb }
|
||||
// newBandwidthDB returns a new instance of bandwidthDB initialized with the specified database.
|
||||
func newBandwidthDB(db SQLDB, location string) *bandwidthDB {
|
||||
return &bandwidthDB{
|
||||
location: location,
|
||||
SQLDB: db,
|
||||
}
|
||||
}
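Each table now gets a small wrapper that embeds SQLDB, so *sql.DB's methods are promoted onto it, plus the location of the backing file, and a newXxxDB constructor that the top-level DB wires up. A sketch of the same pattern for a purely hypothetical table (assuming the context and SQLDB types are in scope), which is why the method bodies below can call db.Exec directly instead of the old db.db.Exec:

// notificationsDB is hypothetical; it only illustrates the wrapper pattern.
type notificationsDB struct {
    location string
    SQLDB
}

// newNotificationsDB returns a new instance backed by the given connection.
func newNotificationsDB(db SQLDB, location string) *notificationsDB {
    return &notificationsDB{location: location, SQLDB: db}
}

// Add uses ExecContext promoted from the embedded SQLDB.
func (db *notificationsDB) Add(ctx context.Context, message string) error {
    _, err := db.ExecContext(ctx, `INSERT INTO notifications(message) VALUES(?)`, message)
    return err
}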
|
||||
|
||||
// Add adds bandwidth usage to the table
|
||||
func (db *bandwidthdb) Add(ctx context.Context, satelliteID storj.NodeID, action pb.PieceAction, amount int64, created time.Time) (err error) {
|
||||
func (db *bandwidthDB) Add(ctx context.Context, satelliteID storj.NodeID, action pb.PieceAction, amount int64, created time.Time) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
_, err = db.db.Exec(`
|
||||
_, err = db.Exec(`
|
||||
INSERT INTO
|
||||
bandwidth_usage(satellite_id, action, amount, created_at)
|
||||
VALUES(?, ?, ?, ?)`, satelliteID, action, amount, created.UTC())
|
||||
@ -54,11 +60,11 @@ func (db *bandwidthdb) Add(ctx context.Context, satelliteID storj.NodeID, action
|
||||
db.usedSpace = usage.Total()
|
||||
}
|
||||
}
|
||||
return ErrInfo.Wrap(err)
|
||||
return ErrBandwidth.Wrap(err)
|
||||
}
|
||||
|
||||
// MonthSummary returns summary of the current month's bandwidth usage
|
||||
func (db *bandwidthdb) MonthSummary(ctx context.Context) (_ int64, err error) {
|
||||
func (db *bandwidthDB) MonthSummary(ctx context.Context) (_ int64, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
db.usedMu.RLock()
|
||||
beginningOfMonth := getBeginningOfMonth(time.Now().UTC())
|
||||
@ -77,14 +83,14 @@ func (db *bandwidthdb) MonthSummary(ctx context.Context) (_ int64, err error) {
|
||||
}
|
||||
|
||||
// Summary returns summary of bandwidth usages
|
||||
func (db *bandwidthdb) Summary(ctx context.Context, from, to time.Time) (_ *bandwidth.Usage, err error) {
|
||||
func (db *bandwidthDB) Summary(ctx context.Context, from, to time.Time) (_ *bandwidth.Usage, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
usage := &bandwidth.Usage{}
|
||||
|
||||
from = from.UTC()
|
||||
to = to.UTC()
|
||||
rows, err := db.db.Query(`
|
||||
rows, err := db.Query(`
|
||||
SELECT action, sum(a) amount from(
|
||||
SELECT action, sum(amount) a
|
||||
FROM bandwidth_usage
|
||||
@ -101,7 +107,7 @@ func (db *bandwidthdb) Summary(ctx context.Context, from, to time.Time) (_ *band
|
||||
if err == sql.ErrNoRows {
|
||||
return usage, nil
|
||||
}
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
return nil, ErrBandwidth.Wrap(err)
|
||||
}
|
||||
defer func() { err = errs.Combine(err, rows.Close()) }()
|
||||
|
||||
@ -110,23 +116,23 @@ func (db *bandwidthdb) Summary(ctx context.Context, from, to time.Time) (_ *band
|
||||
var amount int64
|
||||
err := rows.Scan(&action, &amount)
|
||||
if err != nil {
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
return nil, ErrBandwidth.Wrap(err)
|
||||
}
|
||||
usage.Include(action, amount)
|
||||
}
|
||||
|
||||
return usage, ErrInfo.Wrap(rows.Err())
|
||||
return usage, ErrBandwidth.Wrap(rows.Err())
|
||||
}
|
||||
|
||||
// SummaryBySatellite returns summary of bandwidth usage grouping by satellite.
|
||||
func (db *bandwidthdb) SummaryBySatellite(ctx context.Context, from, to time.Time) (_ map[storj.NodeID]*bandwidth.Usage, err error) {
|
||||
func (db *bandwidthDB) SummaryBySatellite(ctx context.Context, from, to time.Time) (_ map[storj.NodeID]*bandwidth.Usage, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
entries := map[storj.NodeID]*bandwidth.Usage{}
|
||||
|
||||
from = from.UTC()
|
||||
to = to.UTC()
|
||||
rows, err := db.db.Query(`
|
||||
rows, err := db.Query(`
|
||||
SELECT satellite_id, action, sum(a) amount from(
|
||||
SELECT satellite_id, action, sum(amount) a
|
||||
FROM bandwidth_usage
|
||||
@ -143,7 +149,7 @@ func (db *bandwidthdb) SummaryBySatellite(ctx context.Context, from, to time.Tim
|
||||
if err == sql.ErrNoRows {
|
||||
return entries, nil
|
||||
}
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
return nil, ErrBandwidth.Wrap(err)
|
||||
}
|
||||
defer func() { err = errs.Combine(err, rows.Close()) }()
|
||||
|
||||
@ -154,7 +160,7 @@ func (db *bandwidthdb) SummaryBySatellite(ctx context.Context, from, to time.Tim
|
||||
|
||||
err := rows.Scan(&satelliteID, &action, &amount)
|
||||
if err != nil {
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
return nil, ErrBandwidth.Wrap(err)
|
||||
}
|
||||
|
||||
entry, ok := entries[satelliteID]
|
||||
@ -166,11 +172,11 @@ func (db *bandwidthdb) SummaryBySatellite(ctx context.Context, from, to time.Tim
|
||||
entry.Include(action, amount)
|
||||
}
|
||||
|
||||
return entries, ErrInfo.Wrap(rows.Err())
|
||||
return entries, ErrBandwidth.Wrap(rows.Err())
|
||||
}
|
||||
|
||||
// Rollup bandwidth_usage data earlier than the current hour, then delete the rolled up records
|
||||
func (db *bandwidthdb) Rollup(ctx context.Context) (err error) {
|
||||
func (db *bandwidthDB) Rollup(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
@ -180,7 +186,7 @@ func (db *bandwidthdb) Rollup(ctx context.Context) (err error) {
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return ErrInfo.Wrap(err)
|
||||
return ErrBandwidth.Wrap(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
@ -203,12 +209,12 @@ func (db *bandwidthdb) Rollup(ctx context.Context) (err error) {
|
||||
DELETE FROM bandwidth_usage WHERE datetime(created_at) < datetime(?);
|
||||
`, hour, hour)
|
||||
if err != nil {
|
||||
return ErrInfo.Wrap(err)
|
||||
return ErrBandwidth.Wrap(err)
|
||||
}
|
||||
|
||||
_, err = result.RowsAffected()
|
||||
if err != nil {
|
||||
return ErrInfo.Wrap(err)
|
||||
return ErrBandwidth.Wrap(err)
|
||||
}
|
||||
|
||||
return nil
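Rollup wraps the aggregation in a transaction: Begin above, then a deferred function (not shown in this hunk) that rolls back on error and commits otherwise. The commit message mentions adding back withTx(); the version removed from infodb.go further down in this diff shows that commit/rollback shape, and a sketch of it adapted to the new SQLDB type looks like:

// withTx runs cb inside a transaction: roll back if cb (or Begin) failed,
// commit otherwise, folding any Rollback error into the returned error.
func withTx(db SQLDB, cb func(tx *sql.Tx) error) (err error) {
    tx, err := db.Begin()
    if err != nil {
        return err
    }
    defer func() {
        if err != nil {
            err = errs.Combine(err, tx.Rollback())
            return
        }
        err = tx.Commit()
    }()
    return cb(tx)
}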
|
||||
|
@ -15,24 +15,25 @@ import (
|
||||
"storj.io/storj/storagenode/console"
|
||||
)
|
||||
|
||||
type consoledb struct {
|
||||
*InfoDB
|
||||
type consoleDB struct {
|
||||
SQLDB
|
||||
}
|
||||
|
||||
// Console returns console.DB
|
||||
func (db *InfoDB) Console() console.DB { return &consoledb{db} }
|
||||
|
||||
// Console returns console.DB
|
||||
func (db *DB) Console() console.DB { return db.info.Console() }
|
||||
|
||||
// Bandwidth returns consoledb as console.Bandwidth
|
||||
func (db *consoledb) Bandwidth() console.Bandwidth {
|
||||
func (db *consoleDB) Bandwidth() console.Bandwidth {
|
||||
return db
|
||||
}
|
||||
|
||||
// newConsoleDB returns a new instance of consoledb initialized with the specified database.
|
||||
func newConsoleDB(db SQLDB) *consoleDB {
|
||||
return &consoleDB{
|
||||
SQLDB: db,
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaily returns slice of daily bandwidth usage for provided time range,
|
||||
// sorted in ascending order for particular satellite
|
||||
func (db *consoledb) GetDaily(ctx context.Context, satelliteID storj.NodeID, from, to time.Time) (_ []console.BandwidthUsed, err error) {
|
||||
func (db *consoleDB) GetDaily(ctx context.Context, satelliteID storj.NodeID, from, to time.Time) (_ []console.BandwidthUsed, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
since, _ := date.DayBoundary(from.UTC())
|
||||
@ -45,7 +46,7 @@ func (db *consoledb) GetDaily(ctx context.Context, satelliteID storj.NodeID, fro
|
||||
|
||||
// GetDailyTotal returns slice of daily bandwidth usage for provided time range,
|
||||
// sorted in ascending order
|
||||
func (db *consoledb) GetDailyTotal(ctx context.Context, from, to time.Time) (_ []console.BandwidthUsed, err error) {
|
||||
func (db *consoleDB) GetDailyTotal(ctx context.Context, from, to time.Time) (_ []console.BandwidthUsed, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
since, _ := date.DayBoundary(from.UTC())
|
||||
@ -58,7 +59,7 @@ func (db *consoledb) GetDailyTotal(ctx context.Context, from, to time.Time) (_ [
|
||||
|
||||
// getDailyBandwidthUsed returns slice of grouped by date bandwidth usage
|
||||
// sorted in ascending order and applied condition if any
|
||||
func (db *consoledb) getDailyBandwidthUsed(ctx context.Context, cond string, args ...interface{}) (_ []console.BandwidthUsed, err error) {
|
||||
func (db *consoleDB) getDailyBandwidthUsed(ctx context.Context, cond string, args ...interface{}) (_ []console.BandwidthUsed, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
query := `SELECT action, SUM(amount), created_at
|
||||
@ -67,7 +68,7 @@ func (db *consoledb) getDailyBandwidthUsed(ctx context.Context, cond string, arg
|
||||
GROUP BY DATE(created_at), action
|
||||
ORDER BY created_at ASC`
|
||||
|
||||
rows, err := db.db.QueryContext(ctx, query, args...)
|
||||
rows, err := db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -4,25 +4,72 @@
|
||||
package storagenodedb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3" // used indirectly
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
||||
|
||||
"storj.io/storj/internal/dbutil"
|
||||
"storj.io/storj/internal/migrate"
|
||||
"storj.io/storj/pkg/kademlia"
|
||||
"storj.io/storj/storage"
|
||||
"storj.io/storj/storage/boltdb"
|
||||
"storj.io/storj/storage/filestore"
|
||||
"storj.io/storj/storage/teststore"
|
||||
"storj.io/storj/storagenode"
|
||||
"storj.io/storj/storagenode/bandwidth"
|
||||
"storj.io/storj/storagenode/console"
|
||||
"storj.io/storj/storagenode/orders"
|
||||
"storj.io/storj/storagenode/pieces"
|
||||
"storj.io/storj/storagenode/piecestore"
|
||||
"storj.io/storj/storagenode/reputation"
|
||||
"storj.io/storj/storagenode/storageusage"
|
||||
"storj.io/storj/storagenode/vouchers"
|
||||
)
|
||||
|
||||
var (
|
||||
mon = monkit.Package()
|
||||
|
||||
// ErrDatabase represents errors from the databases.
|
||||
ErrDatabase = errs.Class("storage node database error")
|
||||
)
|
||||
|
||||
var _ storagenode.DB = (*DB)(nil)
|
||||
|
||||
// SQLDB defines interface that matches *sql.DB
|
||||
// this is such that we can use utccheck.DB for the backend
|
||||
//
|
||||
// TODO: wrap the connector instead of *sql.DB
|
||||
type SQLDB interface {
|
||||
Begin() (*sql.Tx, error)
|
||||
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
|
||||
Close() error
|
||||
Conn(ctx context.Context) (*sql.Conn, error)
|
||||
Driver() driver.Driver
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
|
||||
Ping() error
|
||||
PingContext(ctx context.Context) error
|
||||
Prepare(query string) (*sql.Stmt, error)
|
||||
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
||||
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
|
||||
QueryRow(query string, args ...interface{}) *sql.Row
|
||||
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
|
||||
SetConnMaxLifetime(d time.Duration)
|
||||
SetMaxIdleConns(n int)
|
||||
SetMaxOpenConns(n int)
|
||||
}
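Because SQLDB mirrors *sql.DB's method set, anything with those methods can back the table wrappers: the raw connection in production, or an instrumenting wrapper such as the utccheck.DB mentioned in the comment for tests. A compile-time sketch (loggingDB is hypothetical and only shows the shape of such a wrapper):

var _ SQLDB = (*sql.DB)(nil) // the production connection satisfies SQLDB as-is

// loggingDB satisfies SQLDB by embedding *sql.DB and overriding only the
// methods it wants to observe.
type loggingDB struct {
    *sql.DB
    log *zap.Logger
}

func (l *loggingDB) Exec(query string, args ...interface{}) (sql.Result, error) {
    l.log.Debug("exec", zap.String("query", query))
    return l.DB.Exec(query, args...)
}

var _ SQLDB = (*loggingDB)(nil)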
|
||||
|
||||
// Config configures storage node database
|
||||
type Config struct {
|
||||
// TODO: figure out better names
|
||||
@ -43,7 +90,17 @@ type DB struct {
|
||||
Close() error
|
||||
}
|
||||
|
||||
info *InfoDB
|
||||
versionsDB *versionsDB
|
||||
v0PieceInfoDB *v0PieceInfoDB
|
||||
bandwidthDB *bandwidthDB
|
||||
consoleDB *consoleDB
|
||||
ordersDB *ordersDB
|
||||
pieceExpirationDB *pieceExpirationDB
|
||||
pieceSpaceUsedDB *pieceSpaceUsedDB
|
||||
reputationDB *reputationDB
|
||||
storageUsageDB *storageusageDB
|
||||
usedSerialsDB *usedSerialsDB
|
||||
vouchersDB *vouchersDB
|
||||
|
||||
kdb, ndb, adb storage.KeyValueStore
|
||||
}
|
||||
@ -56,27 +113,40 @@ func New(log *zap.Logger, config Config) (*DB, error) {
|
||||
}
|
||||
pieces := filestore.New(log, piecesDir)
|
||||
|
||||
infodb, err := newInfo(config.Info2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dbs, err := boltdb.NewShared(config.Kademlia, kademlia.KademliaBucket, kademlia.NodeBucket, kademlia.AntechamberBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DB{
|
||||
log: log,
|
||||
versionsPath := config.Info2
|
||||
versionsDB, err := openDatabase(versionsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db := &DB{
|
||||
log: log,
|
||||
pieces: pieces,
|
||||
kdb: dbs[0],
|
||||
ndb: dbs[1],
|
||||
adb: dbs[2],
|
||||
|
||||
info: infodb,
|
||||
// Initialize databases. Currently shares one info.db database file but
|
||||
// in the future these will initialize their own database connections.
|
||||
versionsDB: newVersionsDB(versionsDB, versionsPath),
|
||||
v0PieceInfoDB: newV0PieceInfoDB(versionsDB, versionsPath),
|
||||
bandwidthDB: newBandwidthDB(versionsDB, versionsPath),
|
||||
consoleDB: newConsoleDB(versionsDB),
|
||||
ordersDB: newOrdersDB(versionsDB, versionsPath),
|
||||
pieceExpirationDB: newPieceExpirationDB(versionsDB, versionsPath),
|
||||
pieceSpaceUsedDB: newPieceSpaceUsedDB(versionsDB, versionsPath),
|
||||
reputationDB: newReputationDB(versionsDB, versionsPath),
|
||||
storageUsageDB: newStorageusageDB(versionsDB, versionsPath),
|
||||
usedSerialsDB: newUsedSerialsDB(versionsDB, versionsPath),
|
||||
vouchersDB: newVouchersDB(versionsDB, versionsPath),
|
||||
}
|
||||
|
||||
kdb: dbs[0],
|
||||
ndb: dbs[1],
|
||||
adb: dbs[2],
|
||||
}, nil
|
||||
return db, nil
|
||||
}
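The comment inside the struct literal flags the follow-up work: every wrapper still receives the shared info.db connection, but each is meant to own its own file eventually. A purely hypothetical sketch of that future wiring, reusing openDatabase and newBandwidthDB from this package (bandwidth.db is an invented filename):

// openBandwidthDB is hypothetical: a dedicated bandwidth.db next to info.db.
func openBandwidthDB(infoPath string) (*bandwidthDB, error) {
    path := filepath.Join(filepath.Dir(infoPath), "bandwidth.db")
    conn, err := openDatabase(path)
    if err != nil {
        return nil, err
    }
    return newBandwidthDB(conn, path), nil
}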
|
||||
|
||||
// NewTest creates new test database for storage node.
|
||||
@ -87,26 +157,77 @@ func NewTest(log *zap.Logger, storageDir string) (*DB, error) {
|
||||
}
|
||||
pieces := filestore.New(log, piecesDir)
|
||||
|
||||
infodb, err := NewInfoTest()
|
||||
versionsPath := ""
|
||||
versionsDB, err := openTestDatabase()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DB{
|
||||
log: log,
|
||||
|
||||
db := &DB{
|
||||
log: log,
|
||||
pieces: pieces,
|
||||
info: infodb,
|
||||
kdb: teststore.New(),
|
||||
ndb: teststore.New(),
|
||||
adb: teststore.New(),
|
||||
|
||||
kdb: teststore.New(),
|
||||
ndb: teststore.New(),
|
||||
adb: teststore.New(),
|
||||
}, nil
|
||||
// Initialize databases. Currently shares one info.db database file but
|
||||
// in the future these will initialize their own database connections.
|
||||
versionsDB: newVersionsDB(versionsDB, versionsPath),
|
||||
v0PieceInfoDB: newV0PieceInfoDB(versionsDB, versionsPath),
|
||||
bandwidthDB: newBandwidthDB(versionsDB, versionsPath),
|
||||
consoleDB: newConsoleDB(versionsDB),
|
||||
ordersDB: newOrdersDB(versionsDB, versionsPath),
|
||||
pieceExpirationDB: newPieceExpirationDB(versionsDB, versionsPath),
|
||||
pieceSpaceUsedDB: newPieceSpaceUsedDB(versionsDB, versionsPath),
|
||||
reputationDB: newReputationDB(versionsDB, versionsPath),
|
||||
storageUsageDB: newStorageusageDB(versionsDB, versionsPath),
|
||||
usedSerialsDB: newUsedSerialsDB(versionsDB, versionsPath),
|
||||
vouchersDB: newVouchersDB(versionsDB, versionsPath),
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// openDatabase opens or creates a database at the specified path.
|
||||
func openDatabase(path string) (*sql.DB, error) {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite3", "file:"+path+"?_journal=WAL&_busy_timeout=10000")
|
||||
if err != nil {
|
||||
return nil, ErrDatabase.Wrap(err)
|
||||
}
|
||||
|
||||
dbutil.Configure(db, mon)
|
||||
return db, nil
|
||||
}
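The DSN flags ask the mattn/go-sqlite3 driver to set two pragmas on every connection it opens: WAL journaling, so readers are not blocked by the single writer, and a 10-second busy timeout, so a locked database is retried instead of immediately returning SQLITE_BUSY. A rough, illustration-only equivalent using explicit pragmas (the real code relies on the DSN form, which has the advantage of applying to every pooled connection):

func openWithPragmas(path string) (*sql.DB, error) {
    db, err := sql.Open("sqlite3", "file:"+path)
    if err != nil {
        return nil, err
    }
    // Unlike the DSN flags, these statements only configure whichever pooled
    // connection happens to execute them.
    for _, pragma := range []string{
        `PRAGMA journal_mode = WAL`,
        `PRAGMA busy_timeout = 10000`,
    } {
        if _, err := db.Exec(pragma); err != nil {
            return nil, errs.Combine(err, db.Close())
        }
    }
    return db, nil
}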
|
||||
|
||||
// openTestDatabase creates an in-memory database.
|
||||
func openTestDatabase() (*sql.DB, error) {
|
||||
// create memory DB with a shared cache and a unique name to avoid collisions
|
||||
db, err := sql.Open("sqlite3", fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63()))
|
||||
if err != nil {
|
||||
return nil, ErrDatabase.Wrap(err)
|
||||
}
|
||||
|
||||
// Set max idle and max open to 1 to control concurrent access to the memory DB
|
||||
// Setting max open higher than 1 results in table locked errors
|
||||
db.SetMaxIdleConns(1)
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetConnMaxLifetime(-1)
|
||||
|
||||
mon.Chain("db_stats", monkit.StatSourceFunc(
|
||||
func(cb func(name string, val float64)) {
|
||||
monkit.StatSourceFromStruct(db.Stats()).Stats(cb)
|
||||
}))
|
||||
|
||||
return db, nil
|
||||
}
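A plain ":memory:" SQLite database exists per connection, so the connection pool would silently hand each query its own empty database. Using mode=memory&cache=shared with a unique name makes every connection opened with that URI share one in-process database, capping the pool at a single open connection avoids the "table is locked" errors that concurrent access to a shared cache produces, and the negative connection lifetime keeps that connection (and therefore the data) alive. A small sketch of the sharing behaviour, where memdb42 is an arbitrary name:

func sharedMemoryExample() error {
    // Two handles on the same named in-memory URI see the same database.
    a, err := sql.Open("sqlite3", "file:memdb42?mode=memory&cache=shared")
    if err != nil {
        return err
    }
    b, err := sql.Open("sqlite3", "file:memdb42?mode=memory&cache=shared")
    if err != nil {
        return err
    }
    if _, err := a.Exec(`CREATE TABLE t (x INTEGER)`); err != nil {
        return err
    }
    // The table created through a is visible through b because the cache is shared.
    var n int
    return b.QueryRow(`SELECT count(*) FROM sqlite_master WHERE name = 't'`).Scan(&n)
}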
|
||||
|
||||
// CreateTables creates any necessary tables.
|
||||
func (db *DB) CreateTables() error {
|
||||
return db.info.CreateTables(db.log)
|
||||
migration := db.Migration()
|
||||
return migration.Run(db.log.Named("migration"), db.versionsDB)
|
||||
}
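CreateTables is now a thin wrapper that runs the migration chain against the versions database, so callers open the databases and migrate as two explicit steps. A sketch of typical wiring, using only the Config fields exercised by the migration test later in this diff (the directory layout is an assumption):

func openStorageNodeDB(log *zap.Logger, dir string) (*DB, error) {
    cfg := Config{
        Pieces:   dir,
        Info2:    filepath.Join(dir, "info.db"),
        Kademlia: filepath.Join(dir, "kademlia"),
    }
    db, err := New(log, cfg)
    if err != nil {
        return nil, err
    }
    // Run all pending migration steps before handing the DB to the peer.
    if err := db.CreateTables(); err != nil {
        return nil, errs.Combine(err, db.Close())
    }
    return db, nil
}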
|
||||
|
||||
// Close closes any resources.
|
||||
@ -116,17 +237,422 @@ func (db *DB) Close() error {
|
||||
db.ndb.Close(),
|
||||
db.adb.Close(),
|
||||
|
||||
db.pieces.Close(),
|
||||
db.info.Close(),
|
||||
db.versionsDB.Close(),
|
||||
db.v0PieceInfoDB.Close(),
|
||||
db.bandwidthDB.Close(),
|
||||
db.consoleDB.Close(),
|
||||
db.ordersDB.Close(),
|
||||
db.pieceExpirationDB.Close(),
|
||||
db.pieceSpaceUsedDB.Close(),
|
||||
db.reputationDB.Close(),
|
||||
db.storageUsageDB.Close(),
|
||||
db.usedSerialsDB.Close(),
|
||||
db.vouchersDB.Close(),
|
||||
)
|
||||
}
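Close leans on errs.Combine, which drops nil values and merges the rest, so every sub-database gets its Close call even when an earlier one fails and the caller still sees each failure. The same effect can be built up incrementally with an errs.Group, the way the orders code below collects unmarshal errors; a tiny sketch:

func closeAll(closers ...io.Closer) error {
    var group errs.Group
    for _, c := range closers {
        group.Add(c.Close()) // nil errors are dropped by the group
    }
    return group.Err()
}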
|
||||
|
||||
// VersionsMigration returns the instance of the versions database.
|
||||
func (db *DB) VersionsMigration() migrate.DB {
|
||||
return db.versionsDB
|
||||
}
|
||||
|
||||
// Versions returns the instance of the versions database.
|
||||
func (db *DB) Versions() SQLDB {
|
||||
return db.versionsDB
|
||||
}
|
||||
|
||||
// V0PieceInfo returns the instance of the V0PieceInfoDB database.
|
||||
func (db *DB) V0PieceInfo() pieces.V0PieceInfoDB {
|
||||
return db.v0PieceInfoDB
|
||||
}
|
||||
|
||||
// Bandwidth returns the instance of the Bandwidth database.
|
||||
func (db *DB) Bandwidth() bandwidth.DB {
|
||||
return db.bandwidthDB
|
||||
}
|
||||
|
||||
// Console returns the instance of the Console database.
|
||||
func (db *DB) Console() console.DB {
|
||||
return db.consoleDB
|
||||
}
|
||||
|
||||
// Orders returns the instance of the Orders database.
|
||||
func (db *DB) Orders() orders.DB {
|
||||
return db.ordersDB
|
||||
}
|
||||
|
||||
// Pieces returns blob storage for pieces
|
||||
func (db *DB) Pieces() storage.Blobs {
|
||||
return db.pieces
|
||||
}
|
||||
|
||||
// PieceExpirationDB returns the instance of the PieceExpiration database.
|
||||
func (db *DB) PieceExpirationDB() pieces.PieceExpirationDB {
|
||||
return db.pieceExpirationDB
|
||||
}
|
||||
|
||||
// PieceSpaceUsedDB returns the instance of the PieceSpacedUsed database.
|
||||
func (db *DB) PieceSpaceUsedDB() pieces.PieceSpaceUsedDB {
|
||||
return db.pieceSpaceUsedDB
|
||||
}
|
||||
|
||||
// Reputation returns the instance of the Reputation database.
|
||||
func (db *DB) Reputation() reputation.DB {
|
||||
return db.reputationDB
|
||||
}
|
||||
|
||||
// StorageUsage returns the instance of the StorageUsage database.
|
||||
func (db *DB) StorageUsage() storageusage.DB {
|
||||
return db.storageUsageDB
|
||||
}
|
||||
|
||||
// UsedSerials returns the instance of the UsedSerials database.
|
||||
func (db *DB) UsedSerials() piecestore.UsedSerials {
|
||||
return db.usedSerialsDB
|
||||
}
|
||||
|
||||
// Vouchers returns the instance of the Vouchers database.
|
||||
func (db *DB) Vouchers() vouchers.DB {
|
||||
return db.vouchersDB
|
||||
}
|
||||
|
||||
// RoutingTable returns kademlia routing table
|
||||
func (db *DB) RoutingTable() (kdb, ndb, adb storage.KeyValueStore) {
|
||||
return db.kdb, db.ndb, db.adb
|
||||
}
|
||||
|
||||
// Migration returns table migrations.
|
||||
func (db *DB) Migration() *migrate.Migration {
|
||||
return &migrate.Migration{
|
||||
Table: "versions",
|
||||
Steps: []*migrate.Step{
|
||||
{
|
||||
Description: "Initial setup",
|
||||
Version: 0,
|
||||
Action: migrate.SQL{
|
||||
// table for keeping serials that need to be verified against
|
||||
`CREATE TABLE used_serial (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
// primary key on satellite id and serial number
|
||||
`CREATE UNIQUE INDEX pk_used_serial ON used_serial(satellite_id, serial_number)`,
|
||||
// expiration index to allow fast deletion
|
||||
`CREATE INDEX idx_used_serial ON used_serial(expiration)`,
|
||||
|
||||
// certificate table for storing uplink/satellite certificates
|
||||
`CREATE TABLE certificate (
|
||||
cert_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||
node_id BLOB NOT NULL, -- same NodeID can have multiple valid leaf certificates
|
||||
peer_identity BLOB UNIQUE NOT NULL -- PEM encoded
|
||||
)`,
|
||||
|
||||
// table for storing piece meta info
|
||||
`CREATE TABLE pieceinfo (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_size BIGINT NOT NULL,
|
||||
piece_expiration TIMESTAMP, -- date when it can be deleted
|
||||
|
||||
uplink_piece_hash BLOB NOT NULL, -- serialized pb.PieceHash signed by uplink
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
// primary key by satellite id and piece id
|
||||
`CREATE UNIQUE INDEX pk_pieceinfo ON pieceinfo(satellite_id, piece_id)`,
|
||||
|
||||
// table for storing bandwidth usage
|
||||
`CREATE TABLE bandwidth_usage (
|
||||
satellite_id BLOB NOT NULL,
|
||||
action INTEGER NOT NULL,
|
||||
amount BIGINT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL
|
||||
)`,
|
||||
`CREATE INDEX idx_bandwidth_usage_satellite ON bandwidth_usage(satellite_id)`,
|
||||
`CREATE INDEX idx_bandwidth_usage_created ON bandwidth_usage(created_at)`,
|
||||
|
||||
// table for storing all unsent orders
|
||||
`CREATE TABLE unsent_order (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL, -- serialized pb.OrderLimit
|
||||
order_serialized BLOB NOT NULL, -- serialized pb.Order
|
||||
order_limit_expiration TIMESTAMP NOT NULL, -- when is the deadline for sending it
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX idx_orders ON unsent_order(satellite_id, serial_number)`,
|
||||
|
||||
// table for storing all sent orders
|
||||
`CREATE TABLE order_archive (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL, -- serialized pb.OrderLimit
|
||||
order_serialized BLOB NOT NULL, -- serialized pb.Order
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
status INTEGER NOT NULL, -- accepted, rejected, confirmed
|
||||
archived_at TIMESTAMP NOT NULL, -- when was it rejected
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE INDEX idx_order_archive_satellite ON order_archive(satellite_id)`,
|
||||
`CREATE INDEX idx_order_archive_status ON order_archive(status)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Network Wipe #2",
|
||||
Version: 1,
|
||||
Action: migrate.SQL{
|
||||
`UPDATE pieceinfo SET piece_expiration = '2019-05-09 00:00:00.000000+00:00'`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add tracking of deletion failures.",
|
||||
Version: 2,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN deletion_failed_at TIMESTAMP`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add vouchersDB for storing and retrieving vouchers.",
|
||||
Version: 3,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE vouchers (
|
||||
satellite_id BLOB PRIMARY KEY NOT NULL,
|
||||
voucher_serialized BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add index on pieceinfo expireation",
|
||||
Version: 4,
|
||||
Action: migrate.SQL{
|
||||
`CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration)`,
|
||||
`CREATE INDEX idx_pieceinfo_deletion_failed ON pieceinfo(deletion_failed_at)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Partial Network Wipe - Tardigrade Satellites",
|
||||
Version: 5,
|
||||
Action: migrate.SQL{
|
||||
`UPDATE pieceinfo SET piece_expiration = '2019-06-25 00:00:00.000000+00:00' WHERE satellite_id
|
||||
IN (x'84A74C2CD43C5BA76535E1F42F5DF7C287ED68D33522782F4AFABFDB40000000',
|
||||
x'A28B4F04E10BAE85D67F4C6CB82BF8D4C0F0F47A8EA72627524DEB6EC0000000',
|
||||
x'AF2C42003EFC826AB4361F73F9D890942146FE0EBE806786F8E7190800000000'
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add creation date.",
|
||||
Version: 6,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN piece_creation TIMESTAMP NOT NULL DEFAULT 'epoch'`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Drop certificate table.",
|
||||
Version: 7,
|
||||
Action: migrate.SQL{
|
||||
`DROP TABLE certificate`,
|
||||
`CREATE TABLE certificate (cert_id INTEGER)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Drop old used serials and remove pieceinfo_deletion_failed index.",
|
||||
Version: 8,
|
||||
Action: migrate.SQL{
|
||||
`DELETE FROM used_serial`,
|
||||
`DROP INDEX idx_pieceinfo_deletion_failed`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add order limit table.",
|
||||
Version: 9,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN order_limit BLOB NOT NULL DEFAULT X''`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Optimize index usage.",
|
||||
Version: 10,
|
||||
Action: migrate.SQL{
|
||||
`DROP INDEX idx_pieceinfo_expiration`,
|
||||
`DROP INDEX idx_order_archive_satellite`,
|
||||
`DROP INDEX idx_order_archive_status`,
|
||||
`CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration) WHERE piece_expiration IS NOT NULL`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Create bandwidth_usage_rollup table.",
|
||||
Version: 11,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE bandwidth_usage_rollups (
|
||||
interval_start TIMESTAMP NOT NULL,
|
||||
satellite_id BLOB NOT NULL,
|
||||
action INTEGER NOT NULL,
|
||||
amount BIGINT NOT NULL,
|
||||
PRIMARY KEY ( interval_start, satellite_id, action )
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Clear Tables from Alpha data",
|
||||
Version: 12,
|
||||
Action: migrate.SQL{
|
||||
`DROP TABLE pieceinfo`,
|
||||
`DROP TABLE used_serial`,
|
||||
`DROP TABLE order_archive`,
|
||||
`CREATE TABLE pieceinfo_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_size BIGINT NOT NULL,
|
||||
piece_expiration TIMESTAMP,
|
||||
|
||||
order_limit BLOB NOT NULL,
|
||||
uplink_piece_hash BLOB NOT NULL,
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
deletion_failed_at TIMESTAMP,
|
||||
piece_creation TIMESTAMP NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX pk_pieceinfo_ ON pieceinfo_(satellite_id, piece_id)`,
|
||||
`CREATE INDEX idx_pieceinfo__expiration ON pieceinfo_(piece_expiration) WHERE piece_expiration IS NOT NULL`,
|
||||
`CREATE TABLE used_serial_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX pk_used_serial_ ON used_serial_(satellite_id, serial_number)`,
|
||||
`CREATE INDEX idx_used_serial_ ON used_serial_(expiration)`,
|
||||
`CREATE TABLE order_archive_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL,
|
||||
order_serialized BLOB NOT NULL,
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
status INTEGER NOT NULL,
|
||||
archived_at TIMESTAMP NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Free Storagenodes from trash data",
|
||||
Version: 13,
|
||||
Action: migrate.Func(func(log *zap.Logger, mgdb migrate.DB, tx *sql.Tx) error {
|
||||
// When using inmemory DB, skip deletion process
|
||||
if db.versionsDB.location == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := os.RemoveAll(filepath.Join(filepath.Dir(db.versionsDB.location), "blob/ukfu6bhbboxilvt7jrwlqk7y2tapb5d2r2tsmj2sjxvw5qaaaaaa")) // us-central1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.versionsDB.location), "blob/v4weeab67sbgvnbwd5z7tweqsqqun7qox2agpbxy44mqqaaaaaaa")) // europe-west1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.versionsDB.location), "blob/qstuylguhrn2ozjv4h2c6xpxykd622gtgurhql2k7k75wqaaaaaa")) // asia-east1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.versionsDB.location), "blob/abforhuxbzyd35blusvrifvdwmfx4hmocsva4vmpp3rgqaaaaaaa")) // "tothemoon (stefan)"
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
// To avoid preventing the node from starting up, we just log errors and return nil
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
Description: "Free Storagenodes from orphaned tmp data",
|
||||
Version: 14,
|
||||
Action: migrate.Func(func(log *zap.Logger, mgdb migrate.DB, tx *sql.Tx) error {
|
||||
// When using inmemory DB, skip deletion process
|
||||
if db.versionsDB.location == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := os.RemoveAll(filepath.Join(filepath.Dir(db.versionsDB.location), "tmp"))
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
// To avoid preventing the node from starting up, we just log errors and return nil
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
Description: "Start piece_expirations table, deprecate pieceinfo table",
|
||||
Version: 15,
|
||||
Action: migrate.SQL{
|
||||
// new table to hold expiration data (and only expirations. no other pieceinfo)
|
||||
`CREATE TABLE piece_expirations (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_expiration TIMESTAMP NOT NULL, -- date when it can be deleted
|
||||
deletion_failed_at TIMESTAMP,
|
||||
PRIMARY KEY (satellite_id, piece_id)
|
||||
)`,
|
||||
`CREATE INDEX idx_piece_expirations_piece_expiration ON piece_expirations(piece_expiration)`,
|
||||
`CREATE INDEX idx_piece_expirations_deletion_failed_at ON piece_expirations(deletion_failed_at)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add reputation and storage usage cache tables",
|
||||
Version: 16,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE reputation (
|
||||
satellite_id BLOB NOT NULL,
|
||||
uptime_success_count INTEGER NOT NULL,
|
||||
uptime_total_count INTEGER NOT NULL,
|
||||
uptime_reputation_alpha REAL NOT NULL,
|
||||
uptime_reputation_beta REAL NOT NULL,
|
||||
uptime_reputation_score REAL NOT NULL,
|
||||
audit_success_count INTEGER NOT NULL,
|
||||
audit_total_count INTEGER NOT NULL,
|
||||
audit_reputation_alpha REAL NOT NULL,
|
||||
audit_reputation_beta REAL NOT NULL,
|
||||
audit_reputation_score REAL NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY (satellite_id)
|
||||
)`,
|
||||
`CREATE TABLE storage_usage (
|
||||
satellite_id BLOB NOT NULL,
|
||||
at_rest_total REAL NOT NULL,
|
||||
timestamp TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY (satellite_id, timestamp)
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Create piece_space_used table",
|
||||
Version: 17,
|
||||
Action: migrate.SQL{
|
||||
// new table to hold the most recent totals from the piece space used cache
|
||||
`CREATE TABLE piece_space_used (
|
||||
total INTEGER NOT NULL,
|
||||
satellite_id BLOB
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX idx_piece_space_used_satellite_id ON piece_space_used(satellite_id)`,
|
||||
`INSERT INTO piece_space_used (total) select ifnull(sum(piece_size), 0) from pieceinfo_`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
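Every entry above is one migrate.Step, and the versions table named at the top is presumably how the framework knows which steps have already run, so each step's Action executes at most once per database file. Extending the schema later means appending a step with the next version number; a hypothetical example of such a step (not part of this commit) would look like:

{
    Description: "Example: index storage_usage by timestamp",
    Version:     18,
    Action: migrate.SQL{
        `CREATE INDEX idx_storage_usage_timestamp ON storage_usage(timestamp)`,
    },
},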
|
||||
|
@ -1,490 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package storagenodedb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
"gopkg.in/spacemonkeygo/monkit.v2"
|
||||
|
||||
"storj.io/storj/internal/dbutil"
|
||||
"storj.io/storj/internal/dbutil/utccheck"
|
||||
"storj.io/storj/internal/migrate"
|
||||
)
|
||||
|
||||
// ErrInfo is the default error class for InfoDB
|
||||
var ErrInfo = errs.Class("infodb")
|
||||
|
||||
// SQLDB defines interface that matches *sql.DB
|
||||
// this is such that we can use utccheck.DB for the backend
|
||||
//
|
||||
// TODO: wrap the connector instead of *sql.DB
|
||||
type SQLDB interface {
|
||||
Begin() (*sql.Tx, error)
|
||||
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
|
||||
Close() error
|
||||
Conn(ctx context.Context) (*sql.Conn, error)
|
||||
Driver() driver.Driver
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
|
||||
Ping() error
|
||||
PingContext(ctx context.Context) error
|
||||
Prepare(query string) (*sql.Stmt, error)
|
||||
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
||||
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
|
||||
QueryRow(query string, args ...interface{}) *sql.Row
|
||||
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
|
||||
SetConnMaxLifetime(d time.Duration)
|
||||
SetMaxIdleConns(n int)
|
||||
SetMaxOpenConns(n int)
|
||||
}
|
||||
|
||||
// InfoDB implements information database for piecestore.
|
||||
type InfoDB struct {
|
||||
db SQLDB
|
||||
bandwidthdb bandwidthdb
|
||||
v0PieceInfo v0PieceInfo
|
||||
pieceExpirationDB pieceExpirationDB
|
||||
location string
|
||||
pieceSpaceUsedDB pieceSpaceUsedDB
|
||||
}
|
||||
|
||||
// newInfo creates or opens InfoDB at the specified path.
|
||||
func newInfo(path string) (*InfoDB, error) {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite3", "file:"+path+"?_journal=WAL&_busy_timeout=10000")
|
||||
if err != nil {
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
}
|
||||
|
||||
dbutil.Configure(db, mon)
|
||||
|
||||
infoDb := &InfoDB{db: db}
|
||||
infoDb.v0PieceInfo = v0PieceInfo{InfoDB: infoDb}
|
||||
infoDb.bandwidthdb = bandwidthdb{InfoDB: infoDb}
|
||||
infoDb.pieceExpirationDB = pieceExpirationDB{InfoDB: infoDb}
|
||||
infoDb.location = path
|
||||
infoDb.pieceSpaceUsedDB = pieceSpaceUsedDB{InfoDB: infoDb}
|
||||
|
||||
return infoDb, nil
|
||||
}
|
||||
|
||||
// NewInfoTest creates a new inmemory InfoDB.
|
||||
func NewInfoTest() (*InfoDB, error) {
|
||||
// create memory DB with a shared cache and a unique name to avoid collisions
|
||||
db, err := sql.Open("sqlite3", fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63()))
|
||||
if err != nil {
|
||||
return nil, ErrInfo.Wrap(err)
|
||||
}
|
||||
|
||||
// Set max idle and max open to 1 to control concurrent access to the memory DB
|
||||
// Setting max open higher than 1 results in table locked errors
|
||||
db.SetMaxIdleConns(1)
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetConnMaxLifetime(-1)
|
||||
|
||||
mon.Chain("db_stats", monkit.StatSourceFunc(
|
||||
func(cb func(name string, val float64)) {
|
||||
monkit.StatSourceFromStruct(db.Stats()).Stats(cb)
|
||||
}))
|
||||
|
||||
infoDb := &InfoDB{db: utccheck.New(db)}
|
||||
infoDb.v0PieceInfo = v0PieceInfo{InfoDB: infoDb}
|
||||
infoDb.bandwidthdb = bandwidthdb{InfoDB: infoDb}
|
||||
infoDb.pieceExpirationDB = pieceExpirationDB{InfoDB: infoDb}
|
||||
infoDb.pieceSpaceUsedDB = pieceSpaceUsedDB{InfoDB: infoDb}
|
||||
|
||||
return infoDb, nil
|
||||
}
|
||||
|
||||
// Close closes any resources.
|
||||
func (db *InfoDB) Close() error {
|
||||
return db.db.Close()
|
||||
}
|
||||
|
||||
// CreateTables creates any necessary tables.
|
||||
func (db *InfoDB) CreateTables(log *zap.Logger) error {
|
||||
migration := db.Migration()
|
||||
return migration.Run(log.Named("migration"), db)
|
||||
}
|
||||
|
||||
// RawDB returns access to the raw database, only for migration tests.
|
||||
func (db *InfoDB) RawDB() SQLDB { return db.db }
|
||||
|
||||
// Begin begins transaction
|
||||
func (db *InfoDB) Begin() (*sql.Tx, error) { return db.db.Begin() }
|
||||
|
||||
// Rebind rebind parameters
|
||||
func (db *InfoDB) Rebind(s string) string { return s }
|
||||
|
||||
// Schema returns schema
|
||||
func (db *InfoDB) Schema() string { return "" }
|
||||
|
||||
// Migration returns table migrations.
|
||||
func (db *InfoDB) Migration() *migrate.Migration {
|
||||
return &migrate.Migration{
|
||||
Table: "versions",
|
||||
Steps: []*migrate.Step{
|
||||
{
|
||||
Description: "Initial setup",
|
||||
Version: 0,
|
||||
Action: migrate.SQL{
|
||||
// table for keeping serials that need to be verified against
|
||||
`CREATE TABLE used_serial (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
// primary key on satellite id and serial number
|
||||
`CREATE UNIQUE INDEX pk_used_serial ON used_serial(satellite_id, serial_number)`,
|
||||
// expiration index to allow fast deletion
|
||||
`CREATE INDEX idx_used_serial ON used_serial(expiration)`,
|
||||
|
||||
// certificate table for storing uplink/satellite certificates
|
||||
`CREATE TABLE certificate (
|
||||
cert_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||
node_id BLOB NOT NULL, -- same NodeID can have multiple valid leaf certificates
|
||||
peer_identity BLOB UNIQUE NOT NULL -- PEM encoded
|
||||
)`,
|
||||
|
||||
// table for storing piece meta info
|
||||
`CREATE TABLE pieceinfo (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_size BIGINT NOT NULL,
|
||||
piece_expiration TIMESTAMP, -- date when it can be deleted
|
||||
|
||||
uplink_piece_hash BLOB NOT NULL, -- serialized pb.PieceHash signed by uplink
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
// primary key by satellite id and piece id
|
||||
`CREATE UNIQUE INDEX pk_pieceinfo ON pieceinfo(satellite_id, piece_id)`,
|
||||
|
||||
// table for storing bandwidth usage
|
||||
`CREATE TABLE bandwidth_usage (
|
||||
satellite_id BLOB NOT NULL,
|
||||
action INTEGER NOT NULL,
|
||||
amount BIGINT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL
|
||||
)`,
|
||||
`CREATE INDEX idx_bandwidth_usage_satellite ON bandwidth_usage(satellite_id)`,
|
||||
`CREATE INDEX idx_bandwidth_usage_created ON bandwidth_usage(created_at)`,
|
||||
|
||||
// table for storing all unsent orders
|
||||
`CREATE TABLE unsent_order (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL, -- serialized pb.OrderLimit
|
||||
order_serialized BLOB NOT NULL, -- serialized pb.Order
|
||||
order_limit_expiration TIMESTAMP NOT NULL, -- when is the deadline for sending it
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX idx_orders ON unsent_order(satellite_id, serial_number)`,
|
||||
|
||||
// table for storing all sent orders
|
||||
`CREATE TABLE order_archive (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL, -- serialized pb.OrderLimit
|
||||
order_serialized BLOB NOT NULL, -- serialized pb.Order
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
status INTEGER NOT NULL, -- accepted, rejected, confirmed
|
||||
archived_at TIMESTAMP NOT NULL, -- when was it rejected
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE INDEX idx_order_archive_satellite ON order_archive(satellite_id)`,
|
||||
`CREATE INDEX idx_order_archive_status ON order_archive(status)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Network Wipe #2",
|
||||
Version: 1,
|
||||
Action: migrate.SQL{
|
||||
`UPDATE pieceinfo SET piece_expiration = '2019-05-09 00:00:00.000000+00:00'`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add tracking of deletion failures.",
|
||||
Version: 2,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN deletion_failed_at TIMESTAMP`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add vouchersDB for storing and retrieving vouchers.",
|
||||
Version: 3,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE vouchers (
|
||||
satellite_id BLOB PRIMARY KEY NOT NULL,
|
||||
voucher_serialized BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add index on pieceinfo expireation",
|
||||
Version: 4,
|
||||
Action: migrate.SQL{
|
||||
`CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration)`,
|
||||
`CREATE INDEX idx_pieceinfo_deletion_failed ON pieceinfo(deletion_failed_at)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Partial Network Wipe - Tardigrade Satellites",
|
||||
Version: 5,
|
||||
Action: migrate.SQL{
|
||||
`UPDATE pieceinfo SET piece_expiration = '2019-06-25 00:00:00.000000+00:00' WHERE satellite_id
|
||||
IN (x'84A74C2CD43C5BA76535E1F42F5DF7C287ED68D33522782F4AFABFDB40000000',
|
||||
x'A28B4F04E10BAE85D67F4C6CB82BF8D4C0F0F47A8EA72627524DEB6EC0000000',
|
||||
x'AF2C42003EFC826AB4361F73F9D890942146FE0EBE806786F8E7190800000000'
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add creation date.",
|
||||
Version: 6,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN piece_creation TIMESTAMP NOT NULL DEFAULT 'epoch'`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Drop certificate table.",
|
||||
Version: 7,
|
||||
Action: migrate.SQL{
|
||||
`DROP TABLE certificate`,
|
||||
`CREATE TABLE certificate (cert_id INTEGER)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Drop old used serials and remove pieceinfo_deletion_failed index.",
|
||||
Version: 8,
|
||||
Action: migrate.SQL{
|
||||
`DELETE FROM used_serial`,
|
||||
`DROP INDEX idx_pieceinfo_deletion_failed`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add order limit table.",
|
||||
Version: 9,
|
||||
Action: migrate.SQL{
|
||||
`ALTER TABLE pieceinfo ADD COLUMN order_limit BLOB NOT NULL DEFAULT X''`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Optimize index usage.",
|
||||
Version: 10,
|
||||
Action: migrate.SQL{
|
||||
`DROP INDEX idx_pieceinfo_expiration`,
|
||||
`DROP INDEX idx_order_archive_satellite`,
|
||||
`DROP INDEX idx_order_archive_status`,
|
||||
`CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration) WHERE piece_expiration IS NOT NULL`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Create bandwidth_usage_rollup table.",
|
||||
Version: 11,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE bandwidth_usage_rollups (
|
||||
interval_start TIMESTAMP NOT NULL,
|
||||
satellite_id BLOB NOT NULL,
|
||||
action INTEGER NOT NULL,
|
||||
amount BIGINT NOT NULL,
|
||||
PRIMARY KEY ( interval_start, satellite_id, action )
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Clear Tables from Alpha data",
|
||||
Version: 12,
|
||||
Action: migrate.SQL{
|
||||
`DROP TABLE pieceinfo`,
|
||||
`DROP TABLE used_serial`,
|
||||
`DROP TABLE order_archive`,
|
||||
`CREATE TABLE pieceinfo_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_size BIGINT NOT NULL,
|
||||
piece_expiration TIMESTAMP,
|
||||
|
||||
order_limit BLOB NOT NULL,
|
||||
uplink_piece_hash BLOB NOT NULL,
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
deletion_failed_at TIMESTAMP,
|
||||
piece_creation TIMESTAMP NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX pk_pieceinfo_ ON pieceinfo_(satellite_id, piece_id)`,
|
||||
`CREATE INDEX idx_pieceinfo__expiration ON pieceinfo_(piece_expiration) WHERE piece_expiration IS NOT NULL`,
|
||||
`CREATE TABLE used_serial_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
expiration TIMESTAMP NOT NULL
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX pk_used_serial_ ON used_serial_(satellite_id, serial_number)`,
|
||||
`CREATE INDEX idx_used_serial_ ON used_serial_(expiration)`,
|
||||
`CREATE TABLE order_archive_ (
|
||||
satellite_id BLOB NOT NULL,
|
||||
serial_number BLOB NOT NULL,
|
||||
|
||||
order_limit_serialized BLOB NOT NULL,
|
||||
order_serialized BLOB NOT NULL,
|
||||
|
||||
uplink_cert_id INTEGER NOT NULL,
|
||||
|
||||
status INTEGER NOT NULL,
|
||||
archived_at TIMESTAMP NOT NULL,
|
||||
|
||||
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Free Storagenodes from trash data",
|
||||
Version: 13,
|
||||
Action: migrate.Func(func(log *zap.Logger, mgdb migrate.DB, tx *sql.Tx) error {
|
||||
// When using inmemory DB, skip deletion process
|
||||
if db.location == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := os.RemoveAll(filepath.Join(filepath.Dir(db.location), "blob/ukfu6bhbboxilvt7jrwlqk7y2tapb5d2r2tsmj2sjxvw5qaaaaaa")) // us-central1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.location), "blob/v4weeab67sbgvnbwd5z7tweqsqqun7qox2agpbxy44mqqaaaaaaa")) // europe-west1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.location), "blob/qstuylguhrn2ozjv4h2c6xpxykd622gtgurhql2k7k75wqaaaaaa")) // asia-east1
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(filepath.Dir(db.location), "blob/abforhuxbzyd35blusvrifvdwmfx4hmocsva4vmpp3rgqaaaaaaa")) // "tothemoon (stefan)"
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
// To prevent the node from starting up, we just log errors and return nil
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
Description: "Free Storagenodes from orphaned tmp data",
|
||||
Version: 14,
|
||||
Action: migrate.Func(func(log *zap.Logger, mgdb migrate.DB, tx *sql.Tx) error {
|
||||
// When using inmemory DB, skip deletion process
|
||||
if db.location == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := os.RemoveAll(filepath.Join(filepath.Dir(db.location), "tmp"))
|
||||
if err != nil {
|
||||
log.Sugar().Debug(err)
|
||||
}
|
||||
// To prevent the node from starting up, we just log errors and return nil
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
Description: "Start piece_expirations table, deprecate pieceinfo table",
|
||||
Version: 15,
|
||||
Action: migrate.SQL{
|
||||
// new table to hold expiration data (and only expirations. no other pieceinfo)
|
||||
`CREATE TABLE piece_expirations (
|
||||
satellite_id BLOB NOT NULL,
|
||||
piece_id BLOB NOT NULL,
|
||||
piece_expiration TIMESTAMP NOT NULL, -- date when it can be deleted
|
||||
deletion_failed_at TIMESTAMP,
|
||||
PRIMARY KEY (satellite_id, piece_id)
|
||||
)`,
|
||||
`CREATE INDEX idx_piece_expirations_piece_expiration ON piece_expirations(piece_expiration)`,
|
||||
`CREATE INDEX idx_piece_expirations_deletion_failed_at ON piece_expirations(deletion_failed_at)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Add reputation and storage usage cache tables",
|
||||
Version: 16,
|
||||
Action: migrate.SQL{
|
||||
`CREATE TABLE reputation (
|
||||
satellite_id BLOB NOT NULL,
|
||||
uptime_success_count INTEGER NOT NULL,
|
||||
uptime_total_count INTEGER NOT NULL,
|
||||
uptime_reputation_alpha REAL NOT NULL,
|
||||
uptime_reputation_beta REAL NOT NULL,
|
||||
uptime_reputation_score REAL NOT NULL,
|
||||
audit_success_count INTEGER NOT NULL,
|
||||
audit_total_count INTEGER NOT NULL,
|
||||
audit_reputation_alpha REAL NOT NULL,
|
||||
audit_reputation_beta REAL NOT NULL,
|
||||
audit_reputation_score REAL NOT NULL,
|
||||
updated_at TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY (satellite_id)
|
||||
)`,
|
||||
`CREATE TABLE storage_usage (
|
||||
satellite_id BLOB NOT NULL,
|
||||
at_rest_total REAL NOT NUll,
|
||||
timestamp TIMESTAMP NOT NULL,
|
||||
PRIMARY KEY (satellite_id, timestamp)
|
||||
)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
Description: "Create piece_space_used table",
|
||||
Version: 17,
|
||||
Action: migrate.SQL{
|
||||
// new table to hold the most recent totals from the piece space used cache
|
||||
`CREATE TABLE piece_space_used (
|
||||
total INTEGER NOT NULL,
|
||||
satellite_id BLOB
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX idx_piece_space_used_satellite_id ON piece_space_used(satellite_id)`,
|
||||
`INSERT INTO piece_space_used (total) select ifnull(sum(piece_size), 0) from pieceinfo_`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// withTx is a helper method which executes callback in transaction scope
|
||||
func (db *InfoDB) withTx(ctx context.Context, cb func(tx *sql.Tx) error) error {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = errs.Combine(err, tx.Rollback())
|
||||
return
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
}()
|
||||
|
||||
return cb(tx)
|
||||
}
|
@ -17,6 +17,7 @@ import (
|
||||
|
||||
"storj.io/storj/internal/dbutil/dbschema"
|
||||
"storj.io/storj/internal/dbutil/sqliteutil"
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/storagenode/storagenodedb"
|
||||
)
|
||||
|
||||
@ -74,13 +75,20 @@ func newData(snap *dbschema.Snapshot) string {
|
||||
}
|
||||
|
||||
func TestMigrate(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
snapshots, err := loadSnapshots()
|
||||
require.NoError(t, err)
|
||||
|
||||
log := zaptest.NewLogger(t)
|
||||
|
||||
cfg := storagenodedb.Config{
|
||||
Pieces: ctx.Dir("storage"),
|
||||
Info2: ctx.Dir("storage") + "/info.db",
|
||||
Kademlia: ctx.Dir("storage") + "/kademlia",
|
||||
}
|
||||
|
||||
// create a new satellitedb connection
|
||||
db, err := storagenodedb.NewInfoTest()
|
||||
db, err := storagenodedb.New(log, cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, db.Close()) }()
|
||||
|
||||
@ -91,7 +99,7 @@ func TestMigrate(t *testing.T) {
|
||||
tag := fmt.Sprintf("#%d - v%d", i, step.Version)
|
||||
|
||||
// run migration up to a specific version
|
||||
err := migrations.TargetVersion(step.Version).Run(log.Named("migrate"), db)
|
||||
err := migrations.TargetVersion(step.Version).Run(log.Named("migrate"), db.VersionsMigration())
|
||||
require.NoError(t, err, tag)
|
||||
|
||||
// find the matching expected version
|
||||
@ -100,19 +108,19 @@ func TestMigrate(t *testing.T) {
|
||||
|
||||
// insert data for new tables
|
||||
if newdata := newData(expected); newdata != "" {
|
||||
_, err = db.RawDB().Exec(newdata)
|
||||
_, err = db.Versions().Exec(newdata)
|
||||
require.NoError(t, err, tag)
|
||||
}
|
||||
|
||||
// load schema from database
|
||||
currentSchema, err := sqliteutil.QuerySchema(db.RawDB())
|
||||
currentSchema, err := sqliteutil.QuerySchema(db.Versions())
|
||||
require.NoError(t, err, tag)
|
||||
|
||||
// we don't care changes in versions table
|
||||
currentSchema.DropTable("versions")
|
||||
|
||||
// load data from database
|
||||
currentData, err := sqliteutil.QueryData(db.RawDB(), currentSchema)
|
||||
currentData, err := sqliteutil.QueryData(db.Versions(), currentSchema)
|
||||
require.NoError(t, err, tag)
|
||||
|
||||
// verify schema and data
|
||||
|
@@ -16,30 +16,38 @@ import (
"storj.io/storj/storagenode/orders"
)

type ordersdb struct{ *InfoDB }
// ErrOrders represents errors from the ordersdb database.
var ErrOrders = errs.Class("ordersdb error")

// Orders returns database for storing orders
func (db *DB) Orders() orders.DB { return db.info.Orders() }
type ordersDB struct {
location string
SQLDB
}

// Orders returns database for storing orders
func (db *InfoDB) Orders() orders.DB { return &ordersdb{db} }
// newOrdersDB returns a new instance of ordersdb initialized with the specified database.
func newOrdersDB(db SQLDB, location string) *ordersDB {
return &ordersDB{
location: location,
SQLDB: db,
}
}

// Enqueue inserts order to the unsent list
func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error) {
func (db *ordersDB) Enqueue(ctx context.Context, info *orders.Info) (err error) {
defer mon.Task()(&ctx)(&err)

limitSerialized, err := proto.Marshal(info.Limit)
if err != nil {
return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}

orderSerialized, err := proto.Marshal(info.Order)
if err != nil {
return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}

// TODO: remove uplink_cert_id
_, err = db.db.Exec(`
_, err = db.Exec(`
INSERT INTO unsent_order(
satellite_id, serial_number,
order_limit_serialized, order_serialized, order_limit_expiration,
@@ -47,7 +55,7 @@ func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error)
) VALUES (?,?, ?,?,?, ?)
`, info.Limit.SatelliteId, info.Limit.SerialNumber, limitSerialized, orderSerialized, info.Limit.OrderExpiration.UTC(), 0)

return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}

// ListUnsent returns orders that haven't been sent yet.
@@ -58,10 +66,10 @@ func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error)
// which have not. In case of database or other system error, the method will
// stop without any further processing and will return an error without any
// order.
func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info, err error) {
func (db *ordersDB) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.Query(`
rows, err := db.Query(`
SELECT order_limit_serialized, order_serialized
FROM unsent_order
LIMIT ?
@@ -70,7 +78,7 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

var unmarshalErrors errs.Group
@@ -83,7 +91,7 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info

err := rows.Scan(&limitSerialized, &orderSerialized)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

var info orders.Info
@@ -92,20 +100,20 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info

err = proto.Unmarshal(limitSerialized, info.Limit)
if err != nil {
unmarshalErrors.Add(ErrInfo.Wrap(err))
unmarshalErrors.Add(ErrOrders.Wrap(err))
continue
}

err = proto.Unmarshal(orderSerialized, info.Order)
if err != nil {
unmarshalErrors.Add(ErrInfo.Wrap(err))
unmarshalErrors.Add(ErrOrders.Wrap(err))
continue
}

infos = append(infos, &info)
}

return infos, ErrInfo.Wrap(rows.Err())
return infos, ErrOrders.Wrap(rows.Err())
}

// ListUnsentBySatellite returns orders that haven't been sent yet grouped by
@@ -117,11 +125,11 @@ func (db *ordersdb) ListUnsent(ctx context.Context, limit int) (_ []*orders.Info
// which have not. In case of database or other system error, the method will
// stop without any further processing and will return an error without any
// order.
func (db *ordersdb) ListUnsentBySatellite(ctx context.Context) (_ map[storj.NodeID][]*orders.Info, err error) {
func (db *ordersDB) ListUnsentBySatellite(ctx context.Context) (_ map[storj.NodeID][]*orders.Info, err error) {
defer mon.Task()(&ctx)(&err)
// TODO: add some limiting

rows, err := db.db.Query(`
rows, err := db.Query(`
SELECT order_limit_serialized, order_serialized
FROM unsent_order
`)
@@ -129,7 +137,7 @@ func (db *ordersdb) ListUnsentBySatellite(ctx context.Context) (_ map[storj.Node
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

var unmarshalErrors errs.Group
@@ -142,7 +150,7 @@ func (db *ordersdb) ListUnsentBySatellite(ctx context.Context) (_ map[storj.Node

err := rows.Scan(&limitSerialized, &orderSerialized)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

var info orders.Info
@@ -151,20 +159,20 @@ func (db *ordersdb) ListUnsentBySatellite(ctx context.Context) (_ map[storj.Node

err = proto.Unmarshal(limitSerialized, info.Limit)
if err != nil {
unmarshalErrors.Add(ErrInfo.Wrap(err))
unmarshalErrors.Add(ErrOrders.Wrap(err))
continue
}

err = proto.Unmarshal(orderSerialized, info.Order)
if err != nil {
unmarshalErrors.Add(ErrInfo.Wrap(err))
unmarshalErrors.Add(ErrOrders.Wrap(err))
continue
}

infos[info.Limit.SatelliteId] = append(infos[info.Limit.SatelliteId], &info)
}

return infos, ErrInfo.Wrap(rows.Err())
return infos, ErrOrders.Wrap(rows.Err())
}

// Archive marks order as being handled.
@@ -173,12 +181,12 @@ func (db *ordersdb) ListUnsentBySatellite(ctx context.Context) (_ map[storj.Node
// follow with the next ones without interrupting the operation and it will
// return an error of the class orders.OrderNotFoundError. Any other error, will
// abort the operation, rolling back the transaction.
func (db *ordersdb) Archive(ctx context.Context, requests ...orders.ArchiveRequest) (err error) {
func (db *ordersDB) Archive(ctx context.Context, requests ...orders.ArchiveRequest) (err error) {
defer mon.Task()(&ctx)(&err)

txn, err := db.Begin()
if err != nil {
return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}

var notFoundErrs errs.Group
@@ -212,7 +220,7 @@ func (db *ordersdb) Archive(ctx context.Context, requests ...orders.ArchiveReque
}

// archiveOne marks order as being handled.
func (db *ordersdb) archiveOne(ctx context.Context, txn *sql.Tx, req orders.ArchiveRequest) (err error) {
func (db *ordersDB) archiveOne(ctx context.Context, txn *sql.Tx, req orders.ArchiveRequest) (err error) {
defer mon.Task()(&ctx)(&err)

result, err := txn.Exec(`
@@ -233,12 +241,12 @@ func (db *ordersdb) archiveOne(ctx context.Context, txn *sql.Tx, req orders.Arch
WHERE satellite_id = ? AND serial_number = ?;
`, int(req.Status), time.Now().UTC(), req.Satellite, req.Serial, req.Satellite, req.Serial)
if err != nil {
return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}

count, err := result.RowsAffected()
if err != nil {
return ErrInfo.Wrap(err)
return ErrOrders.Wrap(err)
}
if count == 0 {
return orders.OrderNotFoundError.New("satellite: %s, serial number: %s",
@@ -250,10 +258,10 @@ func (db *ordersdb) archiveOne(ctx context.Context, txn *sql.Tx, req orders.Arch
}

// ListArchived returns orders that have been sent.
func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.ArchivedInfo, err error) {
func (db *ordersDB) ListArchived(ctx context.Context, limit int) (_ []*orders.ArchivedInfo, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.Query(`
rows, err := db.Query(`
SELECT order_limit_serialized, order_serialized, status, archived_at
FROM order_archive_
LIMIT ?
@@ -262,7 +270,7 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()

@@ -276,7 +284,7 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar

err := rows.Scan(&limitSerialized, &orderSerialized, &status, &archivedAt)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

var info orders.ArchivedInfo
@@ -288,26 +296,26 @@ func (db *ordersdb) ListArchived(ctx context.Context, limit int) (_ []*orders.Ar

err = proto.Unmarshal(limitSerialized, info.Limit)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

err = proto.Unmarshal(orderSerialized, info.Order)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrOrders.Wrap(err)
}

infos = append(infos, &info)
}

return infos, ErrInfo.Wrap(rows.Err())
return infos, ErrOrders.Wrap(rows.Err())
}

// CleanArchive deletes all entries older than ttl
func (db *ordersdb) CleanArchive(ctx context.Context, ttl time.Duration) (_ int, err error) {
func (db *ordersDB) CleanArchive(ctx context.Context, ttl time.Duration) (_ int, err error) {
defer mon.Task()(&ctx)(&err)

deleteBefore := time.Now().UTC().Add(-1 * ttl)
result, err := db.db.Exec(`
result, err := db.Exec(`
DELETE FROM order_archive_
WHERE archived_at <= ?
`, deleteBefore)
@@ -315,11 +323,11 @@ func (db *ordersdb) CleanArchive(ctx context.Context, ttl time.Duration) (_ int,
if err == sql.ErrNoRows {
return 0, nil
}
return 0, ErrInfo.Wrap(err)
return 0, ErrOrders.Wrap(err)
}
count, err := result.RowsAffected()
if err != nil {
return 0, ErrInfo.Wrap(err)
return 0, ErrOrders.Wrap(err)
}
return int(count), nil
}
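Note: the core of this refactor is replacing the embedded *InfoDB with an injected SQLDB plus the database file location. The SQLDB definition itself is not part of this hunk; the following is only a sketch of the shape such an interface plausibly has (the method set a *sql.DB already provides), so each per-table wrapper can be constructed directly from a standard database handle.

// Illustrative sketch only; the real SQLDB interface lives elsewhere in this
// package and may differ.
package storagenodedb

import (
	"context"
	"database/sql"
)

// SQLDB is assumed to expose the *sql.DB methods the wrappers above call.
type SQLDB interface {
	Begin() (*sql.Tx, error)
	Exec(query string, args ...interface{}) (sql.Result, error)
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}

// A *sql.DB satisfies this interface, so wiring a wrapper is just:
//   ordersDB := newOrdersDB(sqlDB, "/path/to/storage/orders.db")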
@@ -13,21 +13,27 @@ import (
"storj.io/storj/storagenode/pieces"
)

// ErrPieceExpiration represents errors from the piece expiration database.
var ErrPieceExpiration = errs.Class("piece expiration error")

type pieceExpirationDB struct {
*InfoDB
location string
SQLDB
}

// PieceExpirationDB returns database for storing piece expiration data
func (db *DB) PieceExpirationDB() pieces.PieceExpirationDB { return db.info.PieceExpirationDB() }

// PieceExpirationDB returns database for storing piece expiration data
func (db *InfoDB) PieceExpirationDB() pieces.PieceExpirationDB { return &db.pieceExpirationDB }
// newPieceExpirationDB returns a new instance of pieceExpirationDB initialized with the specified database.
func newPieceExpirationDB(db SQLDB, location string) *pieceExpirationDB {
return &pieceExpirationDB{
location: location,
SQLDB: db,
}
}

// GetExpired gets piece IDs that expire or have expired before the given time
func (db *pieceExpirationDB) GetExpired(ctx context.Context, expiresBefore time.Time, limit int64) (expiredPieceIDs []pieces.ExpiredInfo, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.QueryContext(ctx, `
rows, err := db.QueryContext(ctx, `
SELECT satellite_id, piece_id
FROM piece_expirations
WHERE piece_expiration < ?
@@ -35,7 +41,7 @@ func (db *pieceExpirationDB) GetExpired(ctx context.Context, expiresBefore time.
LIMIT ?
`, expiresBefore.UTC(), expiresBefore.UTC(), limit)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceExpiration.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()

@@ -44,7 +50,7 @@ func (db *pieceExpirationDB) GetExpired(ctx context.Context, expiresBefore time.
var pieceID storj.PieceID
err = rows.Scan(&satelliteID, &pieceID)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceExpiration.Wrap(err)
}
expiredPieceIDs = append(expiredPieceIDs, pieces.ExpiredInfo{
SatelliteID: satelliteID,
@@ -59,18 +65,18 @@ func (db *pieceExpirationDB) GetExpired(ctx context.Context, expiresBefore time.
func (db *pieceExpirationDB) SetExpiration(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID, expiresAt time.Time) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, `
_, err = db.ExecContext(ctx, `
INSERT INTO piece_expirations(satellite_id, piece_id, piece_expiration)
VALUES (?,?,?)
`, satellite, pieceID, expiresAt.UTC())
return ErrInfo.Wrap(err)
return ErrPieceExpiration.Wrap(err)
}

// DeleteExpiration removes an expiration record for the given piece ID on the given satellite
func (db *pieceExpirationDB) DeleteExpiration(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (found bool, err error) {
defer mon.Task()(&ctx)(&err)

result, err := db.db.ExecContext(ctx, `
result, err := db.ExecContext(ctx, `
DELETE FROM piece_expirations
WHERE satellite_id = ? AND piece_id = ?
`, satelliteID, pieceID)
@@ -89,11 +95,11 @@ func (db *pieceExpirationDB) DeleteExpiration(ctx context.Context, satelliteID s
func (db *pieceExpirationDB) DeleteFailed(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID, when time.Time) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, `
_, err = db.ExecContext(ctx, `
UPDATE piece_expirations
SET deletion_failed_at = ?
WHERE satellite_id = ?
AND piece_id = ?
`, when.UTC(), satelliteID, pieceID)
return ErrInfo.Wrap(err)
return ErrPieceExpiration.Wrap(err)
}
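For orientation, a minimal sketch of how the calls above combine into a cleanup pass. The interface literal mirrors the method signatures shown in this diff rather than the canonical pieces.PieceExpirationDB interface, so treat it as an assumption.

package example

import (
	"context"
	"time"

	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode/pieces"
)

// expirationDB mirrors the methods shown above; assumed shape, not the canonical interface.
type expirationDB interface {
	GetExpired(ctx context.Context, expiresBefore time.Time, limit int64) ([]pieces.ExpiredInfo, error)
	DeleteExpiration(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (bool, error)
}

// cleanupExpired deletes expiration records for pieces that have already expired.
func cleanupExpired(ctx context.Context, db expirationDB) error {
	expired, err := db.GetExpired(ctx, time.Now().UTC(), 100)
	if err != nil {
		return err
	}
	for _, e := range expired {
		if _, err := db.DeleteExpiration(ctx, e.SatelliteID, e.PieceID); err != nil {
			return err
		}
	}
	return nil
}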
@@ -18,28 +18,34 @@ import (
"storj.io/storj/storagenode/pieces"
)

type v0PieceInfo struct {
*InfoDB
// ErrPieceInfo represents errors from the piece info database.
var ErrPieceInfo = errs.Class("v0pieceinfodb error")

type v0PieceInfoDB struct {
location string
SQLDB
}

// V0PieceInfo returns database for storing piece information
func (db *DB) V0PieceInfo() pieces.V0PieceInfoDB { return db.info.V0PieceInfo() }

// V0PieceInfo returns database for storing piece information
func (db *InfoDB) V0PieceInfo() pieces.V0PieceInfoDB { return &db.v0PieceInfo }
// newV0PieceInfoDB returns a new instance of pieceinfo initialized with the specified database.
func newV0PieceInfoDB(db SQLDB, location string) *v0PieceInfoDB {
return &v0PieceInfoDB{
location: location,
SQLDB: db,
}
}

// Add inserts piece information into the database.
func (db *v0PieceInfo) Add(ctx context.Context, info *pieces.Info) (err error) {
func (db *v0PieceInfoDB) Add(ctx context.Context, info *pieces.Info) (err error) {
defer mon.Task()(&ctx)(&err)

orderLimit, err := proto.Marshal(info.OrderLimit)
if err != nil {
return ErrInfo.Wrap(err)
return ErrPieceInfo.Wrap(err)
}

uplinkPieceHash, err := proto.Marshal(info.UplinkPieceHash)
if err != nil {
return ErrInfo.Wrap(err)
return ErrPieceInfo.Wrap(err)
}

var pieceExpiration *time.Time
@@ -49,24 +55,24 @@ func (db *v0PieceInfo) Add(ctx context.Context, info *pieces.Info) (err error) {
}

// TODO remove `uplink_cert_id` from DB
_, err = db.db.ExecContext(ctx, db.Rebind(`
_, err = db.ExecContext(ctx, `
INSERT INTO
pieceinfo_(satellite_id, piece_id, piece_size, piece_creation, piece_expiration, order_limit, uplink_piece_hash, uplink_cert_id)
VALUES (?,?,?,?,?,?,?,?)
`), info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation.UTC(), pieceExpiration, orderLimit, uplinkPieceHash, 0)
`, info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation.UTC(), pieceExpiration, orderLimit, uplinkPieceHash, 0)

return ErrInfo.Wrap(err)
return ErrPieceInfo.Wrap(err)
}

func (db *v0PieceInfo) getAllPiecesOwnedBy(ctx context.Context, blobStore storage.Blobs, satelliteID storj.NodeID) ([]v0StoredPieceAccess, error) {
rows, err := db.db.QueryContext(ctx, db.Rebind(`
func (db *v0PieceInfoDB) getAllPiecesOwnedBy(ctx context.Context, blobStore storage.Blobs, satelliteID storj.NodeID) ([]v0StoredPieceAccess, error) {
rows, err := db.QueryContext(ctx, `
SELECT piece_id, piece_size, piece_creation, piece_expiration
FROM pieceinfo_
WHERE satellite_id = ?
ORDER BY piece_id
`), satelliteID)
`, satelliteID)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
var pieceInfos []v0StoredPieceAccess
@@ -78,7 +84,7 @@ func (db *v0PieceInfo) getAllPiecesOwnedBy(ctx context.Context, blobStore storag
thisAccess := &pieceInfos[len(pieceInfos)-1]
err = rows.Scan(&thisAccess.pieceID, &thisAccess.pieceSize, &thisAccess.creationTime, &thisAccess.expirationTime)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}
}
return pieceInfos, nil
@@ -91,7 +97,7 @@ func (db *v0PieceInfo) getAllPiecesOwnedBy(ctx context.Context, blobStore storag
//
// If blobStore is nil, the .Stat() and .FullPath() methods of the provided StoredPieceAccess
// instances will not work, but otherwise everything should be ok.
func (db *v0PieceInfo) WalkSatelliteV0Pieces(ctx context.Context, blobStore storage.Blobs, satelliteID storj.NodeID, walkFunc func(pieces.StoredPieceAccess) error) (err error) {
func (db *v0PieceInfoDB) WalkSatelliteV0Pieces(ctx context.Context, blobStore storage.Blobs, satelliteID storj.NodeID, walkFunc func(pieces.StoredPieceAccess) error) (err error) {
defer mon.Task()(&ctx)(&err)

// TODO: is it worth paging this query? we hope that SNs will not yet have too many V0 pieces.
@@ -113,7 +119,7 @@ func (db *v0PieceInfo) WalkSatelliteV0Pieces(ctx context.Context, blobStore stor
}

// Get gets piece information by satellite id and piece id.
func (db *v0PieceInfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (_ *pieces.Info, err error) {
func (db *v0PieceInfoDB) Get(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (_ *pieces.Info, err error) {
defer mon.Task()(&ctx)(&err)
info := &pieces.Info{}
info.SatelliteID = satelliteID
@@ -123,13 +129,13 @@ func (db *v0PieceInfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceI
var uplinkPieceHash []byte
var pieceExpiration *time.Time

err = db.db.QueryRowContext(ctx, db.Rebind(`
err = db.QueryRowContext(ctx, `
SELECT piece_size, piece_creation, piece_expiration, order_limit, uplink_piece_hash
FROM pieceinfo_
WHERE satellite_id = ? AND piece_id = ?
`), satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &pieceExpiration, &orderLimit, &uplinkPieceHash)
`, satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &pieceExpiration, &orderLimit, &uplinkPieceHash)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}

if pieceExpiration != nil {
@@ -139,50 +145,50 @@ func (db *v0PieceInfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceI
info.OrderLimit = &pb.OrderLimit{}
err = proto.Unmarshal(orderLimit, info.OrderLimit)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}

info.UplinkPieceHash = &pb.PieceHash{}
err = proto.Unmarshal(uplinkPieceHash, info.UplinkPieceHash)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}

return info, nil
}

// Delete deletes piece information.
func (db *v0PieceInfo) Delete(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (err error) {
func (db *v0PieceInfoDB) Delete(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, db.Rebind(`
_, err = db.ExecContext(ctx, `
DELETE FROM pieceinfo_
WHERE satellite_id = ?
AND piece_id = ?
`), satelliteID, pieceID)
`, satelliteID, pieceID)

return ErrInfo.Wrap(err)
return ErrPieceInfo.Wrap(err)
}

// DeleteFailed marks piece as a failed deletion.
func (db *v0PieceInfo) DeleteFailed(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID, now time.Time) (err error) {
func (db *v0PieceInfoDB) DeleteFailed(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID, now time.Time) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, db.Rebind(`
_, err = db.ExecContext(ctx, `
UPDATE pieceinfo_
SET deletion_failed_at = ?
WHERE satellite_id = ?
AND piece_id = ?
`), now.UTC(), satelliteID, pieceID)
`, now.UTC(), satelliteID, pieceID)

return ErrInfo.Wrap(err)
return ErrPieceInfo.Wrap(err)
}

// GetExpired gets ExpiredInfo records for pieces that are expired.
func (db *v0PieceInfo) GetExpired(ctx context.Context, expiredAt time.Time, limit int64) (infos []pieces.ExpiredInfo, err error) {
func (db *v0PieceInfoDB) GetExpired(ctx context.Context, expiredAt time.Time, limit int64) (infos []pieces.ExpiredInfo, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.QueryContext(ctx, db.Rebind(`
rows, err := db.QueryContext(ctx, `
SELECT satellite_id, piece_id
FROM pieceinfo_
WHERE piece_expiration IS NOT NULL
@@ -190,16 +196,16 @@ func (db *v0PieceInfo) GetExpired(ctx context.Context, expiredAt time.Time, limi
AND ((deletion_failed_at IS NULL) OR deletion_failed_at <> ?)
ORDER BY satellite_id
LIMIT ?
`), expiredAt.UTC(), expiredAt.UTC(), limit)
`, expiredAt.UTC(), expiredAt.UTC(), limit)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceInfo.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
info := pieces.ExpiredInfo{InPieceInfo: true}
err = rows.Scan(&info.SatelliteID, &info.PieceID)
if err != nil {
return infos, ErrInfo.Wrap(err)
return infos, ErrPieceInfo.Wrap(err)
}
infos = append(infos, info)
}
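A small sketch of driving WalkSatelliteV0Pieces with a callback, using only what the signature above guarantees (it counts entries and makes no assumptions about StoredPieceAccess methods). The assumption that pieces.V0PieceInfoDB exposes this method follows from the accessor lines above.

package example

import (
	"context"

	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
	"storj.io/storj/storagenode/pieces"
)

// countV0Pieces walks all V0 pieces held for one satellite and counts them.
// v0db stands in for the v0PieceInfoDB wrapper above.
func countV0Pieces(ctx context.Context, v0db pieces.V0PieceInfoDB, blobs storage.Blobs, satellite storj.NodeID) (int, error) {
	count := 0
	err := v0db.WalkSatelliteV0Pieces(ctx, blobs, satellite, func(pieces.StoredPieceAccess) error {
		count++
		return nil
	})
	return count, err
}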
@@ -10,22 +10,27 @@ import (
"github.com/zeebo/errs"

"storj.io/storj/pkg/storj"
"storj.io/storj/storagenode/pieces"
)

// ErrPieceSpaceUsed represents errors from the piece spaced used database.
var ErrPieceSpaceUsed = errs.Class("piece space used error")

type pieceSpaceUsedDB struct {
*InfoDB
location string
SQLDB
}

// PieceSpaceUsedDB returns database for storing piece expiration data from storagenode master db
func (db *DB) PieceSpaceUsedDB() pieces.PieceSpaceUsedDB { return db.info.PieceSpaceUsedDB() }

// PieceSpaceUsedDB returns database for storing piece expiration data from infoDB
func (db *InfoDB) PieceSpaceUsedDB() pieces.PieceSpaceUsedDB { return &db.pieceSpaceUsedDB }
// newPieceSpaceUsedDB returns a new instance of pieceSpaceUsedDB initialized with the specified database.
func newPieceSpaceUsedDB(db SQLDB, location string) *pieceSpaceUsedDB {
return &pieceSpaceUsedDB{
location: location,
SQLDB: db,
}
}

// Init creates the one total record if it doesn't already exist
func (db *pieceSpaceUsedDB) Init(ctx context.Context) (err error) {
row := db.db.QueryRow(`
row := db.QueryRow(`
SELECT total
FROM piece_space_used
WHERE satellite_id IS NULL;
@@ -37,25 +42,25 @@ func (db *pieceSpaceUsedDB) Init(ctx context.Context) (err error) {
if err == sql.ErrNoRows {
err = db.createInitTotal(ctx)
if err != nil {
return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}
}
}
return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}

func (db *pieceSpaceUsedDB) createInitTotal(ctx context.Context) (err error) {
_, err = db.db.Exec(`
_, err = db.Exec(`
INSERT INTO piece_space_used (total) VALUES (0)
`)
return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}

// GetTotal returns the total space used by all pieces stored on disk
func (db *pieceSpaceUsedDB) GetTotal(ctx context.Context) (_ int64, err error) {
defer mon.Task()(&ctx)(&err)

row := db.db.QueryRow(`
row := db.QueryRow(`
SELECT total
FROM piece_space_used
WHERE satellite_id IS NULL;
@@ -67,7 +72,7 @@ func (db *pieceSpaceUsedDB) GetTotal(ctx context.Context) (_ int64, err error) {
if err == sql.ErrNoRows {
return total, nil
}
return total, ErrInfo.Wrap(err)
return total, ErrPieceSpaceUsed.Wrap(err)
}
return total, nil
}
@@ -76,7 +81,7 @@ func (db *pieceSpaceUsedDB) GetTotal(ctx context.Context) (_ int64, err error) {
func (db *pieceSpaceUsedDB) GetTotalsForAllSatellites(ctx context.Context) (_ map[storj.NodeID]int64, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.QueryContext(ctx, `
rows, err := db.QueryContext(ctx, `
SELECT total, satellite_id
FROM piece_space_used
WHERE satellite_id IS NOT NULL
@@ -85,7 +90,7 @@ func (db *pieceSpaceUsedDB) GetTotalsForAllSatellites(ctx context.Context) (_ ma
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceSpaceUsed.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()

@@ -96,7 +101,7 @@ func (db *pieceSpaceUsedDB) GetTotalsForAllSatellites(ctx context.Context) (_ ma

err = rows.Scan(&total, &satelliteID)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrPieceSpaceUsed.Wrap(err)
}
totalBySatellite[satelliteID] = total
}
@@ -107,13 +112,13 @@ func (db *pieceSpaceUsedDB) GetTotalsForAllSatellites(ctx context.Context) (_ ma
func (db *pieceSpaceUsedDB) UpdateTotal(ctx context.Context, newTotal int64) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, db.Rebind(`
_, err = db.ExecContext(ctx, `
UPDATE piece_space_used
SET total = ?
WHERE satellite_id IS NULL
`), newTotal)
`, newTotal)

return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}

// UpdateTotalsForAllSatellites updates each record for total spaced used with a new value for each satelliteID
@@ -123,24 +128,24 @@ func (db *pieceSpaceUsedDB) UpdateTotalsForAllSatellites(ctx context.Context, ne
for satelliteID, newTotal := range newTotalsBySatellites {
if newTotal == 0 {
if err := db.deleteTotalBySatellite(ctx, satelliteID); err != nil {
return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}
continue
}

_, err = db.db.ExecContext(ctx, db.Rebind(`
_, err = db.ExecContext(ctx, `
INSERT INTO piece_space_used (total, satellite_id)
VALUES (?, ?)
ON CONFLICT (satellite_id)
DO UPDATE SET total = ?
WHERE satellite_id = ?
`), newTotal, satelliteID, newTotal, satelliteID)
`, newTotal, satelliteID, newTotal, satelliteID)

if err != nil {
if err == sql.ErrNoRows {
continue
}
return ErrInfo.Wrap(err)
return ErrPieceSpaceUsed.Wrap(err)
}
}
return nil
@@ -149,7 +154,7 @@ func (db *pieceSpaceUsedDB) UpdateTotalsForAllSatellites(ctx context.Context, ne
func (db *pieceSpaceUsedDB) deleteTotalBySatellite(ctx context.Context, satelliteID storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.ExecContext(ctx, `
_, err = db.ExecContext(ctx, `
DELETE FROM piece_space_used
WHERE satellite_id = ?
`, satelliteID)
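The per-satellite totals above rely on SQLite's upsert syntax. A standalone sketch of the same INSERT ... ON CONFLICT DO UPDATE shape follows; the go-sqlite3 driver and the string satellite ID are assumptions for illustration only.

package example

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3" // assumed driver for the sketch
)

// upsertTotal inserts a satellite's total or updates it when a row already exists.
// ON CONFLICT(satellite_id) requires a UNIQUE constraint or index on satellite_id.
func upsertTotal(db *sql.DB, satelliteID string, total int64) error {
	_, err := db.Exec(`
		INSERT INTO piece_space_used (total, satellite_id)
		VALUES (?, ?)
		ON CONFLICT (satellite_id)
		DO UPDATE SET total = excluded.total
	`, total, satelliteID)
	return err
}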
@@ -6,19 +6,27 @@ package storagenodedb
import (
"context"

"github.com/zeebo/errs"

"storj.io/storj/pkg/storj"
"storj.io/storj/storagenode/reputation"
)

// Reputation returns reputation.DB
func (db *InfoDB) Reputation() reputation.DB { return &reputationDB{db} }

// Reputation returns reputation.DB
func (db *DB) Reputation() reputation.DB { return db.info.Reputation() }
// ErrReputation represents errors from the reputation database.
var ErrReputation = errs.Class("reputation error")

// reputation works with node reputation DB
type reputationDB struct {
*InfoDB
location string
SQLDB
}

// newReputationDB returns a new instance of reputationDB initialized with the specified database.
func newReputationDB(db SQLDB, location string) *reputationDB {
return &reputationDB{
location: location,
SQLDB: db,
}
}

// Store inserts or updates reputation stats into the db
@@ -40,7 +48,7 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err
updated_at
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)`

_, err = db.db.ExecContext(ctx, stmt,
_, err = db.ExecContext(ctx, stmt,
stats.SatelliteID,
stats.Uptime.SuccessCount,
stats.Uptime.TotalCount,
@@ -64,7 +72,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r

stats := reputation.Stats{}

row := db.db.QueryRowContext(ctx,
row := db.QueryRowContext(ctx,
`SELECT * FROM reputation WHERE satellite_id = ?`,
satelliteID,
)
@@ -14,15 +14,18 @@ import (
"storj.io/storj/storagenode/storageusage"
)

// StorageUsage returns storageusage.DB
func (db *InfoDB) StorageUsage() storageusage.DB { return &storageusageDB{db} }

// StorageUsage returns storageusage.DB
func (db *DB) StorageUsage() storageusage.DB { return db.info.StorageUsage() }

// storageusageDB storage usage DB
type storageusageDB struct {
*InfoDB
location string
SQLDB
}

// newStorageusageDB returns a new instance of storageusageDB initialized with the specified database.
func newStorageusageDB(db SQLDB, location string) *storageusageDB {
return &storageusageDB{
location: location,
SQLDB: db,
}
}

// Store stores storage usage stamps to db replacing conflicting entries
@@ -38,7 +41,7 @@ func (db *storageusageDB) Store(ctx context.Context, stamps []storageusage.Stamp

return db.withTx(ctx, func(tx *sql.Tx) error {
for _, stamp := range stamps {
_, err = db.db.ExecContext(ctx, query, stamp.SatelliteID, stamp.AtRestTotal, stamp.Timestamp.UTC())
_, err = db.ExecContext(ctx, query, stamp.SatelliteID, stamp.AtRestTotal, stamp.Timestamp.UTC())

if err != nil {
return err
@@ -64,7 +67,7 @@ func (db *storageusageDB) GetDaily(ctx context.Context, satelliteID storj.NodeID
GROUP BY DATE(timestamp)
)`

rows, err := db.db.QueryContext(ctx, query, satelliteID, from.UTC(), to.UTC())
rows, err := db.QueryContext(ctx, query, satelliteID, from.UTC(), to.UTC())
if err != nil {
return nil, err
}
@@ -108,7 +111,7 @@ func (db *storageusageDB) GetDailyTotal(ctx context.Context, from, to time.Time)
GROUP BY DATE(timestamp), satellite_id
) GROUP BY DATE(timestamp)`

rows, err := db.db.QueryContext(ctx, query, from.UTC(), to.UTC())
rows, err := db.QueryContext(ctx, query, from.UTC(), to.UTC())
if err != nil {
return nil, err
}
@@ -135,3 +138,22 @@ func (db *storageusageDB) GetDailyTotal(ctx context.Context, from, to time.Time)

return stamps, nil
}

// withTx is a helper method which executes callback in transaction scope
func (db *storageusageDB) withTx(ctx context.Context, cb func(tx *sql.Tx) error) error {
tx, err := db.Begin()
if err != nil {
return err
}

defer func() {
if err != nil {
err = errs.Combine(err, tx.Rollback())
return
}

err = tx.Commit()
}()

return cb(tx)
}
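The withTx helper above wraps a callback in Begin/Rollback/Commit. A minimal sketch of the intended usage pattern follows, with the statements issued on the tx the callback receives; note that Store above passes its inserts to db.ExecContext rather than to tx, so as written those statements do not actually run inside the transaction.

package example

import (
	"context"
	"database/sql"

	"github.com/zeebo/errs"
)

// withTx mirrors the helper above for a plain *sql.DB (sketch, not the project's helper).
func withTx(ctx context.Context, db *sql.DB, cb func(tx *sql.Tx) error) (err error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			err = errs.Combine(err, tx.Rollback())
			return
		}
		err = tx.Commit()
	}()
	return cb(tx)
}

// Usage: statements issued on tx are committed or rolled back together, e.g.
//   err := withTx(ctx, db, func(tx *sql.Tx) error {
//       _, err := tx.ExecContext(ctx, `DELETE FROM storage_usage`)
//       return err
//   })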
@@ -13,46 +13,52 @@ import (
"storj.io/storj/storagenode/piecestore"
)

type usedSerials struct {
*InfoDB
// ErrUsedSerials represents errors from the used serials database.
var ErrUsedSerials = errs.Class("usedserialsdb error")

type usedSerialsDB struct {
location string
SQLDB
}

// UsedSerials returns used serials database.
func (db *DB) UsedSerials() piecestore.UsedSerials { return db.info.UsedSerials() }

// UsedSerials returns used serials database.
func (db *InfoDB) UsedSerials() piecestore.UsedSerials { return &usedSerials{db} }
// newUsedSerialsDB returns a new instance of usedSerials initialized with the specified database.
func newUsedSerialsDB(db SQLDB, location string) *usedSerialsDB {
return &usedSerialsDB{
location: location,
SQLDB: db,
}
}

// Add adds a serial to the database.
func (db *usedSerials) Add(ctx context.Context, satelliteID storj.NodeID, serialNumber storj.SerialNumber, expiration time.Time) (err error) {
func (db *usedSerialsDB) Add(ctx context.Context, satelliteID storj.NodeID, serialNumber storj.SerialNumber, expiration time.Time) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.Exec(`
_, err = db.Exec(`
INSERT INTO
used_serial_(satellite_id, serial_number, expiration)
VALUES(?, ?, ?)`, satelliteID, serialNumber, expiration.UTC())

return ErrInfo.Wrap(err)
return ErrUsedSerials.Wrap(err)
}

// DeleteExpired deletes expired serial numbers
func (db *usedSerials) DeleteExpired(ctx context.Context, now time.Time) (err error) {
func (db *usedSerialsDB) DeleteExpired(ctx context.Context, now time.Time) (err error) {
defer mon.Task()(&ctx)(&err)

_, err = db.db.Exec(`DELETE FROM used_serial_ WHERE expiration < ?`, now.UTC())
return ErrInfo.Wrap(err)
_, err = db.Exec(`DELETE FROM used_serial_ WHERE expiration < ?`, now.UTC())
return ErrUsedSerials.Wrap(err)
}

// IterateAll iterates all serials.
// Note, this will lock the database and should only be used during startup.
func (db *usedSerials) IterateAll(ctx context.Context, fn piecestore.SerialNumberFn) (err error) {
func (db *usedSerialsDB) IterateAll(ctx context.Context, fn piecestore.SerialNumberFn) (err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.Query(`SELECT satellite_id, serial_number, expiration FROM used_serial_`)
rows, err := db.Query(`SELECT satellite_id, serial_number, expiration FROM used_serial_`)
if err != nil {
return ErrInfo.Wrap(err)
return ErrUsedSerials.Wrap(err)
}
defer func() { err = errs.Combine(err, ErrInfo.Wrap(rows.Close())) }()
defer func() { err = errs.Combine(err, ErrUsedSerials.Wrap(rows.Close())) }()

for rows.Next() {
var satelliteID storj.NodeID
@@ -61,11 +67,11 @@ func (db *usedSerials) IterateAll(ctx context.Context, fn piecestore.SerialNumbe

err := rows.Scan(&satelliteID, &serialNumber, &expiration)
if err != nil {
return ErrInfo.Wrap(err)
return ErrUsedSerials.Wrap(err)
}

fn(satelliteID, serialNumber, expiration)
}

return ErrInfo.Wrap(rows.Err())
return ErrUsedSerials.Wrap(rows.Err())
}
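A sketch of consuming IterateAll at startup, assuming the callback signature visible in the loop above (satellite ID, serial number, expiration); the real callback type is piecestore.SerialNumberFn.

package example

import (
	"context"
	"time"

	"storj.io/storj/pkg/storj"
)

// serialNumberFn matches the arguments passed to fn above (assumed shape).
type serialNumberFn func(satelliteID storj.NodeID, serialNumber storj.SerialNumber, expiration time.Time)

// countSerials shows the callback style: the iterator pushes each stored row into fn.
func countSerials(ctx context.Context, iterate func(context.Context, serialNumberFn) error) (int, error) {
	count := 0
	err := iterate(ctx, func(storj.NodeID, storj.SerialNumber, time.Time) {
		count++
	})
	return count, err
}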
storagenode/storagenodedb/versions.go (new file, 27 lines)
@@ -0,0 +1,27 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenodedb

// versions represents the database that contains the database schema version history.
type versionsDB struct {
location string
SQLDB
}

func newVersionsDB(db SQLDB, location string) *versionsDB {
return &versionsDB{
location: location,
SQLDB: db,
}
}

// Rebind rebind parameters
// These are implemented because the migrate.DB interface requires them.
// Maybe in the future we should untangle those.
func (db *versionsDB) Rebind(s string) string { return s }

// Schema returns schema
// These are implemented because the migrate.DB interface requires them.
// Maybe in the future we should untangle those.
func (db *versionsDB) Schema() string { return "" }
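Rebind and Schema above are stubs that exist only so versionsDB satisfies the migration framework's DB interface. A compile-time check of that assumed shape could look like the following fragment (the real migrate.DB may require more than these two methods).

// Assumed, simplified shape of the interface the migration framework needs.
type migrationDB interface {
	Rebind(string) string
	Schema() string
}

// Hypothetical compile-time assertion that versionsDB implements it.
var _ migrationDB = (*versionsDB)(nil)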
@@ -13,27 +13,34 @@ import (

"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storagenode/vouchers"
)

type vouchersdb struct{ *InfoDB }
// ErrVouchers represents errors from the vouchers database.
var ErrVouchers = errs.Class("vouchersdb error")

// Vouchers returns database for storing vouchers
func (db *DB) Vouchers() vouchers.DB { return db.info.Vouchers() }
type vouchersDB struct {
location string
SQLDB
}

// Vouchers returns database for storing vouchers
func (db *InfoDB) Vouchers() vouchers.DB { return &vouchersdb{db} }
// newVouchersDB returns a new instance of vouchersdb initialized with the specified database.
func newVouchersDB(db SQLDB, location string) *vouchersDB {
return &vouchersDB{
location: location,
SQLDB: db,
}
}

// Put inserts or updates a voucher from a satellite
func (db *vouchersdb) Put(ctx context.Context, voucher *pb.Voucher) (err error) {
func (db *vouchersDB) Put(ctx context.Context, voucher *pb.Voucher) (err error) {
defer mon.Task()(&ctx)(&err)

voucherSerialized, err := proto.Marshal(voucher)
if err != nil {
return ErrInfo.Wrap(err)
return ErrVouchers.Wrap(err)
}

_, err = db.db.Exec(`
_, err = db.Exec(`
INSERT INTO vouchers(
satellite_id,
voucher_serialized,
@@ -48,13 +55,13 @@ func (db *vouchersdb) Put(ctx context.Context, voucher *pb.Voucher) (err error)
}

// NeedVoucher returns true if a voucher from a particular satellite is expired, about to expire, or does not exist
func (db *vouchersdb) NeedVoucher(ctx context.Context, satelliteID storj.NodeID, expirationBuffer time.Duration) (need bool, err error) {
func (db *vouchersDB) NeedVoucher(ctx context.Context, satelliteID storj.NodeID, expirationBuffer time.Duration) (need bool, err error) {
defer mon.Task()(&ctx)(&err)

expiresBefore := time.Now().Add(expirationBuffer)

// query returns row if voucher is good. If not, it is either expiring or does not exist
row := db.db.QueryRow(`
row := db.QueryRow(`
SELECT satellite_id
FROM vouchers
WHERE satellite_id = ? AND expiration >= ?
@@ -66,16 +73,16 @@ func (db *vouchersdb) NeedVoucher(ctx context.Context, satelliteID storj.NodeID,
if err == sql.ErrNoRows {
return true, nil
}
return false, ErrInfo.Wrap(err)
return false, ErrVouchers.Wrap(err)
}
return false, nil
}

// GetAll returns all vouchers in the table
func (db *vouchersdb) GetAll(ctx context.Context) (vouchers []*pb.Voucher, err error) {
func (db *vouchersDB) GetAll(ctx context.Context) (vouchers []*pb.Voucher, err error) {
defer mon.Task()(&ctx)(&err)

rows, err := db.db.Query(`
rows, err := db.Query(`
SELECT voucher_serialized
FROM vouchers
`)
@@ -83,7 +90,7 @@ func (db *vouchersdb) GetAll(ctx context.Context) (vouchers []*pb.Voucher, err e
if err == sql.ErrNoRows {
return nil, nil
}
return nil, ErrInfo.Wrap(err)
return nil, ErrVouchers.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()

@@ -91,12 +98,12 @@ func (db *vouchersdb) GetAll(ctx context.Context) (vouchers []*pb.Voucher, err e
var voucherSerialized []byte
err := rows.Scan(&voucherSerialized)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrVouchers.Wrap(err)
}
voucher := &pb.Voucher{}
err = proto.Unmarshal(voucherSerialized, voucher)
if err != nil {
return nil, ErrInfo.Wrap(err)
return nil, ErrVouchers.Wrap(err)
}
vouchers = append(vouchers, voucher)
}
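A sketch of the intended calling pattern for the voucher methods above: check NeedVoucher before asking the satellite for a new voucher, then Put the result. The request parameter stands in for the RPC that actually obtains a voucher and is hypothetical, and vouchers.DB is assumed to expose the Put/NeedVoucher signatures shown in the diff.

package example

import (
	"context"
	"time"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode/vouchers"
)

// renewIfNeeded stores a fresh voucher when the current one is missing or close to expiring.
func renewIfNeeded(ctx context.Context, db vouchers.DB, satellite storj.NodeID,
	request func(context.Context, storj.NodeID) (*pb.Voucher, error)) error {
	need, err := db.NeedVoucher(ctx, satellite, 48*time.Hour)
	if err != nil {
		return err
	}
	if !need {
		return nil
	}
	voucher, err := request(ctx, satellite)
	if err != nil {
		return err
	}
	return db.Put(ctx, voucher)
}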