storagenode: delete psserver (#1837)

This commit is contained in:
Egon Elbre 2019-04-26 08:17:18 +03:00 committed by GitHub
parent d843f80773
commit 60c4c10c79
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 59 additions and 1412 deletions

.github/CODEOWNERS vendored

@@ -1,5 +1,5 @@
-# piecestore
-/pkg/piecestore/ @aleitner
+# storagenode
+/storagenode/ @aleitner
 
 # audits
 /pkg/audit/ @navillasa


@@ -9,15 +9,16 @@ import (
 	"path/filepath"
 	"sort"
 	"text/tabwriter"
+	"time"
 
 	"github.com/spf13/cobra"
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 
 	"storj.io/storj/internal/fpath"
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/internal/version"
 	"storj.io/storj/pkg/cfgstruct"
-	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/process"
 	"storj.io/storj/pkg/storj"
 	"storj.io/storj/storagenode"
@@ -227,6 +228,8 @@ func cmdConfig(cmd *cobra.Command, args []string) (err error) {
 }
 
 func cmdDiag(cmd *cobra.Command, args []string) (err error) {
+	ctx := process.Ctx(cmd)
+
 	diagDir, err := filepath.Abs(confDir)
 	if err != nil {
 		return err
@@ -235,7 +238,7 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
 
 	// check if the directory exists
 	_, err = os.Stat(diagDir)
 	if err != nil {
-		fmt.Println("Storagenode directory doesn't exist", diagDir)
+		fmt.Println("storage node directory doesn't exist", diagDir)
 		return err
 	}
@@ -247,76 +250,38 @@
 		err = errs.Combine(err, db.Close())
 	}()
 
-	//get all bandwidth aggrements entries already ordered
-	bwAgreements, err := db.PSDB().GetBandwidthAllocations()
+	summaries, err := db.Bandwidth().SummaryBySatellite(ctx, time.Time{}, time.Now())
 	if err != nil {
-		fmt.Printf("storage node 'bandwidth_agreements' table read error: %v\n", err)
+		fmt.Printf("unable to get bandwidth summary: %v\n", err)
 		return err
 	}
 
-	// Agreement is a struct that contains a bandwidth agreement and the associated signature
-	type SatelliteSummary struct {
-		TotalBytes           int64
-		PutActionCount       int64
-		GetActionCount       int64
-		GetAuditActionCount  int64
-		GetRepairActionCount int64
-		PutRepairActionCount int64
-		TotalTransactions    int64
-		// additional attributes add here ...
-	}
-
-	// attributes per satelliteid
-	summaries := make(map[storj.NodeID]*SatelliteSummary)
-	satelliteIDs := storj.NodeIDList{}
-	for _, rbaVal := range bwAgreements {
-		for _, rbaDataVal := range rbaVal {
-			rba := rbaDataVal.Agreement
-			pba := rba.PayerAllocation
-			summary, ok := summaries[pba.SatelliteId]
-			if !ok {
-				summaries[pba.SatelliteId] = &SatelliteSummary{}
-				satelliteIDs = append(satelliteIDs, pba.SatelliteId)
-				summary = summaries[pba.SatelliteId]
-			}
-
-			// fill the summary info
-			summary.TotalBytes += rba.Total
-			summary.TotalTransactions++
-
-			switch pba.Action {
-			case pb.BandwidthAction_PUT:
-				summary.PutActionCount++
-			case pb.BandwidthAction_GET:
-				summary.GetActionCount++
-			case pb.BandwidthAction_GET_AUDIT:
-				summary.GetAuditActionCount++
-			case pb.BandwidthAction_GET_REPAIR:
-				summary.GetRepairActionCount++
-			case pb.BandwidthAction_PUT_REPAIR:
-				summary.PutRepairActionCount++
-			}
-		}
-	}
-
-	// initialize the table header (fields)
-	const padding = 3
-	w := tabwriter.NewWriter(os.Stdout, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)
-	fmt.Fprintln(w, "SatelliteID\tTotal\t# Of Transactions\tPUT Action\tGET Action\tGET (Audit) Action\tGET (Repair) Action\tPUT (Repair) Action\t")
-
-	// populate the row fields
-	sort.Sort(satelliteIDs)
-	for _, satelliteID := range satelliteIDs {
-		summary := summaries[satelliteID]
-		fmt.Fprint(w, satelliteID, "\t", summary.TotalBytes, "\t", summary.TotalTransactions, "\t",
-			summary.PutActionCount, "\t", summary.GetActionCount, "\t", summary.GetAuditActionCount,
-			"\t", summary.GetRepairActionCount, "\t", summary.PutRepairActionCount, "\t\n")
-	}
-
-	// display the data
-	err = w.Flush()
-	return err
+	satellites := storj.NodeIDList{}
+	for id := range summaries {
+		satellites = append(satellites, id)
+	}
+	sort.Sort(satellites)
+
+	w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.AlignRight|tabwriter.Debug)
+	defer func() { err = errs.Combine(err, w.Flush()) }()
+
+	fmt.Fprint(w, "Satellite\tTotal\tPut\tGet\tDelete\tAudit Get\tRepair Get\tRepair Put\n")
+	for _, id := range satellites {
+		summary := summaries[id]
+		fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
+			id,
+			memory.Size(summary.Total()),
+			memory.Size(summary.Put),
+			memory.Size(summary.Get),
+			memory.Size(summary.Delete),
+			memory.Size(summary.GetAudit),
+			memory.Size(summary.GetRepair),
+			memory.Size(summary.PutRepair),
+		)
+	}
+
+	return nil
 }
func main() { func main() {

go.mod

@@ -108,7 +108,7 @@ require (
 	go.uber.org/multierr v1.1.0 // indirect
 	go.uber.org/zap v1.9.1
 	golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c
-	golang.org/x/net v0.0.0-20190328230028-74de082e2cca
+	golang.org/x/net v0.0.0-20190328230028-74de082e2cca // indirect
 	golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
 	golang.org/x/sys v0.0.0-20190402142545-baf5eb976a8c
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect


@@ -43,7 +43,6 @@ import (
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/peertls/extensions"
 	"storj.io/storj/pkg/peertls/tlsopts"
-	"storj.io/storj/pkg/piecestore/psserver"
 	"storj.io/storj/pkg/server"
 	"storj.io/storj/pkg/storj"
 	"storj.io/storj/satellite"
@@ -593,7 +592,7 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatelliteIDs []strin
 				Wallet: "0x" + strings.Repeat("00", 20),
 			},
 		},
-		Storage: psserver.Config{
+		Storage: piecestore.OldConfig{
 			Path:               "", // TODO: this argument won't be needed with master storagenodedb
 			AllocatedDiskSpace: 1500 * memory.GB,
 			AllocatedBandwidth: memory.TB,


@@ -1,153 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package agreementsender
import (
"io"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
)
var (
// ASError wraps errors returned from agreementsender package
ASError = errs.Class("agreement sender error")
)
// AgreementSender maintains variables required for reading bandwidth agreements from a DB and sending them to a Payers
type AgreementSender struct { // TODO: rename to service
DB *psdb.DB
log *zap.Logger
transport transport.Client
kad *kademlia.Kademlia
checkInterval time.Duration
}
// TODO: take transport instead of identity as argument
// New creates an Agreement Sender
func New(log *zap.Logger, DB *psdb.DB, tc transport.Client, kad *kademlia.Kademlia, checkInterval time.Duration) *AgreementSender {
return &AgreementSender{DB: DB, log: log, transport: tc, kad: kad, checkInterval: checkInterval}
}
// Run the agreement sender with a context to check for cancel
func (as *AgreementSender) Run(ctx context.Context) error {
//todo: we likely don't want to stop on err, but consider returning errors via a channel
ticker := time.NewTicker(as.checkInterval)
defer ticker.Stop()
for {
as.log.Debug("is running", zap.Duration("duration", as.checkInterval))
agreementGroups, err := as.DB.GetBandwidthAllocations()
if err != nil {
as.log.Error("could not retrieve bandwidth allocations", zap.Error(err))
continue
}
if len(agreementGroups) > 0 {
var group errgroup.Group
// send agreement payouts
for satellite, agreements := range agreementGroups {
satellite, agreements := satellite, agreements
group.Go(func() error {
timedCtx, cancel := context.WithTimeout(ctx, time.Hour)
defer cancel()
as.SettleAgreements(timedCtx, satellite, agreements)
return nil
})
}
_ = group.Wait() // doesn't return errors
}
// Delete older payout irrespective of its status
if err = as.DB.DeleteBandwidthAllocationPayouts(); err != nil {
as.log.Error("failed to delete bandwidth allocation", zap.Error(err))
}
select {
case <-ticker.C:
case <-ctx.Done():
return ctx.Err()
}
}
}
// SettleAgreements uploads agreements to the satellite
func (as *AgreementSender) SettleAgreements(ctx context.Context, satelliteID storj.NodeID, agreements []*psdb.Agreement) {
as.log.Info("sending agreements to satellite", zap.Int("number of agreements", len(agreements)), zap.String("satellite id", satelliteID.String()))
satellite, err := as.kad.FindNode(ctx, satelliteID)
if err != nil {
as.log.Warn("could not find satellite", zap.String("satellite id", satelliteID.String()), zap.Error(err))
return
}
conn, err := as.transport.DialNode(ctx, &satellite)
if err != nil {
as.log.Warn("could not dial satellite", zap.String("satellite id", satelliteID.String()), zap.Error(err))
return
}
defer func() {
if err := conn.Close(); err != nil {
as.log.Warn("failed to close connection", zap.String("satellite id", satelliteID.String()), zap.Error(err))
}
}()
client, err := pb.NewBandwidthClient(conn).Settlement(ctx)
if err != nil {
as.log.Error("failed to start settlement", zap.String("satellite id", satelliteID.String()), zap.Error(err))
return
}
var group errgroup.Group
group.Go(func() error {
for _, agreement := range agreements {
err := client.Send(&pb.BandwidthSettlementRequest{
Allocation: &agreement.Agreement,
})
if err != nil {
return err
}
}
return client.CloseSend()
})
for {
response, err := client.Recv()
if err != nil {
if err == io.EOF {
break
}
as.log.Error("failed to recv response", zap.String("satellite id", satelliteID.String()), zap.Error(err))
break
}
switch response.Status {
case pb.AgreementsSummary_REJECTED:
err = as.DB.UpdateBandwidthAllocationStatus(response.SerialNumber, psdb.AgreementStatusReject)
if err != nil {
as.log.Error("error", zap.String("satellite id", satelliteID.String()), zap.Error(err))
}
case pb.AgreementsSummary_OK:
err = as.DB.UpdateBandwidthAllocationStatus(response.SerialNumber, psdb.AgreementStatusSent)
if err != nil {
as.log.Error("error", zap.String("satellite id", satelliteID.String()), zap.Error(err))
}
default:
as.log.Error("unexpected response", zap.String("satellite id", satelliteID.String()), zap.Error(err))
}
}
if err := group.Wait(); err != nil {
as.log.Error("sending agreements returned an error", zap.String("satellite id", satelliteID.String()), zap.Error(err))
}
}


@@ -1,24 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package psserver
import (
"time"
"storj.io/storj/internal/memory"
)
// Config contains everything necessary for a server
type Config struct {
Path string `help:"path to store data in" default:"$CONFDIR/storage"`
WhitelistedSatelliteIDs string `help:"a comma-separated list of approved satellite node ids" default:""`
SatelliteIDRestriction bool `help:"if true, only allow data from approved satellites" devDefault:"false" releaseDefault:"true"`
AllocatedDiskSpace memory.Size `user:"true" help:"total allocated disk space in bytes" default:"1TB"`
AllocatedBandwidth memory.Size `user:"true" help:"total allocated bandwidth in bytes" default:"500GiB"`
KBucketRefreshInterval time.Duration `help:"how frequently Kademlia bucket should be refreshed with node stats" default:"1h0m0s"`
AgreementSenderCheckInterval time.Duration `help:"duration between agreement checks" default:"1h0m0s"`
CollectorInterval time.Duration `help:"interval to check for expired pieces" default:"1h0m0s"`
}


@@ -1,142 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package psdb_test
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/storj/internal/dbutil/dbschema"
"storj.io/storj/internal/dbutil/sqliteutil"
"storj.io/storj/pkg/piecestore/psserver/psdb"
)
// loadSnapshots loads all the dbschemas from testdata/db.* caching the result
func loadSnapshots() (*dbschema.Snapshots, error) {
snapshots := &dbschema.Snapshots{}
// snapshot represents clean DB state
snapshots.Add(&dbschema.Snapshot{
Version: -1,
Schema: &dbschema.Schema{},
Script: "",
})
// find all sql files
matches, err := filepath.Glob("testdata/sqlite.*")
if err != nil {
return nil, err
}
for _, match := range matches {
versionStr := match[17 : len(match)-4] // hack to avoid trim issues with path differences in windows/linux
version, err := strconv.Atoi(versionStr)
if err != nil {
return nil, err
}
scriptData, err := ioutil.ReadFile(match)
if err != nil {
return nil, err
}
snapshot, err := sqliteutil.LoadSnapshotFromSQL(string(scriptData))
if err != nil {
return nil, err
}
snapshot.Version = version
snapshots.Add(snapshot)
}
snapshots.Sort()
return snapshots, nil
}
const newDataSeparator = `-- NEW DATA --`
func newData(snap *dbschema.Snapshot) string {
tokens := strings.SplitN(snap.Script, newDataSeparator, 2)
if len(tokens) != 2 {
return ""
}
return tokens[1]
}
const (
minBaseVersion = -1 // clean DB
maxBaseVersion = 0
)
func TestMigrate(t *testing.T) {
snapshots, err := loadSnapshots()
require.NoError(t, err)
for _, base := range snapshots.List {
if base.Version < minBaseVersion || maxBaseVersion < base.Version {
continue
}
t.Run(strconv.Itoa(base.Version), func(t *testing.T) {
log := zaptest.NewLogger(t)
// create a new satellitedb connection
db, err := psdb.OpenInMemory()
require.NoError(t, err)
defer func() { require.NoError(t, db.Close()) }()
// insert the base data into sqlite
_, err = db.RawDB().Exec(base.Script)
require.NoError(t, err)
// get migration for this database
migrations := db.Migration()
for i, step := range migrations.Steps {
// the schema is different when migration step is before the step, cannot test the layout
if step.Version < base.Version {
continue
}
tag := fmt.Sprintf("#%d - v%d", i, step.Version)
// run migration up to a specific version
err := migrations.TargetVersion(step.Version).Run(log.Named("migrate"), db)
require.NoError(t, err, tag)
// find the matching expected version
expected, ok := snapshots.FindVersion(step.Version)
require.True(t, ok)
// insert data for new tables
if newdata := newData(expected); newdata != "" && step.Version > base.Version {
_, err = db.RawDB().Exec(newdata)
require.NoError(t, err, tag)
}
// load schema from database
currentSchema, err := sqliteutil.QuerySchema(db.RawDB())
require.NoError(t, err, tag)
// we don't care changes in versions table
currentSchema.DropTable("versions")
// load data from database
currentData, err := sqliteutil.QueryData(db.RawDB(), currentSchema)
require.NoError(t, err, tag)
// verify schema and data
require.Equal(t, expected.Schema, currentSchema, tag)
require.Equal(t, expected.Data, currentData, tag)
}
})
}
}


@@ -1,465 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package psdb
import (
"database/sql"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"github.com/gogo/protobuf/proto"
_ "github.com/mattn/go-sqlite3" // register sqlite to sql
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/internal/migrate"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
var (
// Error is the default psdb errs class
Error = errs.Class("psdb")
)
// AgreementStatus keep tracks of the agreement payout status
type AgreementStatus int32
const (
// AgreementStatusUnsent sets the agreement status to UNSENT
AgreementStatusUnsent = iota
// AgreementStatusSent sets the agreement status to SENT
AgreementStatusSent
// AgreementStatusReject sets the agreement status to REJECT
AgreementStatusReject
// add new status here ...
)
// DB is a piece store database
type DB struct {
mu sync.Mutex
db *sql.DB
dbPath string
}
// Agreement is a struct that contains a bandwidth agreement and the associated signature
type Agreement struct {
Agreement pb.Order
Signature []byte
}
// Open opens DB at DBPath
func Open(DBPath string) (db *DB, err error) {
if err = os.MkdirAll(filepath.Dir(DBPath), 0700); err != nil {
return nil, err
}
sqlite, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?%s", DBPath, "_journal=WAL"))
if err != nil {
return nil, Error.Wrap(err)
}
db = &DB{
db: sqlite,
dbPath: DBPath,
}
return db, nil
}
// OpenInMemory opens sqlite DB inmemory
func OpenInMemory() (db *DB, err error) {
sqlite, err := sql.Open("sqlite3", ":memory:")
if err != nil {
return nil, err
}
db = &DB{
db: sqlite,
}
return db, nil
}
// Migration define piecestore DB migration
func (db *DB) Migration() *migrate.Migration {
migration := &migrate.Migration{
Table: "versions",
Steps: []*migrate.Step{
{
Description: "Initial setup",
Version: 0,
Action: migrate.SQL{
`CREATE TABLE IF NOT EXISTS ttl (
id BLOB UNIQUE,
created INT(10),
expires INT(10),
size INT(10)
)`,
`CREATE TABLE IF NOT EXISTS bandwidth_agreements (
satellite BLOB,
agreement BLOB,
signature BLOB
)`,
`CREATE INDEX IF NOT EXISTS idx_ttl_expires ON ttl (
expires
)`,
`CREATE TABLE IF NOT EXISTS bwusagetbl (
size INT(10),
daystartdate INT(10),
dayenddate INT(10)
)`,
},
},
{
Description: "Extending bandwidth_agreements table and drop bwusagetbl",
Version: 1,
Action: migrate.Func(func(log *zap.Logger, db migrate.DB, tx *sql.Tx) error {
v1sql := migrate.SQL{
`ALTER TABLE bandwidth_agreements ADD COLUMN uplink BLOB`,
`ALTER TABLE bandwidth_agreements ADD COLUMN serial_num BLOB`,
`ALTER TABLE bandwidth_agreements ADD COLUMN total INT(10)`,
`ALTER TABLE bandwidth_agreements ADD COLUMN max_size INT(10)`,
`ALTER TABLE bandwidth_agreements ADD COLUMN created_utc_sec INT(10)`,
`ALTER TABLE bandwidth_agreements ADD COLUMN expiration_utc_sec INT(10)`,
`ALTER TABLE bandwidth_agreements ADD COLUMN action INT(10)`,
`ALTER TABLE bandwidth_agreements ADD COLUMN daystart_utc_sec INT(10)`,
}
err := v1sql.Run(log, db, tx)
if err != nil {
return err
}
// iterate through the table and fill
err = func() error {
rows, err := tx.Query(`SELECT agreement, signature FROM bandwidth_agreements`)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var rbaBytes, signature []byte
rba := &pb.RenterBandwidthAllocation{}
err := rows.Scan(&rbaBytes, &signature)
if err != nil {
return err
}
// unmarshal the rbaBytes
err = proto.Unmarshal(rbaBytes, rba)
if err != nil {
return err
}
// update the new columns data
t := time.Unix(rba.PayerAllocation.CreatedUnixSec, 0)
startofthedayUnixSec := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC).Unix()
// update the row by signature as it is unique
_, err = tx.Exec(`UPDATE bandwidth_agreements SET
uplink = ?,
serial_num = ?,
total = ?,
max_size = ?,
created_utc_sec = ?,
expiration_utc_sec = ?,
action = ?,
daystart_utc_sec = ?
WHERE signature = ?
`,
rba.PayerAllocation.UplinkId.Bytes(), rba.PayerAllocation.SerialNumber,
rba.Total, rba.PayerAllocation.MaxSize, rba.PayerAllocation.CreatedUnixSec,
rba.PayerAllocation.ExpirationUnixSec, rba.PayerAllocation.GetAction(),
startofthedayUnixSec, signature)
if err != nil {
return err
}
}
return rows.Err()
}()
if err != nil {
return err
}
_, err = tx.Exec(`DROP TABLE bwusagetbl;`)
if err != nil {
return err
}
return nil
}),
},
{
Description: "Add status column for bandwidth_agreements",
Version: 2,
Action: migrate.SQL{
`ALTER TABLE bandwidth_agreements ADD COLUMN status INT(10) DEFAULT 0`,
},
},
{
Description: "Add index on serial number for bandwidth_agreements",
Version: 3,
Action: migrate.SQL{
`CREATE INDEX IF NOT EXISTS idx_bwa_serial ON bandwidth_agreements (serial_num)`,
},
},
{
Description: "Initiate Network reset",
Version: 4,
Action: migrate.SQL{
`UPDATE ttl SET expires = 1553727600 WHERE created <= 1553727600 `,
},
},
{
Description: "delete obsolete pieces",
Version: 5,
Action: migrate.Func(func(log *zap.Logger, mdb migrate.DB, tx *sql.Tx) error {
path := db.dbPath
if path == "" {
log.Warn("Empty path")
return nil
}
err := db.DeleteObsolete(path)
if err != nil {
log.Warn("err deleting obsolete paths: ", zap.Error(err))
}
return nil
}),
},
},
}
return migration
}
// Close the database
func (db *DB) Close() error {
return db.db.Close()
}
func (db *DB) locked() func() {
db.mu.Lock()
return db.mu.Unlock
}
// DeleteObsolete deletes obsolete pieces
func (db *DB) DeleteObsolete(path string) (err error) {
path = filepath.Dir(path)
files, err := ioutil.ReadDir(path)
if err != nil {
return err
}
var errList errs.Group
// iterate thru files list
for _, f := range files {
if len(f.Name()) == 2 {
errList.Add(os.RemoveAll(filepath.Join(path, f.Name())))
}
}
return errList.Err()
}
// WriteBandwidthAllocToDB inserts bandwidth agreement into DB
func (db *DB) WriteBandwidthAllocToDB(rba *pb.Order) error {
rbaBytes, err := proto.Marshal(rba)
if err != nil {
return err
}
defer db.locked()()
// We begin extracting the satellite_id
// The satellite id can be used to sort the bandwidth agreements
// If the agreements are sorted we can send them in bulk streams to the satellite
t := time.Now()
startofthedayunixsec := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()).Unix()
_, err = db.db.Exec(`INSERT INTO bandwidth_agreements (satellite, agreement, signature, uplink, serial_num, total, max_size, created_utc_sec, status, expiration_utc_sec, action, daystart_utc_sec) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
rba.PayerAllocation.SatelliteId.Bytes(), rbaBytes, rba.GetSignature(),
rba.PayerAllocation.UplinkId.Bytes(), rba.PayerAllocation.SerialNumber,
rba.Total, rba.PayerAllocation.MaxSize, rba.PayerAllocation.CreatedUnixSec, AgreementStatusUnsent,
rba.PayerAllocation.ExpirationUnixSec, rba.PayerAllocation.GetAction().String(),
startofthedayunixsec)
return err
}
// DeleteBandwidthAllocationPayouts delete paid and/or old payout enteries based on days old
func (db *DB) DeleteBandwidthAllocationPayouts() error {
defer db.locked()()
//@TODO make a config value for older days
t := time.Now().Add(time.Hour * 24 * -90).Unix()
_, err := db.db.Exec(`DELETE FROM bandwidth_agreements WHERE created_utc_sec < ?`, t)
if err == sql.ErrNoRows {
err = nil
}
return err
}
// UpdateBandwidthAllocationStatus update the bwa payout status
func (db *DB) UpdateBandwidthAllocationStatus(serialnum string, status AgreementStatus) (err error) {
defer db.locked()()
_, err = db.db.Exec(`UPDATE bandwidth_agreements SET status = ? WHERE serial_num = ?`, status, serialnum)
return err
}
// DeleteBandwidthAllocationBySerialnum finds an allocation by signature and deletes it
func (db *DB) DeleteBandwidthAllocationBySerialnum(serialnum string) error {
defer db.locked()()
_, err := db.db.Exec(`DELETE FROM bandwidth_agreements WHERE serial_num=?`, serialnum)
if err == sql.ErrNoRows {
err = nil
}
return err
}
// GetBandwidthAllocationBySignature finds allocation info by signature
func (db *DB) GetBandwidthAllocationBySignature(signature []byte) ([]*pb.Order, error) {
defer db.locked()()
rows, err := db.db.Query(`SELECT agreement FROM bandwidth_agreements WHERE signature = ?`, signature)
if err != nil {
return nil, err
}
defer func() {
if closeErr := rows.Close(); closeErr != nil {
zap.S().Errorf("failed to close rows when selecting from bandwidth_agreements: %+v", closeErr)
}
}()
agreements := []*pb.Order{}
for rows.Next() {
var rbaBytes []byte
err := rows.Scan(&rbaBytes)
if err != nil {
return agreements, err
}
rba := &pb.Order{}
err = proto.Unmarshal(rbaBytes, rba)
if err != nil {
return agreements, err
}
agreements = append(agreements, rba)
}
return agreements, nil
}
// GetBandwidthAllocations all bandwidth agreements
func (db *DB) GetBandwidthAllocations() (map[storj.NodeID][]*Agreement, error) {
defer db.locked()()
rows, err := db.db.Query(`SELECT satellite, agreement FROM bandwidth_agreements WHERE status = ?`, AgreementStatusUnsent)
if err != nil {
return nil, err
}
defer func() {
if closeErr := rows.Close(); closeErr != nil {
zap.S().Errorf("failed to close rows when selecting from bandwidth_agreements: %+v", closeErr)
}
}()
agreements := make(map[storj.NodeID][]*Agreement)
for rows.Next() {
rbaBytes := []byte{}
agreement := &Agreement{}
var satellite []byte
err := rows.Scan(&satellite, &rbaBytes)
if err != nil {
return agreements, err
}
err = proto.Unmarshal(rbaBytes, &agreement.Agreement)
if err != nil {
return agreements, err
}
satelliteID, err := storj.NodeIDFromBytes(satellite)
if err != nil {
return nil, err
}
agreements[satelliteID] = append(agreements[satelliteID], agreement)
}
return agreements, nil
}
// GetBwaStatusBySerialNum get BWA status by serial num
func (db *DB) GetBwaStatusBySerialNum(serialnum string) (status AgreementStatus, err error) {
defer db.locked()()
err = db.db.QueryRow(`SELECT status FROM bandwidth_agreements WHERE serial_num=?`, serialnum).Scan(&status)
return status, err
}
// AddTTL adds TTL into database by id
func (db *DB) AddTTL(id string, expiration, size int64) error {
defer db.locked()()
created := time.Now().Unix()
_, err := db.db.Exec("INSERT OR REPLACE INTO ttl (id, created, expires, size) VALUES (?, ?, ?, ?)", id, created, expiration, size)
return err
}
// GetTTLByID finds the TTL in the database by id and return it
func (db *DB) GetTTLByID(id string) (expiration int64, err error) {
defer db.locked()()
err = db.db.QueryRow(`SELECT expires FROM ttl WHERE id=?`, id).Scan(&expiration)
return expiration, err
}
// SumTTLSizes sums the size column on the ttl table
func (db *DB) SumTTLSizes() (int64, error) {
defer db.locked()()
var sum *int64
err := db.db.QueryRow(`SELECT SUM(size) FROM ttl;`).Scan(&sum)
if err == sql.ErrNoRows || sum == nil {
return 0, nil
}
return *sum, err
}
// DeleteTTLByID finds the TTL in the database by id and delete it
func (db *DB) DeleteTTLByID(id string) error {
defer db.locked()()
_, err := db.db.Exec(`DELETE FROM ttl WHERE id=?`, id)
if err == sql.ErrNoRows {
err = nil
}
return err
}
// GetBandwidthUsedByDay finds the so far bw used by day and return it
func (db *DB) GetBandwidthUsedByDay(t time.Time) (size int64, err error) {
return db.GetTotalBandwidthBetween(t, t)
}
// GetTotalBandwidthBetween each row in the bwusagetbl contains the total bw used per day
func (db *DB) GetTotalBandwidthBetween(startdate time.Time, enddate time.Time) (int64, error) {
defer db.locked()()
startTimeUnix := time.Date(startdate.Year(), startdate.Month(), startdate.Day(), 0, 0, 0, 0, startdate.Location()).Unix()
endTimeUnix := time.Date(enddate.Year(), enddate.Month(), enddate.Day(), 24, 0, 0, 0, enddate.Location()).Unix()
defaultunixtime := time.Date(time.Now().Year(), time.Now().Month(), time.Now().Day(), 0, 0, 0, 0, time.Now().Location()).Unix()
if (endTimeUnix < startTimeUnix) && (startTimeUnix > defaultunixtime || endTimeUnix > defaultunixtime) {
return 0, errors.New("Invalid date range")
}
var totalUsage *int64
err := db.db.QueryRow(`SELECT SUM(total) FROM bandwidth_agreements WHERE daystart_utc_sec BETWEEN ? AND ?`, startTimeUnix, endTimeUnix).Scan(&totalUsage)
if err == sql.ErrNoRows || totalUsage == nil {
return 0, nil
}
return *totalUsage, err
}
// RawDB returns access to the raw database, only for migration tests.
func (db *DB) RawDB() *sql.DB { return db.db }
// Begin begins transaction
func (db *DB) Begin() (*sql.Tx, error) { return db.db.Begin() }
// Rebind rebind parameters
func (db *DB) Rebind(s string) string { return s }
// Schema returns schema
func (db *DB) Schema() string { return "" }


@@ -1,392 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package psdb_test
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/storj"
)
const concurrency = 10
func newDB(t testing.TB, id string) (*psdb.DB, string, func()) {
tmpdir, err := ioutil.TempDir("", "storj-psdb-"+id)
require.NoError(t, err)
dbpath := filepath.Join(tmpdir, "psdb.db")
db, err := psdb.Open(dbpath)
require.NoError(t, err)
err = db.Migration().Run(zaptest.NewLogger(t), db)
require.NoError(t, err)
return db, dbpath, func() {
err := db.Close()
require.NoError(t, err)
err = os.RemoveAll(tmpdir)
require.NoError(t, err)
}
}
func TestNewInmemory(t *testing.T) {
db, err := psdb.OpenInMemory()
if err != nil {
t.Fatal(err)
}
if err := db.Close(); err != nil {
t.Fatal(err)
}
}
func TestHappyPath(t *testing.T) {
db, dbPath, cleanup := newDB(t, "1")
defer cleanup()
type TTL struct {
ID string
Expiration int64
}
tests := []TTL{
{ID: "", Expiration: 0},
{ID: "\x00", Expiration: ^int64(0)},
{ID: "test", Expiration: 666},
}
bandwidthAllocation := func(serialnum, signature string, satelliteID storj.NodeID, total int64) *pb.Order {
return &pb.Order{
PayerAllocation: pb.OrderLimit{SatelliteId: satelliteID, SerialNumber: serialnum},
Total: total,
Signature: []byte(signature),
}
}
//TODO: use better data
nodeIDAB := teststorj.NodeIDFromString("AB")
allocationTests := []*pb.Order{
bandwidthAllocation("serialnum_1", "signed by test", nodeIDAB, 0),
bandwidthAllocation("serialnum_2", "signed by sigma", nodeIDAB, 10),
bandwidthAllocation("serialnum_3", "signed by sigma", nodeIDAB, 98),
bandwidthAllocation("serialnum_4", "signed by test", nodeIDAB, 3),
}
type bwUsage struct {
size int64
timenow time.Time
}
bwtests := []bwUsage{
// size is total size stored
{size: 1110, timenow: time.Now()},
}
t.Run("Empty", func(t *testing.T) {
t.Run("Bandwidth Allocation", func(t *testing.T) {
for _, test := range allocationTests {
agreements, err := db.GetBandwidthAllocationBySignature(test.Signature)
require.Len(t, agreements, 0)
require.NoError(t, err)
}
})
t.Run("Get all Bandwidth Allocations", func(t *testing.T) {
agreementGroups, err := db.GetBandwidthAllocations()
require.Len(t, agreementGroups, 0)
require.NoError(t, err)
})
t.Run("GetBandwidthUsedByDay", func(t *testing.T) {
for _, bw := range bwtests {
size, err := db.GetBandwidthUsedByDay(bw.timenow)
require.NoError(t, err)
require.Equal(t, int64(0), size)
}
})
t.Run("GetTotalBandwidthBetween", func(t *testing.T) {
for _, bw := range bwtests {
size, err := db.GetTotalBandwidthBetween(bw.timenow, bw.timenow)
require.NoError(t, err)
require.Equal(t, int64(0), size)
}
})
})
t.Run("Create", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, ttl := range tests {
err := db.AddTTL(ttl.ID, ttl.Expiration, 0)
if err != nil {
t.Fatal(err)
}
}
})
}
})
t.Run("Get", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, ttl := range tests {
expiration, err := db.GetTTLByID(ttl.ID)
if err != nil {
t.Fatal(err)
}
if ttl.Expiration != expiration {
t.Fatalf("expected %d got %d", ttl.Expiration, expiration)
}
}
})
}
})
t.Run("Delete", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("Delete", func(t *testing.T) {
t.Parallel()
for _, ttl := range tests {
err := db.DeleteTTLByID(ttl.ID)
if err != nil {
t.Fatal(err)
}
}
})
}
})
t.Run("Get Deleted", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, ttl := range tests {
expiration, err := db.GetTTLByID(ttl.ID)
if err == nil {
t.Fatal(err)
}
if expiration != 0 {
t.Fatalf("expected expiration 0 got %d", expiration)
}
}
})
}
})
t.Run("Bandwidth Allocation", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, test := range allocationTests {
err := db.WriteBandwidthAllocToDB(test)
if err != nil {
t.Fatal(err)
}
agreements, err := db.GetBandwidthAllocationBySignature(test.Signature)
if err != nil {
t.Fatal(err)
}
found := false
for _, agreement := range agreements {
if pb.Equal(agreement, test) {
found = true
break
}
}
if !found {
t.Fatal("did not find added bandwidth allocation")
}
}
})
}
})
t.Run("Get all Bandwidth Allocations", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
agreementGroups, err := db.GetBandwidthAllocations()
if err != nil {
t.Fatal(err)
}
found := false
for _, agreements := range agreementGroups {
for _, agreement := range agreements {
for _, test := range allocationTests {
if pb.Equal(&agreement.Agreement, test) {
found = true
break
}
}
}
}
if !found {
t.Fatal("did not find added bandwidth allocation")
}
})
}
})
t.Run("GetBandwidthUsedByDay", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, bw := range bwtests {
size, err := db.GetBandwidthUsedByDay(bw.timenow)
if err != nil {
t.Fatal(err)
}
if bw.size != size {
t.Fatalf("expected %d got %d", bw.size, size)
}
}
})
}
})
t.Run("GetTotalBandwidthBetween", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, bw := range bwtests {
size, err := db.GetTotalBandwidthBetween(bw.timenow, bw.timenow)
if err != nil {
t.Fatal(err)
}
if bw.size != size {
t.Fatalf("expected %d got %d", bw.size, size)
}
}
})
}
})
type bwaUsage struct {
serialnum string
status psdb.AgreementStatus
}
bwatests := []bwaUsage{
{serialnum: "serialnum_1", status: psdb.AgreementStatusUnsent},
{serialnum: "serialnum_2", status: psdb.AgreementStatusReject},
{serialnum: "serialnum_3", status: psdb.AgreementStatusSent},
}
t.Run("UpdateBandwidthAllocationStatus", func(t *testing.T) {
for P := 0; P < concurrency; P++ {
t.Run("#"+strconv.Itoa(P), func(t *testing.T) {
t.Parallel()
for _, bw := range bwatests {
err := db.UpdateBandwidthAllocationStatus(bw.serialnum, bw.status)
require.NoError(t, err)
status, err := db.GetBwaStatusBySerialNum(bw.serialnum)
require.NoError(t, err)
assert.Equal(t, bw.status, status)
}
})
}
})
// mocked of storage directory, add here new files/directory ...
// to create a directory prefix with 'D' and for file 'F'
storagenodeDir := []string{"Dblob", "Finfo.db", "Finfo.db-shm", "Finfo.db-wal", "Fpiecestore.db", "Fpiecestore.db-shm", "Fpiecestore.db-wal", "Dtmp", "Dtrash"}
t.Run("DeleteObsolete", func(t *testing.T) {
//mock the storagenode's storage directory creation
for _, d := range storagenodeDir {
switch d[0] {
// create a mock directory
case 'D':
dir := filepath.Join(filepath.Dir(dbPath), d[1:])
err := os.MkdirAll(dir, os.ModePerm)
require.NoError(t, err)
// create a mock file
case 'F':
file := filepath.Join(filepath.Dir(dbPath))
fmt.Println("file=", file)
message := []byte("Hello, Gophers!")
tmpfile := filepath.Join(file, d[1:])
err := ioutil.WriteFile(tmpfile, message, 0644)
require.NoError(t, err)
default:
t.Fatalf("shouldnt ever come here")
}
}
for i := 0; i < 10; i++ {
subdir := filepath.Join(filepath.Dir(dbPath), stringWithCharset(2))
subsubdir := filepath.Join(subdir, stringWithCharset(2))
err := os.MkdirAll(subsubdir, os.ModePerm)
require.NoError(t, err)
message := []byte("Undisputedly secure Storj's piece data !")
tmpfile := filepath.Join(subsubdir, stringWithCharset(20))
err = ioutil.WriteFile(tmpfile, message, 0644)
require.NoError(t, err)
}
err := db.DeleteObsolete(dbPath)
assert.NoError(t, err)
// verify all the 2letter directories are removed
path := filepath.Dir(dbPath)
files, err := ioutil.ReadDir(path)
require.NoError(t, err)
for _, f := range files {
if info, err := os.Stat(filepath.Join(path, f.Name())); err == nil && info.IsDir() && len(f.Name()) == 2 {
t.Fatalf("obsolete files not cleaneup")
}
}
})
}
func stringWithCharset(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
b := make([]byte, length)
for i := range b {
b[i] = charset[seededRand.Intn(len(charset))]
}
return string(b)
}
func BenchmarkWriteBandwidthAllocation(b *testing.B) {
db, _, cleanup := newDB(b, "3")
defer cleanup()
const WritesPerLoop = 10
b.RunParallel(func(b *testing.PB) {
for b.Next() {
for i := 0; i < WritesPerLoop; i++ {
_ = db.WriteBandwidthAllocToDB(&pb.Order{
PayerAllocation: pb.OrderLimit{},
Total: 156,
Signature: []byte("signed by test"),
})
}
}
})
}


@@ -1,14 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB);
CREATE TABLE `bwusagetbl` (`size` INT(10), `daystartdate` INT(10), `dayenddate` INT(10));
CREATE INDEX idx_ttl_expires ON ttl (expires);
-- NEW DATA --
INSERT INTO ttl VALUES(1,2,3,4);
INSERT INTO bandwidth_agreements VALUES (
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265'
);
INSERT INTO bwusagetbl VALUES(1,3,4);


@@ -1,17 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB, `uplink` BLOB, `serial_num` BLOB, `total` INT(10), `max_size` INT(10), `created_utc_sec` INT(10), `expiration_utc_sec` INT(10), `action` INT(10), `daystart_utc_sec` INT(10));
CREATE INDEX idx_ttl_expires ON ttl (expires);
INSERT INTO ttl VALUES(1,2,3,4);
INSERT INTO bandwidth_agreements VALUES(
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265',
X'18964640433F24595097466BD08D35BDD1DDF5082F75A821613A6C8B445E0000',
'040968ef-e454-4424-aa1f-3d7365fce901',
2669568,
0,
1550666511,
1554554511,
0,
1550620800
);


@@ -1,18 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB, `uplink` BLOB, `serial_num` BLOB, `total` INT(10), `max_size` INT(10), `created_utc_sec` INT(10), `expiration_utc_sec` INT(10), `action` INT(10), `daystart_utc_sec` INT(10), `status` INT(10) DEFAULT 0);
CREATE INDEX idx_ttl_expires ON ttl (expires);
INSERT INTO ttl VALUES(1,2,3,4);
INSERT INTO bandwidth_agreements VALUES(
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265',
X'18964640433F24595097466BD08D35BDD1DDF5082F75A821613A6C8B445E0000',
'040968ef-e454-4424-aa1f-3d7365fce901',
2669568,
0,
1550666511,
1554554511,
0,
1550620800,
0
);


@@ -1,19 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB, `uplink` BLOB, `serial_num` BLOB, `total` INT(10), `max_size` INT(10), `created_utc_sec` INT(10), `expiration_utc_sec` INT(10), `action` INT(10), `daystart_utc_sec` INT(10), `status` INT(10) DEFAULT 0);
CREATE INDEX idx_ttl_expires ON ttl (expires);
CREATE INDEX idx_bwa_serial ON bandwidth_agreements (serial_num);
INSERT INTO ttl VALUES(1,2,3,4);
INSERT INTO bandwidth_agreements VALUES(
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265',
X'18964640433F24595097466BD08D35BDD1DDF5082F75A821613A6C8B445E0000',
'040968ef-e454-4424-aa1f-3d7365fce901',
2669568,
0,
1550666511,
1554554511,
0,
1550620800,
0
);


@@ -1,19 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB, `uplink` BLOB, `serial_num` BLOB, `total` INT(10), `max_size` INT(10), `created_utc_sec` INT(10), `expiration_utc_sec` INT(10), `action` INT(10), `daystart_utc_sec` INT(10), `status` INT(10) DEFAULT 0);
CREATE INDEX idx_ttl_expires ON ttl (expires);
CREATE INDEX idx_bwa_serial ON bandwidth_agreements (serial_num);
INSERT INTO ttl VALUES(1,2,1553727600,4);
INSERT INTO bandwidth_agreements VALUES(
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265',
X'18964640433F24595097466BD08D35BDD1DDF5082F75A821613A6C8B445E0000',
'040968ef-e454-4424-aa1f-3d7365fce901',
2669568,
0,
1550666511,
1554554511,
0,
1550620800,
0
);

@ -1,19 +0,0 @@
CREATE TABLE `ttl` (`id` BLOB UNIQUE, `created` INT(10), `expires` INT(10), `size` INT(10));
CREATE TABLE `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB, `uplink` BLOB, `serial_num` BLOB, `total` INT(10), `max_size` INT(10), `created_utc_sec` INT(10), `expiration_utc_sec` INT(10), `action` INT(10), `daystart_utc_sec` INT(10), `status` INT(10) DEFAULT 0);
CREATE INDEX idx_ttl_expires ON ttl (expires);
CREATE INDEX idx_bwa_serial ON bandwidth_agreements (serial_num);
INSERT INTO ttl VALUES(1,2,1553727600,4);
INSERT INTO bandwidth_agreements VALUES(
X'0fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500',
X'0a8a070a200fac57151affd454b6884e2ee085097ef9581edea7ccfe6b6ba6401beac06500122018964640433f24595097466bd08d35bdd1ddf5082f75a821613a6c8b445e0000208fbda2e5052a2430343039363865662d653435342d343432342d616131662d336437333635666365393031388f96b5e30542e6023082016230820108a003020102021100b8be6e42d5b98e0757cda2fa5088e3dd300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004e94b91d488fd492e0097199632ce29dc1e0ada0edd03387d63b5f64e3108519eaaa391cafe0e74a5ae0c39f3056a630a57d55e05c2b160461541aa2bdf1f5978a33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d040302034800304502202ed2c4bdfab3a56cd7694e0e67cadaa48d2e6ae3a87dbd4e310395190374e3a0022100eba5c24ed07e4e539948bbd6bdcdc6000633a95373380e6bddc23b2e43f4288142e0023082015c30820101a003020102021100c387ca34d032179981de0f1c642ab820300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004d5c4e88ea884c28d98ed162f9c43d6e056e83a6d9315dc08b2a63e5416867a1e3a2c2f670e4a47e5b6cef6b9df9c0eb46debbf7a70557065479da617ce50b1aaa3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d0403020349003046022100a078a46470e233cd315329725435357fc93f778bb85cdab00a2178660fabf48f022100ed4d90f661ccb7abb7c996626179707c00497957c75dfb8db1ed17d2d80b4cf04a46304402203288c3a91734196d64dbb44390204899e7fb46fb569910665ce7ef924aadf2b502203565082f9b370c1cf89e854fece05033f9ab6e0d3d6dc08b6defb729ea6574301080f8a2011a20555937661ba23a14e4f32e69ba8b8d503f046bc94f2622b50ceae5327bf1f90022e6023082016230820107a00302010202104edd0ae5652847fb1293a10892c25f24300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d030107034200041ed2af8a599e9ed6ea5c85c92a09a42b4530fd1fda0ef481d891ae08c9cea684046073f57f50ee23c1def0ca9158ba470a0f445ffe99d977a60f6e6fc6f6a57aa33f303d300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff04023000300a06082a8648ce3d04030203490030460221009585d52e9e12223b11153d8d187349f1f591f6acad41594635e51d359f00d39e0221009a11bd9e81d94a4ee2c3c7adbe362df1c47d3ef4f8ba11d94971f9d4eb59eb1b22df023082015b30820100a00302010202106861fa849f76dabd1a66a3d2a5fb4eb7300a06082a8648ce3d0403023010310e300c060355040a130553746f726a3022180f30303031303130313030303030305a180f30303031303130313030303030305a3010310e300c060355040a130553746f726a3059301306072a8648ce3d020106082a8648ce3d03010703420004ea0ea4bda75425e4277a3a8f0b2fb7adc718c4bad684b082f798b1a2fc6847949719f2230a8b0ef70ea03d18acba93e4b6a3b9b664abc0383a57ec6b8c442905a3383036300e0603551d0f0101ff04040302020430130603551d25040c300a06082b06010505070301300f0603551d130101ff040530030101ff300a06082a8648ce3d04030203490030460221009795c5e7a3cc6ff459cb5816cc5f96cce1894c6ab60c8df3bfe5066cd8aede22022100f61dc807be119c4248de952f5e5ab2e0cf447992749ed3cbe75f351819433e2e2a46304402207eb5bf07d082b299351e89633e82f2e9514f2982d3fddcacf2e331e28c61c95202200c1790520d2983bb768b97f0ed772262ceed5230ecd42a7ba06b7f4a32a1fdcd',
X'7369676e6174757265',
X'18964640433F24595097466BD08D35BDD1DDF5082F75A821613A6C8B445E0000',
'040968ef-e454-4424-aa1f-3d7365fce901',
2669568,
0,
1550666511,
1554554511,
0,
1550620800,
0
);

@ -15,11 +15,10 @@ import (
"storj.io/storj/pkg/kademlia" "storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psserver"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/storj" "storj.io/storj/pkg/storj"
"storj.io/storj/storagenode/bandwidth" "storj.io/storj/storagenode/bandwidth"
"storj.io/storj/storagenode/pieces" "storj.io/storj/storagenode/pieces"
"storj.io/storj/storagenode/piecestore"
) )
var ( var (
@ -35,20 +34,18 @@ type Endpoint struct {
pieceInfo pieces.DB pieceInfo pieces.DB
kademlia *kademlia.Kademlia kademlia *kademlia.Kademlia
usageDB bandwidth.DB usageDB bandwidth.DB
psdbDB *psdb.DB // TODO remove after complete migration
startTime time.Time startTime time.Time
config psserver.Config config piecestore.OldConfig
} }
// NewEndpoint creates piecestore inspector instance // NewEndpoint creates piecestore inspector instance
func NewEndpoint(log *zap.Logger, pieceInfo pieces.DB, kademlia *kademlia.Kademlia, usageDB bandwidth.DB, psdbDB *psdb.DB, config psserver.Config) *Endpoint { func NewEndpoint(log *zap.Logger, pieceInfo pieces.DB, kademlia *kademlia.Kademlia, usageDB bandwidth.DB, config piecestore.OldConfig) *Endpoint {
return &Endpoint{ return &Endpoint{
log: log, log: log,
pieceInfo: pieceInfo, pieceInfo: pieceInfo,
kademlia: kademlia, kademlia: kademlia,
usageDB: usageDB, usageDB: usageDB,
psdbDB: psdbDB,
config: config, config: config,
startTime: time.Now(), startTime: time.Now(),
} }
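
The inspector endpoint now takes only the new storagenode databases; the psdb handle and the psserver config are gone from its constructor. A minimal sketch of calling the new signature, assuming the endpoint lives in storj.io/storj/storagenode/inspector; the arguments are placeholders supplied by the caller, not the real peer wiring.

// Sketch only: demonstrates the trimmed NewEndpoint signature after this change.
package sketch

import (
	"go.uber.org/zap"

	"storj.io/storj/pkg/kademlia"
	"storj.io/storj/storagenode/bandwidth"
	"storj.io/storj/storagenode/inspector"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/piecestore"
)

func newInspector(log *zap.Logger, pieceInfo pieces.DB, kad *kademlia.Kademlia, usage bandwidth.DB, cfg piecestore.OldConfig) *inspector.Endpoint {
	// The psdbDB argument is gone; only the new storagenode databases remain.
	return inspector.NewEndpoint(log, pieceInfo, kad, usage, cfg)
}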

@ -18,9 +18,6 @@ import (
"storj.io/storj/pkg/overlay" "storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts" "storj.io/storj/pkg/peertls/tlsopts"
"storj.io/storj/pkg/piecestore/psserver"
"storj.io/storj/pkg/piecestore/psserver/agreementsender"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/server" "storj.io/storj/pkg/server"
"storj.io/storj/pkg/storj" "storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport" "storj.io/storj/pkg/transport"
@ -50,7 +47,6 @@ type DB interface {
UsedSerials() piecestore.UsedSerials UsedSerials() piecestore.UsedSerials
// TODO: use better interfaces // TODO: use better interfaces
PSDB() *psdb.DB
RoutingTable() (kdb, ndb storage.KeyValueStore) RoutingTable() (kdb, ndb storage.KeyValueStore)
} }
@ -60,8 +56,8 @@ type Config struct {
Server server.Config Server server.Config
Kademlia kademlia.Config Kademlia kademlia.Config
Storage psserver.Config
Storage piecestore.OldConfig
Storage2 piecestore.Config Storage2 piecestore.Config
Version version.Config Version version.Config
@ -94,10 +90,6 @@ type Peer struct {
Inspector *kademlia.Inspector Inspector *kademlia.Inspector
} }
Agreements struct {
Sender *agreementsender.AgreementSender
}
Storage2 struct { Storage2 struct {
Trust *trust.Pool Trust *trust.Pool
Store *pieces.Store Store *pieces.Store
@ -189,15 +181,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
pb.RegisterKadInspectorServer(peer.Server.PrivateGRPC(), peer.Kademlia.Inspector) pb.RegisterKadInspectorServer(peer.Server.PrivateGRPC(), peer.Kademlia.Inspector)
} }
{ // agreements
config := config.Storage // TODO: separate config
peer.Agreements.Sender = agreementsender.New(
peer.Log.Named("agreements"),
peer.DB.PSDB(), peer.Transport, peer.Kademlia.Service,
config.AgreementSenderCheckInterval,
)
}
{ // setup storage 2 { // setup storage 2
trustAllSatellites := !config.Storage.SatelliteIDRestriction trustAllSatellites := !config.Storage.SatelliteIDRestriction
peer.Storage2.Trust, err = trust.NewPool(peer.Kademlia.Service, trustAllSatellites, config.Storage.WhitelistedSatelliteIDs) peer.Storage2.Trust, err = trust.NewPool(peer.Kademlia.Service, trustAllSatellites, config.Storage.WhitelistedSatelliteIDs)
@ -241,7 +224,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
peer.DB.PieceInfo(), peer.DB.PieceInfo(),
peer.Kademlia.Service, peer.Kademlia.Service,
peer.DB.Bandwidth(), peer.DB.Bandwidth(),
peer.DB.PSDB(),
config.Storage, config.Storage,
) )
pb.RegisterPieceStoreInspectorServer(peer.Server.PrivateGRPC(), peer.Storage2.Inspector) pb.RegisterPieceStoreInspectorServer(peer.Server.PrivateGRPC(), peer.Storage2.Inspector)
@ -271,9 +253,6 @@ func (peer *Peer) Run(ctx context.Context) error {
group.Go(func() error { group.Go(func() error {
return errs2.IgnoreCanceled(peer.Kademlia.Service.Run(ctx)) return errs2.IgnoreCanceled(peer.Kademlia.Service.Run(ctx))
}) })
group.Go(func() error {
return errs2.IgnoreCanceled(peer.Agreements.Sender.Run(ctx))
})
group.Go(func() error { group.Go(func() error {
return errs2.IgnoreCanceled(peer.Storage2.Sender.Run(ctx)) return errs2.IgnoreCanceled(peer.Storage2.Sender.Run(ctx))
}) })
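
With the agreement sender removed, the run loop drives only the Kademlia service and the order sender. A condensed sketch of the remaining errgroup pattern, assuming services that expose a Run(ctx) error method; the real code additionally wraps each Run with errs2.IgnoreCanceled.

// Sketch only: the service fields are placeholders, not the real peer fields.
package sketch

import (
	"context"

	"golang.org/x/sync/errgroup"
)

type service interface {
	Run(ctx context.Context) error
}

func run(ctx context.Context, kademlia, orderSender service) error {
	group, ctx := errgroup.WithContext(ctx)
	// The agreement sender goroutine from before this change is gone.
	group.Go(func() error { return kademlia.Run(ctx) })
	group.Go(func() error { return orderSender.Run(ctx) })
	return group.Wait()
}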

@ -38,6 +38,20 @@ var (
) )
var _ pb.PiecestoreServer = (*Endpoint)(nil) var _ pb.PiecestoreServer = (*Endpoint)(nil)
// OldConfig contains everything necessary for a server
type OldConfig struct {
Path string `help:"path to store data in" default:"$CONFDIR/storage"`
WhitelistedSatelliteIDs string `help:"a comma-separated list of approved satellite node ids" default:""`
SatelliteIDRestriction bool `help:"if true, only allow data from approved satellites" devDefault:"false" releaseDefault:"true"`
AllocatedDiskSpace memory.Size `user:"true" help:"total allocated disk space in bytes" default:"1TB"`
AllocatedBandwidth memory.Size `user:"true" help:"total allocated bandwidth in bytes" default:"500GiB"`
KBucketRefreshInterval time.Duration `help:"how frequently Kademlia bucket should be refreshed with node stats" default:"1h0m0s"`
AgreementSenderCheckInterval time.Duration `help:"duration between agreement checks" default:"1h0m0s"`
CollectorInterval time.Duration `help:"interval to check for expired pieces" default:"1h0m0s"`
}
// Config defines parameters for piecestore endpoint. // Config defines parameters for piecestore endpoint.
type Config struct { type Config struct {
ExpirationGracePeriod time.Duration `help:"how soon before expiration date should things be considered expired" default:"48h0m0s"` ExpirationGracePeriod time.Duration `help:"how soon before expiration date should things be considered expired" default:"48h0m0s"`
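
The struct tags above feed the config loader; when wiring a storage node by hand (for example in a test), the same fields can be filled in directly. A minimal sketch with illustrative values mirroring the tag defaults, assuming the package path storj.io/storj/storagenode/piecestore.

// Sketch only: the values are illustrative, not required settings.
package sketch

import (
	"time"

	"storj.io/storj/internal/memory"
	"storj.io/storj/storagenode/piecestore"
)

func exampleOldConfig(dataDir string) piecestore.OldConfig {
	return piecestore.OldConfig{
		Path:                   dataDir,
		SatelliteIDRestriction: false, // dev default: accept data from any satellite
		AllocatedDiskSpace:     memory.TB,
		AllocatedBandwidth:     500 * memory.GiB,
		KBucketRefreshInterval: time.Hour,
		CollectorInterval:      time.Hour,
	}
}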

@ -8,7 +8,6 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
"storj.io/storj/pkg/kademlia" "storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/storage" "storj.io/storj/storage"
"storj.io/storj/storage/boltdb" "storj.io/storj/storage/boltdb"
"storj.io/storj/storage/filestore" "storj.io/storj/storage/filestore"
@ -32,7 +31,6 @@ type Config struct {
// DB contains access to different database tables // DB contains access to different database tables
type DB struct { type DB struct {
log *zap.Logger log *zap.Logger
psdb *psdb.DB
pieces interface { pieces interface {
storage.Blobs storage.Blobs
@ -57,11 +55,6 @@ func New(log *zap.Logger, config Config) (*DB, error) {
return nil, err return nil, err
} }
psdb, err := psdb.Open(config.Info)
if err != nil {
return nil, err
}
dbs, err := boltdb.NewShared(config.Kademlia, kademlia.KademliaBucket, kademlia.NodeBucket) dbs, err := boltdb.NewShared(config.Kademlia, kademlia.KademliaBucket, kademlia.NodeBucket)
if err != nil { if err != nil {
return nil, err return nil, err
@ -69,7 +62,6 @@ func New(log *zap.Logger, config Config) (*DB, error) {
return &DB{ return &DB{
log: log, log: log,
psdb: psdb,
pieces: pieces, pieces: pieces,
@ -94,17 +86,10 @@ func NewInMemory(log *zap.Logger, storageDir string) (*DB, error) {
return nil, err return nil, err
} }
psdb, err := psdb.OpenInMemory()
if err != nil {
return nil, err
}
return &DB{ return &DB{
log: log, log: log,
psdb: psdb,
pieces: pieces, pieces: pieces,
info: infodb, info: infodb,
kdb: teststore.New(), kdb: teststore.New(),
@ -114,17 +99,12 @@ func NewInMemory(log *zap.Logger, storageDir string) (*DB, error) {
// CreateTables creates any necessary tables. // CreateTables creates any necessary tables.
func (db *DB) CreateTables() error { func (db *DB) CreateTables() error {
migration := db.psdb.Migration() return db.info.CreateTables(db.log)
return errs.Combine(
migration.Run(db.log.Named("migration"), db.psdb),
db.info.CreateTables(db.log.Named("info")),
)
} }
// Close closes any resources. // Close closes any resources.
func (db *DB) Close() error { func (db *DB) Close() error {
return errs.Combine( return errs.Combine(
db.psdb.Close(),
db.kdb.Close(), db.kdb.Close(),
db.ndb.Close(), db.ndb.Close(),
@ -138,11 +118,6 @@ func (db *DB) Pieces() storage.Blobs {
return db.pieces return db.pieces
} }
// PSDB returns piecestore database
func (db *DB) PSDB() *psdb.DB {
return db.psdb
}
// RoutingTable returns kademlia routing table // RoutingTable returns kademlia routing table
func (db *DB) RoutingTable() (kdb, ndb storage.KeyValueStore) { func (db *DB) RoutingTable() (kdb, ndb storage.KeyValueStore) {
return db.kdb, db.ndb return db.kdb, db.ndb
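
With psdb gone, opening the storage node databases is a plain New/CreateTables/Close cycle. A minimal sketch of that lifecycle using the in-memory constructor shown above, assuming the package path storj.io/storj/storagenode/storagenodedb.

// Sketch only: uses NewInMemory so no on-disk Config paths need to be invented.
package main

import (
	"log"

	"go.uber.org/zap"

	"storj.io/storj/storagenode/storagenodedb"
)

func main() {
	db, err := storagenodedb.NewInMemory(zap.NewNop(), "/tmp/storagenode-pieces")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	// CreateTables now only sets up the info database; the psdb migration step is gone.
	if err := db.CreateTables(); err != nil {
		log.Fatal(err)
	}
}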