// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"time"

	"github.com/zeebo/errs"

	"storj.io/common/storj"
	"storj.io/private/dbutil"
	"storj.io/private/dbutil/cockroachutil"
	"storj.io/private/dbutil/pgutil"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/compensation"
	"storj.io/storj/satellite/satellitedb/dbx"
)

// StoragenodeAccounting implements the accounting/db StoragenodeAccounting interface.
type StoragenodeAccounting struct {
	db *satelliteDB
}

// SaveTallies records raw tallies of at rest data to the database.
func (db *StoragenodeAccounting) SaveTallies(ctx context.Context, latestTally time.Time, nodeData map[storj.NodeID]float64) (err error) {
	defer mon.Task()(&ctx)(&err)
	if len(nodeData) == 0 {
		return Error.New("In SaveTallies with empty nodeData")
	}
	var nodeIDs []storj.NodeID
	var totals []float64
	for id, total := range nodeData {
		nodeIDs = append(nodeIDs, id)
		totals = append(totals, total)
	}

	err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
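		// Insert the whole batch in one statement: unnest() expands the
		// parallel bytea/float8 arrays into rows, so a single round trip
		// covers every tally instead of one INSERT per node.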
		_, err = tx.Tx.ExecContext(ctx, db.db.Rebind(`
			INSERT INTO storagenode_storage_tallies (
				interval_end_time,
				node_id, data_total)
			SELECT
				$1,
				unnest($2::bytea[]), unnest($3::float8[])`),
			latestTally,
			pgutil.NodeIDArray(nodeIDs), pgutil.Float8Array(totals))
		if err != nil {
			return err
		}
		return tx.UpdateNoReturn_AccountingTimestamps_By_Name(ctx,
			dbx.AccountingTimestamps_Name(accounting.LastAtRestTally),
			dbx.AccountingTimestamps_Update_Fields{
				Value: dbx.AccountingTimestamps_Value(latestTally),
			},
		)
	})
	return Error.Wrap(err)
}

// GetTallies retrieves all raw tallies.
func (db *StoragenodeAccounting) GetTallies(ctx context.Context) (_ []*accounting.StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)
	raws, err := db.db.All_StoragenodeStorageTally(ctx)
	out := make([]*accounting.StoragenodeStorageTally, len(raws))
	for i, r := range raws {
		nodeID, err := storj.NodeIDFromBytes(r.NodeId)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		out[i] = &accounting.StoragenodeStorageTally{
			NodeID:          nodeID,
			IntervalEndTime: r.IntervalEndTime,
			DataTotal:       r.DataTotal,
		}
	}
	return out, Error.Wrap(err)
}

// GetTalliesSince retrieves all raw tallies since latestRollup.
func (db *StoragenodeAccounting) GetTalliesSince(ctx context.Context, latestRollup time.Time) (_ []*accounting.StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)
	raws, err := db.db.All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx, dbx.StoragenodeStorageTally_IntervalEndTime(latestRollup))
	out := make([]*accounting.StoragenodeStorageTally, len(raws))
	for i, r := range raws {
		nodeID, err := storj.NodeIDFromBytes(r.NodeId)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		out[i] = &accounting.StoragenodeStorageTally{
			NodeID:          nodeID,
			IntervalEndTime: r.IntervalEndTime,
			DataTotal:       r.DataTotal,
		}
	}
	return out, Error.Wrap(err)
}
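
// getNodeIdsSince returns the ids of all nodes that have bandwidth rollups
// recorded since the given time.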
func (db *StoragenodeAccounting) getNodeIdsSince(ctx context.Context, since time.Time) (nodeids [][]byte, err error) {
	defer mon.Task()(&ctx)(&err)
	rows, err := db.db.QueryContext(ctx, db.db.Rebind(`SELECT DISTINCT storagenode_id FROM storagenode_bandwidth_rollups WHERE interval_start >= $1`), since)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	defer func() {
		err = errs.Combine(err, Error.Wrap(rows.Close()))
	}()

	for rows.Next() {
		var nodeid []byte
		err := rows.Scan(&nodeid)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		nodeids = append(nodeids, nodeid)
	}
	if err := rows.Err(); err != nil {
		return nil, Error.Wrap(err)
	}

	return nodeids, nil
}
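
// getBandwidthByNodeSince calls cb on each bandwidth rollup for the given
// node since latestRollup, reading rows in pages to bound memory use.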
func (db *StoragenodeAccounting) getBandwidthByNodeSince(ctx context.Context, latestRollup time.Time, nodeid []byte,
	cb func(context.Context, *accounting.StoragenodeBandwidthRollup) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}
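
	// Page through the table with a dbx continuation cursor; a nil cursor
	// after a fetch means the last page has been consumed.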
	var cursor *dbx.Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
	for {
		rollups, next, err := db.db.Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx,
			dbx.StoragenodeBandwidthRollup_StoragenodeId(nodeid), dbx.StoragenodeBandwidthRollup_IntervalStart(latestRollup),
			pageLimit, cursor)
		if err != nil {
			return Error.Wrap(err)
		}
		cursor = next
		for _, r := range rollups {
			nodeID, err := storj.NodeIDFromBytes(r.StoragenodeId)
			if err != nil {
				return Error.Wrap(err)
			}
			err = cb(ctx, &accounting.StoragenodeBandwidthRollup{
				NodeID:        nodeID,
				IntervalStart: r.IntervalStart,
				Action:        r.Action,
				Settled:       r.Settled,
			})
			if err != nil {
				return err
			}
		}
		if cursor == nil {
			return nil
		}
	}
}
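
// getBandwidthPhase2ByNodeSince mirrors getBandwidthByNodeSince but reads
// the phase-2 bandwidth rollups table instead.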
func (db *StoragenodeAccounting) getBandwidthPhase2ByNodeSince(ctx context.Context, latestRollup time.Time, nodeid []byte,
	cb func(context.Context, *accounting.StoragenodeBandwidthRollup) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}

	var cursor *dbx.Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
	for {
		rollups, next, err := db.db.Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx,
			dbx.StoragenodeBandwidthRollupPhase2_StoragenodeId(nodeid), dbx.StoragenodeBandwidthRollupPhase2_IntervalStart(latestRollup),
			pageLimit, cursor)
		if err != nil {
			return Error.Wrap(err)
		}
		cursor = next
		for _, r := range rollups {
			nodeID, err := storj.NodeIDFromBytes(r.StoragenodeId)
			if err != nil {
				return Error.Wrap(err)
			}
			err = cb(ctx, &accounting.StoragenodeBandwidthRollup{
				NodeID:        nodeID,
				IntervalStart: r.IntervalStart,
				Action:        r.Action,
				Settled:       r.Settled,
			})
			if err != nil {
				return err
			}
		}
		if cursor == nil {
			return nil
		}
	}
}

// GetBandwidthSince retrieves all storagenode_bandwidth_rollup entries since latestRollup.
func (db *StoragenodeAccounting) GetBandwidthSince(ctx context.Context, latestRollup time.Time,
	cb func(context.Context, *accounting.StoragenodeBandwidthRollup) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	// This table is keyed on (storagenode_id, interval_start), so we make
	// things easier on the database by issuing individual requests node by
	// node. This also lets us avoid the 16-minute queries that a single
	// scan over the whole table can become.
	var nodeids [][]byte
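	// Transient CockroachDB failures (e.g. serialization conflicts) are
	// retried here; re-reading the node id list has no side effects.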
	for {
		nodeids, err = db.getNodeIdsSince(ctx, latestRollup)
		if err != nil {
			if cockroachutil.NeedsRetry(err) {
				continue
			}
			return err
		}
		break
	}

	for _, nodeid := range nodeids {
		err = db.getBandwidthByNodeSince(ctx, latestRollup, nodeid, cb)
		if err != nil {
			return err
		}

		err = db.getBandwidthPhase2ByNodeSince(ctx, latestRollup, nodeid, cb)
		if err != nil {
			return err
		}
	}

	return nil
}

// SaveRollup records rollup aggregates of bandwidth and at-rest data to the database.
func (db *StoragenodeAccounting) SaveRollup(ctx context.Context, latestRollup time.Time, stats accounting.RollupStats) (err error) {
	defer mon.Task()(&ctx)(&err)
	if len(stats) == 0 {
		return Error.New("In SaveRollup with empty stats")
	}

	batchSize := db.db.opts.SaveRollupBatchSize
	if batchSize <= 0 {
		batchSize = 1000
	}

	var rollups []*accounting.Rollup
	for _, arsByDate := range stats {
		for _, ar := range arsByDate {
			rollups = append(rollups, ar)
		}
	}
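
	// insertBatch upserts one batch in a single statement: unnest() turns
	// the parallel arrays into rows, and ON CONFLICT makes the write
	// idempotent, so a retried rollup run overwrites instead of duplicating.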
	insertBatch := func(ctx context.Context, db *dbx.DB, batch []*accounting.Rollup) (err error) {
		defer mon.Task()(&ctx)(&err)
		n := len(batch)

		nodeID := make([]storj.NodeID, n)
		startTime := make([]time.Time, n)
		putTotal := make([]int64, n)
		getTotal := make([]int64, n)
		getAuditTotal := make([]int64, n)
		getRepairTotal := make([]int64, n)
		putRepairTotal := make([]int64, n)
		atRestTotal := make([]float64, n)

		for i, ar := range batch {
			nodeID[i] = ar.NodeID
			startTime[i] = ar.StartTime
			putTotal[i] = ar.PutTotal
			getTotal[i] = ar.GetTotal
			getAuditTotal[i] = ar.GetAuditTotal
			getRepairTotal[i] = ar.GetRepairTotal
			putRepairTotal[i] = ar.PutRepairTotal
			atRestTotal[i] = ar.AtRestTotal
		}

		_, err = db.ExecContext(ctx, `
			INSERT INTO accounting_rollups (
				node_id, start_time,
				put_total, get_total,
				get_audit_total, get_repair_total, put_repair_total,
				at_rest_total
			)
			SELECT * FROM unnest(
				$1::bytea[], $2::timestamptz[],
				$3::int8[], $4::int8[],
				$5::int8[], $6::int8[], $7::int8[],
				$8::float8[]
			)
			ON CONFLICT ( node_id, start_time )
			DO UPDATE SET
				put_total = EXCLUDED.put_total,
				get_total = EXCLUDED.get_total,
				get_audit_total = EXCLUDED.get_audit_total,
				get_repair_total = EXCLUDED.get_repair_total,
				put_repair_total = EXCLUDED.put_repair_total,
				at_rest_total = EXCLUDED.at_rest_total
		`, pgutil.NodeIDArray(nodeID), pgutil.TimestampTZArray(startTime),
			pgutil.Int8Array(putTotal), pgutil.Int8Array(getTotal),
			pgutil.Int8Array(getAuditTotal), pgutil.Int8Array(getRepairTotal), pgutil.Int8Array(putRepairTotal),
			pgutil.Float8Array(atRestTotal))

		return Error.Wrap(err)
	}

	// Note: no transaction is needed here because the batches are
	// idempotent upserts and accounting.LastRollup is only advanced after
	// every batch succeeds. A failure can leave partial data behind, but
	// the next run will simply rewrite it.

	for len(rollups) > 0 {
		batch := rollups
		if len(batch) > batchSize {
			batch = batch[:batchSize]
		}
		rollups = rollups[len(batch):]

		if err := insertBatch(ctx, db.db.DB, batch); err != nil {
			return Error.Wrap(err)
		}
	}

	err = db.db.UpdateNoReturn_AccountingTimestamps_By_Name(ctx,
		dbx.AccountingTimestamps_Name(accounting.LastRollup),
		dbx.AccountingTimestamps_Update_Fields{
			Value: dbx.AccountingTimestamps_Value(latestRollup),
		},
	)
	return Error.Wrap(err)
}

// LastTimestamp returns the timestamp recorded for the given timestampType,
// creating a zero-value entry if none exists yet.
func (db *StoragenodeAccounting) LastTimestamp(ctx context.Context, timestampType string) (_ time.Time, err error) {
	defer mon.Task()(&ctx)(&err)
	lastTally := time.Time{}
	err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
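		// If no row exists for this timestamp type yet, seed it with the
		// zero time so the first run picks up everything.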
		lt, err := tx.Find_AccountingTimestamps_Value_By_Name(ctx, dbx.AccountingTimestamps_Name(timestampType))
		if lt == nil {
			return tx.CreateNoReturn_AccountingTimestamps(ctx,
				dbx.AccountingTimestamps_Name(timestampType),
				dbx.AccountingTimestamps_Value(lastTally),
			)
		}
		lastTally = lt.Value
		return err
	})
	return lastTally, err
}

// QueryPaymentInfo queries aggregated accounting rollups joined with node data for all nodes in the given period.
func (db *StoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) (_ []*accounting.CSVRow, err error) {
	defer mon.Task()(&ctx)(&err)
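	// Sum each node's rollups over [start, end) in a subquery, then join
	// node metadata (creation date, wallet, disqualification) onto the
	// per-node totals.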
	sqlStmt := `SELECT n.id, n.created_at, r.at_rest_total, r.get_repair_total,
		r.put_repair_total, r.get_audit_total, r.put_total, r.get_total, n.wallet, n.disqualified
		FROM (
			SELECT node_id, SUM(at_rest_total::decimal) AS at_rest_total, SUM(get_repair_total) AS get_repair_total,
				SUM(put_repair_total) AS put_repair_total, SUM(get_audit_total) AS get_audit_total,
				SUM(put_total) AS put_total, SUM(get_total) AS get_total
			FROM accounting_rollups
			WHERE start_time >= ? AND start_time < ?
			GROUP BY node_id
		) r
		LEFT JOIN nodes n ON n.id = r.node_id
		ORDER BY n.id`

	rows, err := db.db.DB.QueryContext(ctx, db.db.Rebind(sqlStmt), start.UTC(), end.UTC())
	if err != nil {
		return nil, Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	csv := []*accounting.CSVRow{}
	for rows.Next() {
		var nodeID []byte
		r := &accounting.CSVRow{}
		var wallet sql.NullString
		var disqualified *time.Time
		err := rows.Scan(&nodeID, &r.NodeCreationDate, &r.AtRestTotal, &r.GetRepairTotal,
			&r.PutRepairTotal, &r.GetAuditTotal, &r.PutTotal, &r.GetTotal, &wallet, &disqualified)
		if err != nil {
			return csv, Error.Wrap(err)
		}
		if wallet.Valid {
			r.Wallet = wallet.String
		}
		id, err := storj.NodeIDFromBytes(nodeID)
		if err != nil {
			return csv, Error.Wrap(err)
		}
		r.NodeID = id
		r.Disqualified = disqualified
		csv = append(csv, r)
	}
	return csv, rows.Err()
}

// QueryStorageNodePeriodUsage returns aggregated usage for all nodes within a compensation period.
func (db *StoragenodeAccounting) QueryStorageNodePeriodUsage(ctx context.Context, period compensation.Period) (_ []accounting.StorageNodePeriodUsage, err error) {
	defer mon.Task()(&ctx)(&err)

	stmt := db.db.Rebind(`
		SELECT
			node_id,
			SUM(at_rest_total::decimal) AS at_rest_total,
			SUM(get_total) AS get_total,
			SUM(put_total) AS put_total,
			SUM(get_repair_total) AS get_repair_total,
			SUM(put_repair_total) AS put_repair_total,
			SUM(get_audit_total) AS get_audit_total
		FROM
			accounting_rollups
		WHERE
			start_time >= ? AND start_time < ?
		GROUP BY
			node_id
		ORDER BY
			node_id ASC
	`)

	rows, err := db.db.DB.QueryContext(ctx, stmt, period.StartDate(), period.EndDateExclusive())
	if err != nil {
		return nil, Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	usages := []accounting.StorageNodePeriodUsage{}
	for rows.Next() {
		var nodeID []byte
		usage := accounting.StorageNodePeriodUsage{}
		if err := rows.Scan(
			&nodeID,
			&usage.AtRestTotal,
			&usage.GetTotal,
			&usage.PutTotal,
			&usage.GetRepairTotal,
			&usage.PutRepairTotal,
			&usage.GetAuditTotal,
		); err != nil {
			return nil, Error.Wrap(err)
		}

		usage.NodeID, err = storj.NodeIDFromBytes(nodeID)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		usages = append(usages, usage)
	}
	return usages, rows.Err()
}

// QueryStorageNodeUsage returns a slice of StorageNodeUsage for the given node and period.
func (db *StoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, nodeID storj.NodeID, start time.Time, end time.Time) (_ []accounting.StorageNodeUsage, err error) {
	defer mon.Task()(&ctx)(&err)

	lastRollup, err := db.db.Find_AccountingTimestamps_Value_By_Name(ctx, dbx.AccountingTimestamps_Name(accounting.LastRollup))
	if err != nil {
		return nil, Error.Wrap(err)
	}
	if lastRollup == nil {
		return nil, nil
	}

	start, end = start.UTC(), end.UTC()
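
	// The UNION stitches daily sums from accounting_rollups together with
	// tallies that have not been rolled up yet (those newer than the
	// LastRollup timestamp), skipping days the rollups already cover.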
	query := `
		SELECT SUM(at_rest_total), (start_time at time zone 'UTC')::date AS start_time
		FROM accounting_rollups
		WHERE node_id = $1
			AND $2 <= start_time AND start_time <= $3
		GROUP BY (start_time at time zone 'UTC')::date
		UNION
		SELECT SUM(data_total) AS at_rest_total, (interval_end_time at time zone 'UTC')::date AS start_time
		FROM storagenode_storage_tallies
		WHERE node_id = $1
			AND NOT EXISTS (
				SELECT 1 FROM accounting_rollups
				WHERE node_id = $1
					AND $2 <= start_time AND start_time <= $3
					AND (start_time at time zone 'UTC')::date = (interval_end_time at time zone 'UTC')::date
			)
			AND (SELECT value FROM accounting_timestamps WHERE name = $4) < interval_end_time AND interval_end_time <= $3
		GROUP BY (interval_end_time at time zone 'UTC')::date
		ORDER BY start_time;
	`

	rows, err := db.db.QueryContext(ctx, db.db.Rebind(query),
		nodeID, start, end, accounting.LastRollup,
	)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	var nodeStorageUsages []accounting.StorageNodeUsage
	for rows.Next() {
		var atRestTotal float64
		var startTime dbutil.NullTime

		err = rows.Scan(&atRestTotal, &startTime)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		nodeStorageUsages = append(nodeStorageUsages, accounting.StorageNodeUsage{
			NodeID:      nodeID,
			StorageUsed: atRestTotal,
			Timestamp:   startTime.Time,
		})
	}

	return nodeStorageUsages, rows.Err()
}

// DeleteTalliesBefore deletes all raw tallies prior to some time.
func (db *StoragenodeAccounting) DeleteTalliesBefore(ctx context.Context, latestRollup time.Time, batchSize int) (err error) {
	defer mon.Task()(&ctx)(&err)

	if batchSize <= 0 {
		batchSize = 10000
	}
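
	// CockroachDB supports DELETE ... LIMIT directly; Postgres does not,
	// so there we select a batch of ctids (physical row ids) in a subquery
	// and delete those, keeping each statement's lock footprint small.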
	var query string
	switch db.db.impl {
	case dbutil.Cockroach:
		query = `
			DELETE FROM storagenode_storage_tallies
			WHERE interval_end_time < ?
			LIMIT ?`
	case dbutil.Postgres:
		query = `
			DELETE FROM storagenode_storage_tallies
			WHERE ctid IN (
				SELECT ctid
				FROM storagenode_storage_tallies
				WHERE interval_end_time < ?
				ORDER BY interval_end_time
				LIMIT ?
			)`
	default:
		return Error.New("unsupported database: %v", db.db.impl)
	}
	query = db.db.Rebind(query)

	for {
		res, err := db.db.DB.ExecContext(ctx, query, latestRollup, batchSize)
		if err != nil {
			if errs.Is(err, sql.ErrNoRows) {
				return nil
			}
			return Error.Wrap(err)
		}

		affected, err := res.RowsAffected()
		if err != nil {
			return Error.Wrap(err)
		}
		if affected == 0 {
			return nil
		}
	}
}

// ArchiveRollupsBefore archives rollups older than a given time.
func (db *StoragenodeAccounting) ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (nodeRollupsDeleted int, err error) {
	defer mon.Task()(&ctx)(&err)

	if batchSize <= 0 {
		return 0, nil
	}
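
	// Both branches move rows with a single CTE: DELETE ... RETURNING feeds
	// the deleted rollups straight into the archive INSERT, so a row is
	// never visible in both tables at once. Cockroach additionally batches
	// the delete with LIMIT, which Postgres does not support on DELETE.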
	switch db.db.impl {
	case dbutil.Cockroach:
		for {
			row := db.db.QueryRow(ctx, `
				WITH rollups_to_move AS (
					DELETE FROM storagenode_bandwidth_rollups
					WHERE interval_start <= $1
					LIMIT $2 RETURNING *
				), moved_rollups AS (
					INSERT INTO storagenode_bandwidth_rollup_archives SELECT * FROM rollups_to_move RETURNING *
				)
				SELECT count(*) FROM moved_rollups
			`, before, batchSize)

			var rowCount int
			err = row.Scan(&rowCount)
			if err != nil {
				return nodeRollupsDeleted, err
			}
			nodeRollupsDeleted += rowCount

			if rowCount < batchSize {
				break
			}
		}
		return nodeRollupsDeleted, nil

	case dbutil.Postgres:
		storagenodeStatement := `
			WITH rollups_to_move AS (
				DELETE FROM storagenode_bandwidth_rollups
				WHERE interval_start <= $1
				RETURNING *
			), moved_rollups AS (
				INSERT INTO storagenode_bandwidth_rollup_archives SELECT * FROM rollups_to_move RETURNING *
			)
			SELECT count(*) FROM moved_rollups
		`
		row := db.db.DB.QueryRow(ctx, storagenodeStatement, before)
		err = row.Scan(&nodeRollupsDeleted)
		return nodeRollupsDeleted, err

	default:
		return 0, Error.New("unsupported database: %v", db.db.impl)
	}
}

// GetRollupsSince retrieves all bandwidth rollup records since a given time.
func (db *StoragenodeAccounting) GetRollupsSince(ctx context.Context, since time.Time) (bwRollups []accounting.StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}

	var cursor *dbx.Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
	for {
		dbxRollups, next, err := db.db.Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx,
			dbx.StoragenodeBandwidthRollup_IntervalStart(since),
			pageLimit, cursor)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		cursor = next
		for _, dbxRollup := range dbxRollups {
			id, err := storj.NodeIDFromBytes(dbxRollup.StoragenodeId)
			if err != nil {
				return nil, Error.Wrap(err)
			}
			bwRollups = append(bwRollups, accounting.StoragenodeBandwidthRollup{
				NodeID:        id,
				IntervalStart: dbxRollup.IntervalStart,
				Action:        dbxRollup.Action,
				Settled:       dbxRollup.Settled,
			})
		}
		if cursor == nil {
			return bwRollups, nil
		}
	}
}

// GetArchivedRollupsSince retrieves all archived bandwidth rollup records since a given time.
func (db *StoragenodeAccounting) GetArchivedRollupsSince(ctx context.Context, since time.Time) (bwRollups []accounting.StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}

	var cursor *dbx.Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
	for {
		dbxRollups, next, err := db.db.Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx,
			dbx.StoragenodeBandwidthRollupArchive_IntervalStart(since),
			pageLimit, cursor)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		cursor = next
		for _, dbxRollup := range dbxRollups {
			id, err := storj.NodeIDFromBytes(dbxRollup.StoragenodeId)
			if err != nil {
				return nil, Error.Wrap(err)
			}
			bwRollups = append(bwRollups, accounting.StoragenodeBandwidthRollup{
				NodeID:        id,
				IntervalStart: dbxRollup.IntervalStart,
				Action:        dbxRollup.Action,
				Settled:       dbxRollup.Settled,
			})
		}
		if cursor == nil {
			return bwRollups, nil
		}
	}
}