2019-03-27 10:24:35 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package satellitedb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-04-01 21:14:58 +01:00
|
|
|
"database/sql"
|
2020-07-14 14:04:38 +01:00
|
|
|
"errors"
|
2020-06-11 19:31:45 +01:00
|
|
|
"reflect"
|
2019-03-28 20:09:23 +00:00
|
|
|
"time"
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
"github.com/jackc/pgx/v4"
|
2019-08-15 20:05:43 +01:00
|
|
|
"github.com/zeebo/errs"
|
2020-01-10 18:53:42 +00:00
|
|
|
"go.uber.org/zap"
|
2019-06-10 15:58:28 +01:00
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/storj"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2021-04-23 10:52:40 +01:00
|
|
|
"storj.io/private/dbutil/pgutil"
|
2021-05-17 15:07:59 +01:00
|
|
|
"storj.io/private/dbutil/pgxutil"
|
2019-04-04 15:42:01 +01:00
|
|
|
"storj.io/storj/satellite/orders"
|
2020-01-15 02:29:51 +00:00
|
|
|
"storj.io/storj/satellite/satellitedb/dbx"
|
2019-03-27 10:24:35 +00:00
|
|
|
)
|
|
|
|
|
2019-04-01 21:14:58 +01:00
|
|
|
// defaultIntervalSeconds is the length of a rollup interval (one hour),
// stored in the interval_seconds column of the bandwidth rollup tables.
const defaultIntervalSeconds = int(time.Hour / time.Second)
|
|
|
|
|
2019-09-06 15:49:30 +01:00
|
|
|
// Error classes returned by the orders database methods.
var (
	// ErrDifferentStorageNodes is returned when ProcessOrders gets orders from different storage nodes.
	ErrDifferentStorageNodes = errs.Class("different storage nodes")
	// ErrBucketFromSerial is returned when there is an error trying to get the bucket name from the serial number.
	ErrBucketFromSerial = errs.Class("bucket from serial number")
	// ErrUpdateBucketBandwidthSettle is returned when there is an error updating bucket bandwidth.
	ErrUpdateBucketBandwidthSettle = errs.Class("update bucket bandwidth settle")
	// ErrProcessOrderWithWindowTx is returned when there is an error with the ProcessOrders transaction.
	ErrProcessOrderWithWindowTx = errs.Class("process order with window transaction")
	// ErrGetStoragenodeBandwidthInWindow is returned when there is an error getting all storage node bandwidth for a window.
	ErrGetStoragenodeBandwidthInWindow = errs.Class("get storagenode bandwidth in window")
	// ErrCreateStoragenodeBandwidth is returned when there is an error updating storage node bandwidth.
	ErrCreateStoragenodeBandwidth = errs.Class("create storagenode bandwidth")
)
|
|
|
|
|
2019-03-27 10:24:35 +00:00
|
|
|
// ordersDB provides access to the satellite database for order settlement
// and bandwidth rollup bookkeeping.
type ordersDB struct {
	db *satelliteDB
}
|
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket.
|
2019-06-25 16:58:42 +01:00
|
|
|
func (db *ordersDB) UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
return pgxutil.Conn(ctx, db.db, func(conn *pgx.Conn) error {
|
|
|
|
var batch pgx.Batch
|
|
|
|
|
|
|
|
// TODO decide if we need to have transaction here
|
|
|
|
batch.Queue(`START TRANSACTION`)
|
|
|
|
|
|
|
|
statement := db.db.Rebind(
|
2020-05-01 14:24:12 +01:00
|
|
|
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
|
2021-05-17 15:07:59 +01:00
|
|
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
|
|
ON CONFLICT(bucket_name, project_id, interval_start, action)
|
|
|
|
DO UPDATE SET allocated = bucket_bandwidth_rollups.allocated + ?`,
|
2020-05-01 14:24:12 +01:00
|
|
|
)
|
2021-05-17 15:07:59 +01:00
|
|
|
batch.Queue(statement, bucketName, projectID[:], intervalStart.UTC(), defaultIntervalSeconds, action, 0, uint64(amount), 0, uint64(amount))
|
2020-05-01 14:24:12 +01:00
|
|
|
|
|
|
|
if action == pb.PieceAction_GET {
|
2021-05-17 15:07:59 +01:00
|
|
|
// TODO remove when project_bandwidth_daily_rollups will be used
|
2020-05-01 14:24:12 +01:00
|
|
|
projectInterval := time.Date(intervalStart.Year(), intervalStart.Month(), 1, 0, 0, 0, 0, time.UTC)
|
2021-05-17 15:07:59 +01:00
|
|
|
statement = db.db.Rebind(
|
2020-05-01 14:24:12 +01:00
|
|
|
`INSERT INTO project_bandwidth_rollups (project_id, interval_month, egress_allocated)
|
|
|
|
VALUES (?, ?, ?)
|
|
|
|
ON CONFLICT(project_id, interval_month)
|
|
|
|
DO UPDATE SET egress_allocated = project_bandwidth_rollups.egress_allocated + EXCLUDED.egress_allocated::bigint`,
|
|
|
|
)
|
2021-05-17 15:07:59 +01:00
|
|
|
batch.Queue(statement, projectID[:], projectInterval, uint64(amount))
|
|
|
|
|
|
|
|
dailyInterval := time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), 0, 0, 0, 0, time.UTC)
|
|
|
|
statement = db.db.Rebind(
|
|
|
|
`INSERT INTO project_bandwidth_daily_rollups (project_id, interval_day, egress_allocated, egress_settled)
|
|
|
|
VALUES (?, ?, ?, ?)
|
|
|
|
ON CONFLICT(project_id, interval_day)
|
|
|
|
DO UPDATE SET egress_allocated = project_bandwidth_daily_rollups.egress_allocated + EXCLUDED.egress_allocated::BIGINT`,
|
|
|
|
)
|
|
|
|
batch.Queue(statement, projectID[:], dailyInterval, uint64(amount), 0)
|
2020-05-01 14:24:12 +01:00
|
|
|
}
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
batch.Queue(`COMMIT TRANSACTION`)
|
|
|
|
|
|
|
|
results := conn.SendBatch(ctx, &batch)
|
|
|
|
defer func() { err = errs.Combine(err, results.Close()) }()
|
|
|
|
|
|
|
|
var errlist errs.Group
|
|
|
|
for i := 0; i < batch.Len(); i++ {
|
|
|
|
_, err := results.Exec()
|
|
|
|
errlist.Add(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return errlist.Err()
|
|
|
|
})
|
2019-04-01 21:14:58 +01:00
|
|
|
}
|
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket.
|
2019-06-25 16:58:42 +01:00
|
|
|
func (db *ordersDB) UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
return db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
|
|
|
|
statement := db.db.Rebind(
|
|
|
|
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
|
|
|
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
|
|
ON CONFLICT(bucket_name, project_id, interval_start, action)
|
|
|
|
DO UPDATE SET settled = bucket_bandwidth_rollups.settled + ?`,
|
|
|
|
)
|
|
|
|
_, err = db.db.ExecContext(ctx, statement,
|
|
|
|
bucketName, projectID[:], intervalStart.UTC(), defaultIntervalSeconds, action, 0, 0, uint64(amount), uint64(amount),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return ErrUpdateBucketBandwidthSettle.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if action == pb.PieceAction_GET {
|
|
|
|
dailyInterval := time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), 0, 0, 0, 0, time.UTC)
|
|
|
|
statement = tx.Rebind(
|
|
|
|
`INSERT INTO project_bandwidth_daily_rollups (project_id, interval_day, egress_allocated, egress_settled)
|
|
|
|
VALUES (?, ?, ?, ?)
|
|
|
|
ON CONFLICT(project_id, interval_day)
|
|
|
|
DO UPDATE SET egress_settled = project_bandwidth_daily_rollups.egress_settled + EXCLUDED.egress_settled::BIGINT`,
|
|
|
|
)
|
|
|
|
_, err = tx.Tx.ExecContext(ctx, statement, projectID[:], dailyInterval, 0, uint64(amount))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
2019-03-27 10:24:35 +00:00
|
|
|
}
|
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket.
|
2019-06-25 16:58:42 +01:00
|
|
|
func (db *ordersDB) UpdateBucketBandwidthInline(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2019-04-01 21:14:58 +01:00
|
|
|
statement := db.db.Rebind(
|
2019-04-02 19:21:18 +01:00
|
|
|
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
|
|
|
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
|
|
ON CONFLICT(bucket_name, project_id, interval_start, action)
|
2019-04-01 21:14:58 +01:00
|
|
|
DO UPDATE SET inline = bucket_bandwidth_rollups.inline + ?`,
|
|
|
|
)
|
2019-06-04 12:55:38 +01:00
|
|
|
_, err = db.db.ExecContext(ctx, statement,
|
2020-02-14 00:03:41 +00:00
|
|
|
bucketName, projectID[:], intervalStart.UTC(), defaultIntervalSeconds, action, uint64(amount), 0, 0, uint64(amount),
|
2019-04-02 19:21:18 +01:00
|
|
|
)
|
2019-03-27 10:24:35 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-04-01 21:14:58 +01:00
|
|
|
return nil
|
|
|
|
}
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node for the given intervalStart time.
|
2019-06-04 12:55:38 +01:00
|
|
|
func (db *ordersDB) UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2019-04-01 21:14:58 +01:00
|
|
|
statement := db.db.Rebind(
|
2020-01-12 15:20:47 +00:00
|
|
|
`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, settled)
|
|
|
|
VALUES (?, ?, ?, ?, ?)
|
2019-04-01 21:14:58 +01:00
|
|
|
ON CONFLICT(storagenode_id, interval_start, action)
|
|
|
|
DO UPDATE SET settled = storagenode_bandwidth_rollups.settled + ?`,
|
|
|
|
)
|
2019-06-04 12:55:38 +01:00
|
|
|
_, err = db.db.ExecContext(ctx, statement,
|
2020-02-14 00:03:41 +00:00
|
|
|
storageNode.Bytes(), intervalStart.UTC(), defaultIntervalSeconds, action, uint64(amount), uint64(amount),
|
2019-04-02 19:21:18 +01:00
|
|
|
)
|
2019-03-27 10:24:35 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-04-01 21:14:58 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// GetBucketBandwidth gets total bucket bandwidth from period of time.
|
2019-06-25 16:58:42 +01:00
|
|
|
func (db *ordersDB) GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from, to time.Time) (_ int64, err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2019-04-01 21:14:58 +01:00
|
|
|
var sum *int64
|
2019-04-02 19:21:18 +01:00
|
|
|
query := `SELECT SUM(settled) FROM bucket_bandwidth_rollups WHERE bucket_name = ? AND project_id = ? AND interval_start > ? AND interval_start <= ?`
|
2020-02-14 00:03:41 +00:00
|
|
|
err = db.db.QueryRow(ctx, db.db.Rebind(query), bucketName, projectID[:], from.UTC(), to.UTC()).Scan(&sum)
|
2020-07-14 14:04:38 +01:00
|
|
|
if errors.Is(err, sql.ErrNoRows) || sum == nil {
|
2019-04-01 21:14:58 +01:00
|
|
|
return 0, nil
|
|
|
|
}
|
2020-01-15 21:45:17 +00:00
|
|
|
return *sum, Error.Wrap(err)
|
2019-04-01 21:14:58 +01:00
|
|
|
}
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2020-02-18 12:03:23 +00:00
|
|
|
// GetStorageNodeBandwidth gets total storage node bandwidth from period of time.
|
2019-06-04 12:55:38 +01:00
|
|
|
func (db *ordersDB) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from, to time.Time) (_ int64, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 00:03:41 +00:00
|
|
|
|
2020-11-12 19:01:55 +00:00
|
|
|
var sum1, sum2 int64
|
|
|
|
|
|
|
|
err1 := db.db.QueryRow(ctx, db.db.Rebind(`
|
|
|
|
SELECT COALESCE(SUM(settled), 0)
|
|
|
|
FROM storagenode_bandwidth_rollups
|
|
|
|
WHERE storagenode_id = ?
|
|
|
|
AND interval_start > ?
|
|
|
|
AND interval_start <= ?
|
|
|
|
`), nodeID.Bytes(), from.UTC(), to.UTC()).Scan(&sum1)
|
|
|
|
|
|
|
|
err2 := db.db.QueryRow(ctx, db.db.Rebind(`
|
|
|
|
SELECT COALESCE(SUM(settled), 0)
|
|
|
|
FROM storagenode_bandwidth_rollups_phase2
|
|
|
|
WHERE storagenode_id = ?
|
|
|
|
AND interval_start > ?
|
|
|
|
AND interval_start <= ?
|
|
|
|
`), nodeID.Bytes(), from.UTC(), to.UTC()).Scan(&sum2)
|
|
|
|
|
|
|
|
if err1 != nil && !errors.Is(err1, sql.ErrNoRows) {
|
|
|
|
return 0, err1
|
|
|
|
} else if err2 != nil && !errors.Is(err2, sql.ErrNoRows) {
|
|
|
|
return 0, err2
|
2019-04-01 21:14:58 +01:00
|
|
|
}
|
2020-11-12 19:01:55 +00:00
|
|
|
|
|
|
|
return sum1 + sum2, nil
|
2019-04-01 21:14:58 +01:00
|
|
|
}
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
func (db *ordersDB) UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []orders.BucketBandwidthRollup) (err error) {
|
2019-08-15 20:05:43 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
return db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-08-15 20:05:43 +01:00
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
if len(rollups) == 0 {
|
|
|
|
return nil
|
2019-09-06 15:49:30 +01:00
|
|
|
}
|
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
orders.SortBucketBandwidthRollups(rollups)
|
|
|
|
|
|
|
|
intervalStart = intervalStart.UTC()
|
|
|
|
intervalStart = time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), intervalStart.Hour(), 0, 0, 0, time.UTC)
|
|
|
|
|
|
|
|
var bucketNames [][]byte
|
|
|
|
var projectIDs [][]byte
|
|
|
|
var actionSlice []int32
|
|
|
|
var inlineSlice []int64
|
|
|
|
var allocatedSlice []int64
|
|
|
|
var settledSlice []int64
|
2021-05-17 15:07:59 +01:00
|
|
|
|
|
|
|
type bandwidth struct {
|
|
|
|
Allocated int64
|
|
|
|
Settled int64
|
|
|
|
}
|
|
|
|
projectRUMap := make(map[uuid.UUID]bandwidth)
|
2021-01-22 13:51:29 +00:00
|
|
|
|
|
|
|
for _, rollup := range rollups {
|
|
|
|
rollup := rollup
|
|
|
|
bucketNames = append(bucketNames, []byte(rollup.BucketName))
|
|
|
|
projectIDs = append(projectIDs, rollup.ProjectID[:])
|
|
|
|
actionSlice = append(actionSlice, int32(rollup.Action))
|
|
|
|
inlineSlice = append(inlineSlice, rollup.Inline)
|
|
|
|
allocatedSlice = append(allocatedSlice, rollup.Allocated)
|
|
|
|
settledSlice = append(settledSlice, rollup.Settled)
|
|
|
|
|
|
|
|
if rollup.Action == pb.PieceAction_GET {
|
2021-05-17 15:07:59 +01:00
|
|
|
b := projectRUMap[rollup.ProjectID]
|
|
|
|
b.Allocated += rollup.Allocated
|
|
|
|
b.Settled += rollup.Settled
|
|
|
|
projectRUMap[rollup.ProjectID] = b
|
2020-01-15 21:45:17 +00:00
|
|
|
}
|
2020-05-01 14:24:12 +01:00
|
|
|
}
|
2020-01-14 00:36:12 +00:00
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
_, err = tx.Tx.ExecContext(ctx, `
|
2020-01-24 08:47:15 +00:00
|
|
|
INSERT INTO bucket_bandwidth_rollups (
|
|
|
|
bucket_name, project_id,
|
|
|
|
interval_start, interval_seconds,
|
|
|
|
action, inline, allocated, settled)
|
|
|
|
SELECT
|
|
|
|
unnest($1::bytea[]), unnest($2::bytea[]),
|
|
|
|
$3, $4,
|
2020-09-29 10:53:15 +01:00
|
|
|
unnest($5::int4[]), unnest($6::bigint[]), unnest($7::bigint[]), unnest($8::bigint[])
|
2020-01-24 08:47:15 +00:00
|
|
|
ON CONFLICT(bucket_name, project_id, interval_start, action)
|
|
|
|
DO UPDATE SET
|
|
|
|
allocated = bucket_bandwidth_rollups.allocated + EXCLUDED.allocated,
|
|
|
|
inline = bucket_bandwidth_rollups.inline + EXCLUDED.inline,
|
|
|
|
settled = bucket_bandwidth_rollups.settled + EXCLUDED.settled`,
|
2021-01-22 13:51:29 +00:00
|
|
|
pgutil.ByteaArray(bucketNames), pgutil.ByteaArray(projectIDs),
|
|
|
|
intervalStart, defaultIntervalSeconds,
|
|
|
|
pgutil.Int4Array(actionSlice), pgutil.Int8Array(inlineSlice), pgutil.Int8Array(allocatedSlice), pgutil.Int8Array(settledSlice))
|
|
|
|
if err != nil {
|
|
|
|
db.db.log.Error("Bucket bandwidth rollup batch flush failed.", zap.Error(err))
|
|
|
|
}
|
2020-05-01 14:24:12 +01:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
projectRUIDs := make([]uuid.UUID, 0, len(projectRUMap))
|
2021-01-22 13:51:29 +00:00
|
|
|
var projectRUAllocated []int64
|
2021-05-17 15:07:59 +01:00
|
|
|
var projectRUSettled []int64
|
2021-01-22 13:51:29 +00:00
|
|
|
projectInterval := time.Date(intervalStart.Year(), intervalStart.Month(), 1, intervalStart.Hour(), 0, 0, 0, time.UTC)
|
2021-05-17 15:07:59 +01:00
|
|
|
dailyInterval := time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), 0, 0, 0, 0, time.UTC)
|
2020-05-01 14:24:12 +01:00
|
|
|
|
2021-05-17 15:07:59 +01:00
|
|
|
for projectID, v := range projectRUMap {
|
|
|
|
projectRUIDs = append(projectRUIDs, projectID)
|
|
|
|
projectRUAllocated = append(projectRUAllocated, v.Allocated)
|
|
|
|
projectRUSettled = append(projectRUSettled, v.Settled)
|
2020-05-01 14:24:12 +01:00
|
|
|
}
|
|
|
|
|
2021-01-22 13:51:29 +00:00
|
|
|
if len(projectRUIDs) > 0 {
|
2021-05-17 15:07:59 +01:00
|
|
|
// TODO remove when project_bandwidth_daily_rollups will be used
|
2021-01-22 13:51:29 +00:00
|
|
|
_, err = tx.Tx.ExecContext(ctx, `
|
2021-05-17 15:07:59 +01:00
|
|
|
INSERT INTO project_bandwidth_rollups(project_id, interval_month, egress_allocated)
|
|
|
|
SELECT unnest($1::bytea[]), $2, unnest($3::bigint[])
|
|
|
|
ON CONFLICT(project_id, interval_month)
|
|
|
|
DO UPDATE SET egress_allocated = project_bandwidth_rollups.egress_allocated + EXCLUDED.egress_allocated::bigint;
|
|
|
|
`,
|
|
|
|
pgutil.UUIDArray(projectRUIDs), projectInterval, pgutil.Int8Array(projectRUAllocated))
|
2021-01-22 13:51:29 +00:00
|
|
|
if err != nil {
|
|
|
|
db.db.log.Error("Project bandwidth rollup batch flush failed.", zap.Error(err))
|
|
|
|
}
|
2021-05-17 15:07:59 +01:00
|
|
|
|
|
|
|
_, err = tx.Tx.ExecContext(ctx, `
|
|
|
|
INSERT INTO project_bandwidth_daily_rollups(project_id, interval_day, egress_allocated, egress_settled)
|
|
|
|
SELECT unnest($1::bytea[]), $2, unnest($3::bigint[]), unnest($4::bigint[])
|
|
|
|
ON CONFLICT(project_id, interval_day)
|
|
|
|
DO UPDATE SET
|
|
|
|
egress_allocated = project_bandwidth_daily_rollups.egress_allocated + EXCLUDED.egress_allocated::bigint,
|
|
|
|
egress_settled = project_bandwidth_daily_rollups.egress_settled + EXCLUDED.egress_settled::bigint
|
|
|
|
`, pgutil.UUIDArray(projectRUIDs), dailyInterval, pgutil.Int8Array(projectRUAllocated), pgutil.Int8Array(projectRUSettled))
|
|
|
|
if err != nil {
|
|
|
|
db.db.log.Error("Project bandwidth daily rollup batch flush failed.", zap.Error(err))
|
|
|
|
}
|
2020-05-01 14:24:12 +01:00
|
|
|
}
|
2021-01-22 13:51:29 +00:00
|
|
|
return err
|
|
|
|
})
|
2020-02-14 00:03:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// transaction/batch methods
|
|
|
|
//
|
|
|
|
|
2020-06-11 19:31:45 +01:00
|
|
|
// UpdateStoragenodeBandwidthSettleWithWindow adds a record to for each action and settled amount.
|
|
|
|
// If any of these orders already exist in the database, then all of these orders have already been processed.
|
|
|
|
// Orders within a single window may only be processed once to prevent double spending.
|
|
|
|
func (db *ordersDB) UpdateStoragenodeBandwidthSettleWithWindow(ctx context.Context, storageNodeID storj.NodeID, actionAmounts map[int32]int64, window time.Time) (status pb.SettlementWithWindowResponse_Status, alreadyProcessed bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var batchStatus pb.SettlementWithWindowResponse_Status
	var retryCount int
	// Retry loop: concurrent settlements for the same node/window can race on
	// insert; a constraint error means another transaction inserted first, so
	// retry (bounded at 5 attempts) and take the already-processed path.
	for {
		err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
			// try to get all rows from the storage node bandwidth table for the 1 hr window
			// if there are already existing rows for the 1 hr window that means these orders have
			// already been processed
			rows, err := tx.All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx,
				dbx.StoragenodeBandwidthRollup_StoragenodeId(storageNodeID[:]),
				dbx.StoragenodeBandwidthRollup_IntervalStart(window),
			)
			if err != nil {
				return ErrGetStoragenodeBandwidthInWindow.Wrap(err)
			}

			if len(rows) != 0 {
				// if there are already rows in the storagenode bandwidth table for this 1 hr window
				// that means these orders have already been processed
				// if these orders that the storagenode is trying to process again match what in the
				// storagenode bandwidth table, then send a successful response to the storagenode
				// so they don't keep trying to settle these orders again
				// if these orders do not match what we have in the storage node bandwidth table then send
				// back an invalid response
				if SettledAmountsMatch(rows, actionAmounts) {
					batchStatus = pb.SettlementWithWindowResponse_ACCEPTED
					alreadyProcessed = true
					return nil
				}
				batchStatus = pb.SettlementWithWindowResponse_REJECTED
				return nil
			}
			// if there aren't any rows in the storagenode bandwidth table for this 1 hr window
			// that means these orders have not been processed before so we can continue to process them
			for action, amount := range actionAmounts {
				_, err := tx.Create_StoragenodeBandwidthRollup(ctx,
					dbx.StoragenodeBandwidthRollup_StoragenodeId(storageNodeID[:]),
					dbx.StoragenodeBandwidthRollup_IntervalStart(window),
					dbx.StoragenodeBandwidthRollup_IntervalSeconds(uint(defaultIntervalSeconds)),
					dbx.StoragenodeBandwidthRollup_Action(uint(action)),
					dbx.StoragenodeBandwidthRollup_Settled(uint64(amount)),
					dbx.StoragenodeBandwidthRollup_Create_Fields{},
				)
				if err != nil {
					return ErrCreateStoragenodeBandwidth.Wrap(err)
				}
			}

			batchStatus = pb.SettlementWithWindowResponse_ACCEPTED
			return nil
		})
		// A constraint violation means a concurrent transaction created the
		// rollup rows between our read and our insert — retry so the
		// already-processed branch above can classify the window.
		if dbx.IsConstraintError(err) {
			retryCount++
			if retryCount > 5 {
				return 0, alreadyProcessed, errs.New("process order with window retry count too high")
			}
			continue
		} else if err != nil {
			return 0, alreadyProcessed, ErrProcessOrderWithWindowTx.Wrap(err)
		}
		break
	}

	return batchStatus, alreadyProcessed, nil
}
|
|
|
|
|
|
|
|
// SettledAmountsMatch checks if database rows match the orders. If the settled amount for
|
|
|
|
// each action are not the same then false is returned.
|
|
|
|
func SettledAmountsMatch(rows []*dbx.StoragenodeBandwidthRollup, orderActionAmounts map[int32]int64) bool {
|
2020-11-04 17:24:11 +00:00
|
|
|
rowsSumByAction := map[int32]int64{}
|
2020-06-11 19:31:45 +01:00
|
|
|
for _, row := range rows {
|
|
|
|
rowsSumByAction[int32(row.Action)] += int64(row.Settled)
|
|
|
|
}
|
|
|
|
|
|
|
|
return reflect.DeepEqual(rowsSumByAction, orderActionAmounts)
|
|
|
|
}
|