// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/lib/pq"
	sqlite3 "github.com/mattn/go-sqlite3"
	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"

	"storj.io/storj/internal/dbutil/pgutil"
	"storj.io/storj/internal/dbutil/sqliteutil"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/orders"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

const defaultIntervalSeconds = int(time.Hour / time.Second)

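// ordersDB implements the satellite's orders database methods (serial numbers,
// used serials, and bandwidth rollups) on top of the dbx-generated DB.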
type ordersDB struct {
	db *dbx.DB
}

// CreateSerialInfo creates a serial number entry in the database
func (db *ordersDB) CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, err = db.db.Create_SerialNumber(
		ctx,
		dbx.SerialNumber_SerialNumber(serialNumber.Bytes()),
		dbx.SerialNumber_BucketId(bucketID),
		dbx.SerialNumber_ExpiresAt(limitExpiration),
	)
	return err
}

// DeleteExpiredSerials deletes all expired serials in the serial_numbers and used_serials tables.
func (db *ordersDB) DeleteExpiredSerials(ctx context.Context, now time.Time) (_ int, err error) {
	defer mon.Task()(&ctx)(&err)
	count, err := db.db.Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx, dbx.SerialNumber_ExpiresAt(now))
	if err != nil {
		return 0, err
	}
	return int(count), nil
}

// UseSerialNumber marks the serial number as used by the given storage node and returns the bucket ID it was issued for
func (db *ordersDB) UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) (_ []byte, err error) {
	defer mon.Task()(&ctx)(&err)
	statement := db.db.Rebind(
		`INSERT INTO used_serials (serial_number_id, storage_node_id)
		SELECT id, ? FROM serial_numbers WHERE serial_number = ?`,
	)
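	// a constraint violation on this insert means the serial number has already been marked as used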
	_, err = db.db.ExecContext(ctx, statement, storageNodeID.Bytes(), serialNumber.Bytes())
	if err != nil {
		if pgutil.IsConstraintError(err) || sqliteutil.IsConstraintError(err) {
			return nil, orders.ErrUsingSerialNumber.New("serial number already used")
		}
		return nil, err
	}

	dbxSerialNumber, err := db.db.Find_SerialNumber_By_SerialNumber(
		ctx,
		dbx.SerialNumber_SerialNumber(serialNumber.Bytes()),
	)
	if err != nil {
		return nil, err
	}
	if dbxSerialNumber == nil {
		return nil, orders.ErrUsingSerialNumber.New("serial number not found")
	}
	return dbxSerialNumber.BucketId, nil
}

// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for the given bucket
func (db *ordersDB) UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	statement := db.db.Rebind(
		`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(bucket_name, project_id, interval_start, action)
		DO UPDATE SET allocated = bucket_bandwidth_rollups.allocated + ?`,
	)
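	// the first eight arguments populate a new rollup row; the trailing one binds the
	// increment applied to 'allocated' when a row for this bucket, interval, and action already exists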
	_, err = db.db.ExecContext(ctx, statement,
		bucketName, projectID[:], intervalStart, defaultIntervalSeconds, action, 0, uint64(amount), 0, uint64(amount),
	)
	if err != nil {
		return err
	}

	return nil
}

// UpdateBucketBandwidthSettle updates 'settled' bandwidth for the given bucket
func (db *ordersDB) UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	statement := db.db.Rebind(
		`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(bucket_name, project_id, interval_start, action)
		DO UPDATE SET settled = bucket_bandwidth_rollups.settled + ?`,
	)
	_, err = db.db.ExecContext(ctx, statement,
		bucketName, projectID[:], intervalStart, defaultIntervalSeconds, action, 0, 0, uint64(amount), uint64(amount),
	)
	if err != nil {
		return err
	}
	return nil
}

// UpdateBucketBandwidthInline updates 'inline' bandwidth for the given bucket
func (db *ordersDB) UpdateBucketBandwidthInline(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	statement := db.db.Rebind(
		`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(bucket_name, project_id, interval_start, action)
		DO UPDATE SET inline = bucket_bandwidth_rollups.inline + ?`,
	)
	_, err = db.db.ExecContext(ctx, statement,
		bucketName, projectID[:], intervalStart, defaultIntervalSeconds, action, uint64(amount), 0, 0, uint64(amount),
	)
	if err != nil {
		return err
	}
	return nil
}

// UpdateStoragenodeBandwidthAllocation updates 'allocated' bandwidth for the given storage nodes
func (db *ordersDB) UpdateStoragenodeBandwidthAllocation(ctx context.Context, storageNodes []storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)

	switch t := db.db.Driver().(type) {
	case *sqlite3.SQLiteDriver:
		statement := db.db.Rebind(
			`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled)
			VALUES (?, ?, ?, ?, ?, ?)
			ON CONFLICT(storagenode_id, interval_start, action)
			DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + excluded.allocated`,
		)
		for _, storageNode := range storageNodes {
			_, err = db.db.ExecContext(ctx, statement,
				storageNode.Bytes(), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0,
			)
			if err != nil {
				return Error.Wrap(err)
			}
		}

	case *pq.Driver:
		// sort nodes to avoid update deadlock
		sort.Sort(storj.NodeIDList(storageNodes))

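		// postgres inserts a rollup row for every node in a single statement by unnesting
		// the bytea array built by postgresNodeIDList; existing rows accumulate via the upsert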
		_, err := db.db.ExecContext(ctx, `
			INSERT INTO storagenode_bandwidth_rollups
				(storagenode_id, interval_start, interval_seconds, action, allocated, settled)
			SELECT unnest($1::bytea[]), $2, $3, $4, $5, $6
			ON CONFLICT(storagenode_id, interval_start, action)
			DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + excluded.allocated
		`, postgresNodeIDList(storageNodes), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0)
		if err != nil {
			return Error.Wrap(err)
		}

	default:
		return Error.New("Unsupported database %T", t)
	}

	return nil
}

// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for the given storage node for the given intervalStart time
func (db *ordersDB) UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	statement := db.db.Rebind(
		`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled)
		VALUES (?, ?, ?, ?, ?, ?)
		ON CONFLICT(storagenode_id, interval_start, action)
		DO UPDATE SET settled = storagenode_bandwidth_rollups.settled + ?`,
	)
	_, err = db.db.ExecContext(ctx, statement,
		storageNode.Bytes(), intervalStart, defaultIntervalSeconds, action, 0, uint64(amount), uint64(amount),
	)
	if err != nil {
		return err
	}
	return nil
}

// GetBucketBandwidth gets the total settled bandwidth for the given bucket over a period of time
func (db *ordersDB) GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from, to time.Time) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var sum *int64
	query := `SELECT SUM(settled) FROM bucket_bandwidth_rollups WHERE bucket_name = ? AND project_id = ? AND interval_start > ? AND interval_start <= ?`
	err = db.db.QueryRow(db.db.Rebind(query), bucketName, projectID[:], from, to).Scan(&sum)
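	// SUM() returns NULL when no rows match, which scans into a nil *int64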
	if err == sql.ErrNoRows || sum == nil {
		return 0, nil
	}
	return *sum, err
}

// GetStorageNodeBandwidth gets the total settled bandwidth for the given storage node over a period of time
func (db *ordersDB) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from, to time.Time) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var sum *int64
	query := `SELECT SUM(settled) FROM storagenode_bandwidth_rollups WHERE storagenode_id = ? AND interval_start > ? AND interval_start <= ?`
	err = db.db.QueryRow(db.db.Rebind(query), nodeID.Bytes(), from, to).Scan(&sum)
	if err == sql.ErrNoRows || sum == nil {
		return 0, nil
	}
	return *sum, err
}

// UnuseSerialNumber removes the serial number -> storage node ID pair from the database
func (db *ordersDB) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) (err error) {
	defer mon.Task()(&ctx)(&err)
	statement := `DELETE FROM used_serials WHERE storage_node_id = ? AND
		serial_number_id IN (SELECT id FROM serial_numbers WHERE serial_number = ?)`
	_, err = db.db.ExecContext(ctx, db.db.Rebind(statement), storageNodeID.Bytes(), serialNumber.Bytes())
	return err
}

// ProcessOrders takes a list of order requests and "settles" them in one transaction
func (db *ordersDB) ProcessOrders(ctx context.Context, requests []*orders.ProcessOrderRequest) (responses []*orders.ProcessOrderResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(requests) == 0 {
		return nil, nil
	}

	tx, err := db.db.Begin()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	defer func() {
		if err == nil {
			err = tx.Commit()
		} else {
			err = errs.Combine(err, tx.Rollback())
		}
	}()

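	// every rollup written by this call falls into the current hour's interval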
	now := time.Now().UTC()
	intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())

	rejectedRequests := make(map[storj.SerialNumber]bool)
	reject := func(serialNumber storj.SerialNumber) {
		r := &orders.ProcessOrderResponse{
			SerialNumber: serialNumber,
			Status:       pb.SettlementResponse_REJECTED,
		}
		rejectedRequests[serialNumber] = true
		responses = append(responses, r)
	}

	// process the inserts into the used_serials table individually so we can handle
	// the case where the order has already been processed; duplicates and previously
	// processed orders are rejected
	for _, request := range requests {
		insert := "INSERT INTO used_serials (serial_number_id, storage_node_id) SELECT id, ? FROM serial_numbers WHERE serial_number = ?"
		_, err = tx.Exec(db.db.Rebind(insert), request.OrderLimit.StorageNodeId.Bytes(), request.OrderLimit.SerialNumber.Bytes())
		if err != nil {
			if pgutil.IsConstraintError(err) || sqliteutil.IsConstraintError(err) {
				reject(request.OrderLimit.SerialNumber)
			} else {
				return nil, Error.Wrap(err)
			}
		}
	}

	// query all the bucket IDs for the submitted serial numbers in a single round trip
	query := db.buildGetBucketIdsQuery(len(requests))
	statement := db.db.Rebind(query)

	args := make([]interface{}, len(requests))
	for i, request := range requests {
		args[i] = request.OrderLimit.SerialNumber.Bytes()
	}

	rows, err := tx.Query(statement, args...)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	bucketMap := make(map[storj.SerialNumber][]byte)
	for rows.Next() {
		var serialNumber, bucketID []byte
		err := rows.Scan(&serialNumber, &bucketID)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		sn, err := storj.SerialNumberFromBytes(serialNumber)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		bucketMap[sn] = bucketID
	}
	if err := rows.Err(); err != nil {
		return nil, errs.Wrap(err)
	}

	// build all the bandwidth updates into one SQL statement
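	// each statement inlines its values as SQL literals (see toHex) so that the
	// upserts for every request can be concatenated and sent to the database in a single Exec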
	var updateRollupStatement string
	for _, request := range requests {
		_, rejected := rejectedRequests[request.OrderLimit.SerialNumber]
		if rejected {
			continue
		}
		bucketID, ok := bucketMap[request.OrderLimit.SerialNumber]
		if !ok {
			reject(request.OrderLimit.SerialNumber)
			continue
		}
		projectID, bucketName, err := orders.SplitBucketID(bucketID)
		if err != nil {
			return nil, errs.Wrap(err)
		}

		stmt, err := db.buildUpdateBucketBandwidthRollupStatements(request.OrderLimit, request.Order, projectID[:], bucketName, intervalStart)
		if err != nil {
			return nil, err
		}
		updateRollupStatement += stmt

		stmt, err = db.buildUpdateStorageNodeBandwidthRollupStatements(request.OrderLimit, request.Order, intervalStart)
		if err != nil {
			return nil, err
		}
		updateRollupStatement += stmt
	}

	_, err = tx.Exec(updateRollupStatement)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	for _, request := range requests {
		_, rejected := rejectedRequests[request.OrderLimit.SerialNumber]
		if !rejected {
			r := &orders.ProcessOrderResponse{
				SerialNumber: request.OrderLimit.SerialNumber,
				Status:       pb.SettlementResponse_ACCEPTED,
			}

			responses = append(responses, r)
		}
	}
	return responses, nil
}

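// buildGetBucketIdsQuery returns a query that selects the bucket ID for every
// submitted serial number, with one placeholder per request.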
func (db *ordersDB) buildGetBucketIdsQuery(argCount int) string {
	args := make([]string, argCount)
	for i := 0; i < argCount; i++ {
		args[i] = "?"
	}
	return fmt.Sprintf("SELECT serial_number, bucket_id FROM serial_numbers WHERE serial_number IN (%s);\n", strings.Join(args, ","))
}

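// buildUpdateBucketBandwidthRollupStatements returns an upsert that adds the order
// amount to the bucket's settled bandwidth for the interval, with every value
// inlined as a SQL literal so that many statements can be batched together.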
func (db *ordersDB) buildUpdateBucketBandwidthRollupStatements(orderLimit *pb.OrderLimit, order *pb.Order, projectID []byte, bucketName []byte, intervalStart time.Time) (string, error) {
	hexName, err := db.toHex(bucketName)
	if err != nil {
		return "", err
	}
	hexProjectID, err := db.toHex(projectID)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf(`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
		VALUES (%s, %s, '%s', %d, %d, %d, %d, %d)
		ON CONFLICT(bucket_name, project_id, interval_start, action)
		DO UPDATE SET settled = bucket_bandwidth_rollups.settled + %d;
	`, hexName, hexProjectID, intervalStart.Format("2006-01-02 15:04:05+00:00"), defaultIntervalSeconds, orderLimit.Action, 0, 0, order.Amount, order.Amount), nil
}

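// buildUpdateStorageNodeBandwidthRollupStatements returns the equivalent upsert
// for the storage node's settled bandwidth.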
func (db *ordersDB) buildUpdateStorageNodeBandwidthRollupStatements(orderLimit *pb.OrderLimit, order *pb.Order, intervalStart time.Time) (string, error) {
	hexNodeID, err := db.toHex(orderLimit.StorageNodeId.Bytes())
	if err != nil {
		return "", err
	}

	return fmt.Sprintf(`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled)
		VALUES (%s, '%s', %d, %d, %d, %d)
		ON CONFLICT(storagenode_id, interval_start, action)
		DO UPDATE SET settled = storagenode_bandwidth_rollups.settled + %d;
	`, hexNodeID, intervalStart.Format("2006-01-02 15:04:05+00:00"), defaultIntervalSeconds, orderLimit.Action, 0, order.Amount, order.Amount), nil
}

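// toHex renders value as a driver-specific hex literal (X'..' for SQLite,
// decode('..', 'hex') for Postgres) so it can be embedded directly in a statement.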
func (db *ordersDB) toHex(value []byte) (string, error) {
	hexValue := hex.EncodeToString(value)
	switch t := db.db.Driver().(type) {
	case *sqlite3.SQLiteDriver:
		return fmt.Sprintf("X'%v'", hexValue), nil
	case *pq.Driver:
		return fmt.Sprintf("decode('%v', 'hex')", hexValue), nil
	default:
		return "", errs.New("Unsupported DB type %T", t)
	}
}