// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
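
// Package reportedrollup implements the chore that periodically flushes
// reported order serials to the database as bucket and storagenode
// bandwidth rollups.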
package reportedrollup

import (
	"context"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/metainfo/metabase"
	"storj.io/storj/satellite/orders"
)

var (
	mon = monkit.Package()

	// Error is the error class for this package.
	Error = errs.Class("reportedrollup")
)

// Config is a configuration struct for the Chore.
type Config struct {
	Interval       time.Duration `help:"how often to flush the reported serial rollups to the database" default:"5m"`
	QueueBatchSize int           `help:"default queue batch size" default:"10000"`
}
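
// With the satellite's struct-tag-driven config binding, these fields surface
// as configuration flags. The exact flag names depend on how this struct is
// registered by the satellite, so names such as reported-rollup.interval and
// reported-rollup.queue-batch-size should be treated as illustrative only.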

// Chore for flushing reported serials to the database as rollups.
//
// architecture: Chore
type Chore struct {
	log    *zap.Logger
	db     orders.DB
	config Config

	Loop *sync2.Cycle
}

// NewChore creates a new chore for flushing the reported serials to the database as rollups.
func NewChore(log *zap.Logger, db orders.DB, config Config) *Chore {
	if config.QueueBatchSize == 0 {
		config.QueueBatchSize = 10000
	}

	return &Chore{
		log:    log,
		db:     db,
		config: config,

		Loop: sync2.NewCycle(config.Interval),
	}
}
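
// A minimal wiring sketch (hypothetical variable names; the real satellite peer
// setup differs) showing how the chore is constructed and driven:
//
//	chore := reportedrollup.NewChore(log.Named("reportedrollup"), ordersDB, reportedrollup.Config{
//		Interval:       5 * time.Minute,
//		QueueBatchSize: 10000,
//	})
//	group.Go(func() error { return chore.Run(ctx) })
//	defer func() { _ = chore.Close() }()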

// Run starts the reported rollups chore.
func (chore *Chore) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	return chore.Loop.Run(ctx, func(ctx context.Context) error {
		err := chore.runOnceNow(ctx, time.Now)
		if err != nil {
			chore.log.Error("error flushing reported rollups", zap.Error(err))
		}
		return nil
	})
}

// Close stops the reported rollups chore.
func (chore *Chore) Close() error {
	chore.Loop.Close()
	return nil
}

// RunOnce finds expired bandwidth as of 'now' and inserts rollups into the appropriate tables.
func (chore *Chore) RunOnce(ctx context.Context, now time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	return chore.runOnceNow(ctx, func() time.Time { return now })
}
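
// In tests, RunOnce can be driven directly with a fixed timestamp for
// reproducibility, for example (illustrative only):
//
//	err := chore.RunOnce(ctx, time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC))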

// runOnceNow runs the helper repeatedly, calling nowFn before each run, until
// the helper reports that it is done or an error occurs.
//
// This function exists because tests want to use RunOnce with a single fixed time for
// reproducibility, but the chore loop wants to use whatever time.Now returns each time
// the helper runs.
func (chore *Chore) runOnceNow(ctx context.Context, nowFn func() time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)

	for {
		done, err := chore.runOnceHelper(ctx, nowFn())
		if err != nil {
			return errs.Wrap(err)
		}
		if done {
			return nil
		}
	}
}
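
// readWork reads a batch of pending serials from the queue and aggregates them
// into bucket bandwidth rollups, storagenode bandwidth rollups, and consumed
// serials. done reports whether the queue has been exhausted.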
func (chore *Chore) readWork(ctx context.Context, queue orders.Queue) (
	bucketRollups []orders.BucketBandwidthRollup,
	storagenodeRollups []orders.StoragenodeBandwidthRollup,
	consumedSerials []orders.ConsumedSerial,
	done bool, err error,
) {
	defer mon.Task()(&ctx)(&err)

	// Variables and types to keep track of bucket bandwidth rollups
	type bucketKey struct {
		projectID  uuid.UUID
		bucketName string
		action     pb.PieceAction
	}
	byBucket := make(map[bucketKey]uint64)

	// Variables and types to keep track of storagenode bandwidth rollups
	type storagenodeKey struct {
		nodeID storj.NodeID
		action pb.PieceAction
	}
	byStoragenode := make(map[storagenodeKey]uint64)

	// Variables to keep track of which serial numbers were consumed
	type consumedSerialKey struct {
		nodeID       storj.NodeID
		serialNumber storj.SerialNumber
	}
	seenConsumedSerials := make(map[consumedSerialKey]struct{})

	// Get a batch of pending serials from the queue.
	pendingSerials, queueDone, err := queue.GetPendingSerialsBatch(ctx, chore.config.QueueBatchSize)
	if err != nil {
		return nil, nil, nil, false, errs.Wrap(err)
	}

	for _, row := range pendingSerials {
		row := row

		// If we have already seen this serial inside this function, don't count
		// it again; otherwise, record that we have now seen it.
		key := consumedSerialKey{
			nodeID:       row.NodeID,
			serialNumber: row.SerialNumber,
		}
		if _, exists := seenConsumedSerials[key]; exists {
			continue
		}
		seenConsumedSerials[key] = struct{}{}

		// Parse the project id and bucket name from the reported serial's bucket prefix.
		bucket, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(row.BucketID)) // TODO: rename row.BucketID -> row.BucketPrefix
		if err != nil {
			chore.log.Error("bad row inserted into reported serials",
				zap.Binary("bucket_id", row.BucketID),
				zap.String("node_id", row.NodeID.String()),
				zap.String("serial_number", row.SerialNumber.String()))
			continue
		}
		action := pb.PieceAction(row.Action)
		settled := row.Settled

		// Update our batch state to include it.
		byBucket[bucketKey{
			projectID:  bucket.ProjectID,
			bucketName: bucket.BucketName,
			action:     action,
		}] += settled

		byStoragenode[storagenodeKey{
			nodeID: row.NodeID,
			action: action,
		}] += settled

		consumedSerials = append(consumedSerials, orders.ConsumedSerial{
			NodeID:       row.NodeID,
			SerialNumber: row.SerialNumber,
			ExpiresAt:    row.ExpiresAt,
		})
	}

	// If we didn't get a full batch, the queue must have run out. We should signal
	// this fact to our caller so that they can stop looping.
	if queueDone {
		done = true
	}

	// Convert bucket rollups into a slice.
	for key, settled := range byBucket {
		bucketRollups = append(bucketRollups, orders.BucketBandwidthRollup{
			ProjectID:  key.projectID,
			BucketName: key.bucketName,
			Action:     key.action,
			Settled:    int64(settled),
		})
	}

	// Convert storagenode rollups into a slice.
	for key, settled := range byStoragenode {
		storagenodeRollups = append(storagenodeRollups, orders.StoragenodeBandwidthRollup{
			NodeID:  key.nodeID,
			Action:  key.action,
			Settled: int64(settled),
		})
	}

	chore.log.Debug("Read work",
		zap.Int("bucket_rollups", len(bucketRollups)),
		zap.Int("storagenode_rollups", len(storagenodeRollups)),
		zap.Int("consumed_serials", len(consumedSerials)),
		zap.Bool("done", done),
	)

	return bucketRollups, storagenodeRollups, consumedSerials, done, nil
}
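
// runOnceHelper reads one batch of work from the pending serials queue and
// writes the resulting rollups and consumed serials to the database in a single
// transaction. done reports whether the queue has been drained.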
func (chore *Chore) runOnceHelper(ctx context.Context, now time.Time) (done bool, err error) {
	defer mon.Task()(&ctx)(&err)

	err = chore.db.WithQueue(ctx, func(ctx context.Context, queue orders.Queue) error {
		var (
			bucketRollups      []orders.BucketBandwidthRollup
			storagenodeRollups []orders.StoragenodeBandwidthRollup
			consumedSerials    []orders.ConsumedSerial
		)

		// Read the work we should insert.
		bucketRollups, storagenodeRollups, consumedSerials, done, err = chore.readWork(ctx, queue)
		if err != nil {
			return errs.Wrap(err)
		}

		// Now that we have work, write it all in its own transaction.
		return errs.Wrap(chore.db.WithTransaction(ctx, func(ctx context.Context, tx orders.Transaction) error {
			if err := tx.UpdateBucketBandwidthBatch(ctx, now, bucketRollups); err != nil {
				return errs.Wrap(err)
			}
			if err := tx.UpdateStoragenodeBandwidthBatchPhase2(ctx, now, storagenodeRollups); err != nil {
				return errs.Wrap(err)
			}
			if err := tx.CreateConsumedSerialsBatch(ctx, consumedSerials); err != nil {
				return errs.Wrap(err)
			}
			return nil
		}))
	})
	return done, errs.Wrap(err)
}