// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package gracefulexit

import (
	"context"
	"sync"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/storj"
	"storj.io/storj/satellite/metainfo/metaloop"
	"storj.io/uplink/private/eestream"
)

var _ metaloop.Observer = (*PathCollector)(nil)

// PathCollector uses the metainfo loop to add transfer queue items for nodes that are gracefully exiting.
//
// architecture: Observer
type PathCollector struct {
	db            DB
	nodeIDMutex   sync.Mutex
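	// nodeIDStorage accumulates, per exiting node, the total byte count of
	// pieces queued for transfer so far; nodeIDMutex guards it.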
	nodeIDStorage map[storj.NodeID]int64
	buffer        []TransferQueueItem
	log           *zap.Logger
	batchSize     int
}

// NewPathCollector instantiates a path collector.
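//
// A minimal usage sketch, assuming a DB implementation, a list of exiting
// node IDs and a running metainfo loop are already available (the names
// below are illustrative):
//
//	collector := NewPathCollector(db, exitingNodeIDs, log, 1000)
//	if err := loop.Join(ctx, collector); err == nil {
//		err = collector.Flush(ctx) // persist any remaining buffered items
//	}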
func NewPathCollector(db DB, nodeIDs storj.NodeIDList, log *zap.Logger, batchSize int) *PathCollector {
	buffer := make([]TransferQueueItem, 0, batchSize)
	collector := &PathCollector{
		db:        db,
		log:       log,
		buffer:    buffer,
		batchSize: batchSize,
	}

	if len(nodeIDs) > 0 {
		collector.nodeIDStorage = make(map[storj.NodeID]int64, len(nodeIDs))
		for _, nodeID := range nodeIDs {
			collector.nodeIDStorage[nodeID] = 0
		}
	}

	return collector
}

// Flush persists the current buffer items to the database.
func (collector *PathCollector) Flush(ctx context.Context) (err error) {
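	// A limit of 1 forces a write of whatever is currently buffered.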
	return collector.flush(ctx, 1)
}

// RemoteSegment takes a remote segment found in metainfo and creates a graceful exit transfer queue item if it doesn't exist already.
func (collector *PathCollector) RemoteSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
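	// No nodes are exiting, so there is nothing to queue.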
	if len(collector.nodeIDStorage) == 0 {
		return nil
	}
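	// Serialize access to nodeIDStorage while accounting for this
	// segment's pieces.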
	collector.nodeIDMutex.Lock()
	defer collector.nodeIDMutex.Unlock()

	numPieces := len(segment.Pieces)
	key := segment.Location.Encode()
	for _, piece := range segment.Pieces {
		if _, ok := collector.nodeIDStorage[piece.StorageNode]; !ok {
			continue
		}
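		// Derive this piece's stored size from the segment's encrypted size
		// and redundancy scheme, and add it to the node's running total.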
		redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
		if err != nil {
			return err
		}
		pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
		collector.nodeIDStorage[piece.StorageNode] += pieceSize

		item := TransferQueueItem{
			NodeID:          piece.StorageNode,
			Key:             key,
			PieceNum:        int32(piece.Number),
			RootPieceID:     segment.RootPieceID,
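			// The fraction of the designed share count (TotalShares) that
			// still exists for this segment.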
			DurabilityRatio: float64(numPieces) / float64(segment.Redundancy.TotalShares),
		}

		collector.log.Debug("adding piece to transfer queue.", zap.Stringer("Node ID", piece.StorageNode),
			zap.ByteString("key", key), zap.Uint16("piece num", piece.Number),
			zap.Int("num pieces", numPieces), zap.Int16("total possible pieces", segment.Redundancy.TotalShares))

		collector.buffer = append(collector.buffer, item)
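		// Persist the buffer once it has accumulated a full batch; smaller
		// remainders stay buffered until Flush is called.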
		err = collector.flush(ctx, collector.batchSize)
		if err != nil {
			return err
		}
	}

	return nil
}

// Object returns nil; graceful exit is only concerned with a segment's pieces, not with objects.
func (collector *PathCollector) Object(ctx context.Context, object *metaloop.Object) (err error) {
	return nil
}

// InlineSegment returns nil; inline segments are stored on the satellite rather than on storage nodes, so there is nothing to transfer.
func (collector *PathCollector) InlineSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
	return nil
}

// flush persists the buffered items to the database once the buffer length
// reaches limit, then resets the buffer.
func (collector *PathCollector) flush(ctx context.Context, limit int) (err error) {
	if len(collector.buffer) >= limit {
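		// The buffer is reset even if Enqueue fails; failed items are not
		// retried, and the wrapped error is returned to the caller.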
		err = collector.db.Enqueue(ctx, collector.buffer)
		collector.buffer = collector.buffer[:0]

		return errs.Wrap(err)
	}

	return nil
}