satellite/gracefulexit: observer cleanup
Some changes to make the code cleaner and easier to adapt to the new ranged loop.

* removed unneeded mutex
* reorganized constructor args
* avoid creating the same redundancy scheme for each segment piece

Change-Id: I81f3f6597147fc515516949db3ce796a60b1c8a0
parent cc858f4e91
commit b2dc8211d6
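As a standalone illustration of the constructor reorganization (logger first, the per-node map allocated alongside the other fields instead of lazily), here is a minimal sketch; every identifier in it is a hypothetical stand-in, not the actual Storj API:

package main

import (
    "fmt"
    "log"
    "os"
)

// collector is an illustrative stand-in for the PathCollector observer.
type collector struct {
    log       *log.Logger
    batchSize int

    perNode map[string]int64 // bytes owed per exiting node
}

// newCollector mirrors the reorganized argument order: logger first, then
// data dependencies, with the map sized up front in the struct literal.
func newCollector(logger *log.Logger, exitingNodes []string, batchSize int) *collector {
    c := &collector{
        log:       logger,
        batchSize: batchSize,

        perNode: make(map[string]int64, len(exitingNodes)),
    }
    for _, id := range exitingNodes {
        c.perNode[id] = 0
    }
    return c
}

func main() {
    c := newCollector(log.New(os.Stderr, "", 0), []string{"node-a", "node-b"}, 100)
    fmt.Println(len(c.perNode)) // 2
}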
@@ -100,7 +100,7 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
     }

     // Populate transfer queue for nodes that have not completed the exit loop yet
-    pathCollector := NewPathCollector(chore.db, exitingNodesLoopIncomplete, chore.log, chore.config.ChoreBatchSize)
+    pathCollector := NewPathCollector(chore.log, chore.db, exitingNodesLoopIncomplete, chore.config.ChoreBatchSize)
     err = chore.segmentLoop.Join(ctx, pathCollector)
     if err != nil {
         chore.log.Error("error joining segment loop.", zap.Error(err))
@@ -5,7 +5,6 @@ package gracefulexit

 import (
     "context"
-    "sync"

     "github.com/zeebo/errs"
     "go.uber.org/zap"
@@ -23,27 +22,25 @@ var _ segmentloop.Observer = (*PathCollector)(nil)
 //
 // architecture: Observer
 type PathCollector struct {
-    db            DB
-    nodeIDMutex   sync.Mutex
-    nodeIDStorage map[storj.NodeID]int64
-    buffer        []TransferQueueItem
-    log           *zap.Logger
-    batchSize     int
+    log           *zap.Logger
+    db            DB
+    buffer        []TransferQueueItem
+    batchSize     int
+    nodeIDStorage map[storj.NodeID]int64
 }

 // NewPathCollector instantiates a path collector.
-func NewPathCollector(db DB, nodeIDs storj.NodeIDList, log *zap.Logger, batchSize int) *PathCollector {
-    buffer := make([]TransferQueueItem, 0, batchSize)
+func NewPathCollector(log *zap.Logger, db DB, exitingNodes storj.NodeIDList, batchSize int) *PathCollector {
     collector := &PathCollector{
-        db:        db,
-        log:       log,
-        buffer:    buffer,
-        batchSize: batchSize,
+        log:           log,
+        db:            db,
+        buffer:        make([]TransferQueueItem, 0, batchSize),
+        batchSize:     batchSize,
+        nodeIDStorage: make(map[storj.NodeID]int64, len(exitingNodes)),
     }

-    if len(nodeIDs) > 0 {
-        collector.nodeIDStorage = make(map[storj.NodeID]int64, len(nodeIDs))
-        for _, nodeID := range nodeIDs {
+    if len(exitingNodes) > 0 {
+        for _, nodeID := range exitingNodes {
             collector.nodeIDStorage[nodeID] = 0
         }
     }
@@ -70,19 +67,23 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, segment *segm
         return nil
     }

-    collector.nodeIDMutex.Lock()
-    defer collector.nodeIDMutex.Unlock()
+    pieceSize := int64(-1)

     numPieces := len(segment.Pieces)
     for _, piece := range segment.Pieces {
         if _, ok := collector.nodeIDStorage[piece.StorageNode]; !ok {
             continue
         }
-        redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
-        if err != nil {
-            return err
+
+        // avoid creating new redundancy strategy for every segment piece
+        if pieceSize == -1 {
+            redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
+            if err != nil {
+                return err
+            }
+            pieceSize = eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
         }
-        pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
+
         collector.nodeIDStorage[piece.StorageNode] += pieceSize

         item := TransferQueueItem{
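The hunk above hoists the redundancy-strategy work out of the per-piece loop, since the piece size is identical for every piece of a segment. Below is a minimal, self-contained sketch of that pattern; computePieceSize and the surrounding types are hypothetical stand-ins for the eestream calls, not the real API:

package main

import "fmt"

type piece struct{ storageNode string }

type segment struct {
    encryptedSize int64
    pieces        []piece
}

// computePieceSize stands in for the NewRedundancyStrategyFromStorj plus
// CalcPieceSize pair; the arithmetic here is only a placeholder.
func computePieceSize(encryptedSize int64) int64 {
    return encryptedSize / 4
}

// tally mirrors the change: the piece size is derived from per-segment data,
// so it is computed at most once per segment (the first time a tracked piece
// is seen) and then reused for every remaining piece.
func tally(seg segment, perNode map[string]int64) {
    pieceSize := int64(-1) // sentinel: not computed yet

    for _, p := range seg.pieces {
        if _, ok := perNode[p.storageNode]; !ok {
            continue
        }
        if pieceSize == -1 {
            pieceSize = computePieceSize(seg.encryptedSize)
        }
        perNode[p.storageNode] += pieceSize
    }
}

func main() {
    perNode := map[string]int64{"node-a": 0}
    seg := segment{encryptedSize: 4096, pieces: []piece{{"node-a"}, {"node-b"}}}
    tally(seg, perNode)
    fmt.Println(perNode["node-a"]) // 1024
}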