satellite/metainfo/piecedeletion: fix deadlock when waiting for the success threshold to be reached

This PR fixes a deadlock that can happen when the number of piece deletion requests differs from the number of distinct nodes in those requests. The success threshold should be based on the number of nodes instead of the number of requests.

Change-Id: I83073a22eb1e111be1e27641cebcefecdc16afcb
parent 4783c3e1d3
commit 7c7334e9d5
@@ -173,11 +173,6 @@ func (service *Service) Delete(ctx context.Context, requests []Request, successT
 	}
 	defer service.concurrentRequests.Release(int64(totalPieceCount))
 
-	threshold, err := sync2.NewSuccessThreshold(len(requests), successThreshold)
-	if err != nil {
-		return Error.Wrap(err)
-	}
-
 	// Create a map for matching node information with the corresponding
 	// request.
 	nodesReqs := make(map[storj.NodeID]Request, len(requests))
@@ -209,6 +204,11 @@ func (service *Service) Delete(ctx context.Context, requests []Request, successT
 		}
 	}
 
+	threshold, err := sync2.NewSuccessThreshold(len(nodesReqs), successThreshold)
+	if err != nil {
+		return Error.Wrap(err)
+	}
+
 	for _, req := range nodesReqs {
 		service.combiner.Enqueue(req.Node, Job{
 			Pieces: req.Pieces,
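For context, the following is a minimal, self-contained Go sketch of the counting mismatch behind the deadlock. It is not the storj sync2 package; all names (threshold, newThreshold, markOutcome) are illustrative. It shows why a success threshold sized by the request count can never be reached when outcomes are reported once per distinct node.

// Minimal, self-contained sketch (illustrative only; not the storj sync2
// implementation). It models a "success threshold" that waits for a fraction
// of expected outcomes and shows why sizing it by the request count deadlocks
// when several requests target the same storage node.
package main

import (
	"fmt"
	"sync"
)

// threshold is reached once `required` outcomes have been reported.
type threshold struct {
	mu       sync.Mutex
	done     chan struct{}
	required int
	reported int
}

func newThreshold(total int, fraction float64) *threshold {
	return &threshold{
		done:     make(chan struct{}),
		required: int(float64(total)*fraction) + 1,
	}
}

// markOutcome records one finished deletion job and closes done when the
// required number of outcomes has been seen.
func (t *threshold) markOutcome() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.reported++
	if t.reported == t.required {
		close(t.done)
	}
}

func main() {
	requests := 10     // piece deletion requests
	distinctNodes := 4 // several requests refer to the same node

	// Buggy sizing: the threshold expects outcomes for every request...
	th := newThreshold(requests, 0.75) // requires 8 outcomes

	// ...but jobs are enqueued per distinct node, so only 4 outcomes arrive.
	for i := 0; i < distinctNodes; i++ {
		th.markOutcome()
	}

	select {
	case <-th.done:
		fmt.Println("threshold reached")
	default:
		// Delete would block forever waiting here; sizing the threshold by
		// the distinct node count (len(nodesReqs) in the diff) avoids this.
		fmt.Printf("stuck: %d/%d outcomes reported\n", th.reported, th.required)
	}
}

The diff applies the same idea by moving the threshold construction after the nodesReqs map is built and sizing it with len(nodesReqs), so the wait is tied to the number of distinct nodes that will actually report an outcome.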