3d6518081a
To handle concurrent deletion requests we need to combine them into a
single request. To implement this, a few concurrency primitives are
introduced (see the sketch below):

* Combiner, which takes a node id and a Job and combines multiple
  requests into a single batch.
* Job, which represents the deletion of multiple piece ids together
  with a notification mechanism back to the caller.
* Queue, which provides communication from the Combiner to a Handler.
  It can limit the number of requests per work queue.
* Handler, which takes an active Queue and processes it until it has
  consumed all the jobs. It can limit handling concurrency.

Change-Id: I3299325534abad4bae66969ffa16c6ed95d5574f
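For orientation, here is a minimal sketch of how those pieces could relate to each other. The real types live in storj.io/storj/satellite/metainfo/piecedeletion and storj.io/common/sync2; the Promise name and the TryPush method below are assumptions for illustration, while Job, Queue.PopAll, and Handler.Handle mirror what the test file in this change actually exercises.

// Package sketch illustrates, under stated assumptions, the shapes of the
// concurrency pieces described in the commit message. It is not the real
// satellite/metainfo/piecedeletion API.
package sketch

import (
	"context"

	"storj.io/common/pb"
	"storj.io/common/storj"
)

// Promise is an assumed name for the notification mechanism a Job carries
// back to the caller (the test uses *sync2.SuccessThreshold in this role).
type Promise interface {
	Success()
	Failure()
}

// Job represents deleting multiple piece ids on behalf of one caller.
type Job struct {
	Pieces  []storj.PieceID
	Resolve Promise
}

// Queue carries jobs from the Combiner to a Handler. PopAll mirrors the
// method used by the test's CountHandler; TryPush is an assumed name for
// the producer side, which may refuse jobs once a per-queue limit is hit.
type Queue interface {
	TryPush(job Job) bool
	PopAll() ([]Job, bool)
}

// Handler drains one node's queue until all jobs are consumed.
type Handler interface {
	Handle(ctx context.Context, node *pb.Node, queue Queue)
}

The test below wires these shapes together: it wraps a CountHandler with NewLimitedHandler, hands NewCombiner a queue constructor, and resolves jobs through a sync2.SuccessThreshold.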
95 lines
1.8 KiB
Go
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package piecedeletion_test

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"

	"github.com/stretchr/testify/require"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/satellite/metainfo/piecedeletion"
)

// CountHandler counts the total number of pieces it has been asked to delete.
type CountHandler struct {
	Count int64
}

// Handle drains the queue, tallying pieces and resolving every job as a success.
func (handler *CountHandler) Handle(ctx context.Context, node *pb.Node, queue piecedeletion.Queue) {
	for {
		list, ok := queue.PopAll()
		if !ok {
			return
		}
		for _, job := range list {
			atomic.AddInt64(&handler.Count, int64(len(job.Pieces)))
			job.Resolve.Success()
		}
	}
}

func TestCombiner(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	const (
		activeLimit   = 8
		nodeCount     = 70
		requestCount  = 100
		parallelCount = 10
		queueSize     = 5
	)

	nodes := []*pb.Node{}
	for i := 0; i < nodeCount; i++ {
		nodes = append(nodes, &pb.Node{
			Id: testrand.NodeID(),
		})
	}

	counter := &CountHandler{}
	limited := piecedeletion.NewLimitedHandler(counter, activeLimit)
	newQueue := func() piecedeletion.Queue {
		return piecedeletion.NewLimitedJobs(queueSize)
	}

	combiner := piecedeletion.NewCombiner(ctx, limited, newQueue)

	// Concurrently enqueue single-piece jobs for random nodes and wait until
	// every job has been resolved by the handler.
	var wg sync.WaitGroup
	for i := 0; i < parallelCount; i++ {
		wg.Add(1)
		ctx.Go(func() error {
			defer wg.Done()

			pending, err := sync2.NewSuccessThreshold(requestCount, 0.999999)
			if err != nil {
				return err
			}

			for k := 0; k < requestCount; k++ {
				node := nodes[testrand.Intn(len(nodes))]

				combiner.Enqueue(node, piecedeletion.Job{
					Pieces:  []storj.PieceID{testrand.PieceID()},
					Resolve: pending,
				})
			}

			pending.Wait(ctx)
			return nil
		})
	}
	wg.Wait()
	combiner.Close()

	// Every enqueued piece must have reached the handler exactly once.
	require.Equal(t, int(counter.Count), int(requestCount*parallelCount))
}