storj/satellite/metainfo/piecedeletion/queue.go
Egon Elbre 3d6518081a satellite/metainfo/piecedeletion: add Combiner
To handle concurrent deletion requests we need to combine them into a
single request.

To implement this we introduce a few concurrency ideas:

* Combiner, which takes a node id and a Job and handles combining
  multiple requests to a single batch.

* Job, which represents the deletion of multiple piece ids, with a
  notification mechanism to inform the caller of completion.

* Queue, which provides communication from Combiner to Handler.
  It can limit the number of requests per work queue.

* Handler, which takes an active Queue and processes it until it has
  consumed all the jobs.
  It can provide limits to handling concurrency.

Change-Id: I3299325534abad4bae66969ffa16c6ed95d5574f
2020-03-16 17:13:26 +00:00

76 lines
1.8 KiB
Go

// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package piecedeletion
import "sync"
// LimitedJobs is a finalizable list of deletion jobs with a limit to how many
// jobs it can handle.
type LimitedJobs struct {
	// maxPiecesPerBatch is the soft capacity of the queue in piece ids;
	// a negative value means no limit.
	maxPiecesPerBatch int

	// mu guards all of the fields below.
	mu sync.Mutex
	// done indicates that no more items will be appended to the queue.
	done bool
	// count is the number of piece ids queued here.
	count int
	// list is the list of delete jobs.
	list []Job
}
// NewLimitedJobs returns a new limited job queue.
// A negative maxPiecesPerBatch means the queue size is unlimited.
func NewLimitedJobs(maxPiecesPerBatch int) *LimitedJobs {
	jobs := &LimitedJobs{}
	jobs.maxPiecesPerBatch = maxPiecesPerBatch
	return jobs
}
// TryPush attempts to append job to the queue and reports whether it was
// accepted. It fails once the queue has been finalized.
//
// A negative maxPiecesPerBatch means no limit.
func (jobs *LimitedJobs) TryPush(job Job) bool {
	return jobs.tryPush(job, jobs.maxPiecesPerBatch)
}
// tryPush attempts to append job to the queue and reports whether it was
// accepted. The limit is passed in explicitly so it can be varied in tests.
//
// A negative maxPiecesPerBatch means no limit.
func (jobs *LimitedJobs) tryPush(job Job, maxPiecesPerBatch int) bool {
	jobs.mu.Lock()
	defer jobs.mu.Unlock()

	// a finalized queue accepts no further work; the caller must
	// start a fresh queue instead.
	if jobs.done {
		return false
	}

	jobs.list = append(jobs.list, job)
	jobs.count += len(job.Pieces)

	// Finalize the queue once the capacity is reached. A single job is
	// never split across batches, so count may overshoot
	// maxPiecesPerBatch; splitting a request and its promise across
	// multiple batches would be annoying.
	if maxPiecesPerBatch >= 0 && jobs.count >= maxPiecesPerBatch {
		jobs.done = true
	}

	return true
}
// PopAll removes and returns all jobs currently in this list.
// When the list is empty it finalizes the queue and returns ok == false.
func (jobs *LimitedJobs) PopAll() (_ []Job, ok bool) {
	jobs.mu.Lock()
	defer jobs.mu.Unlock()

	popped := jobs.list
	if len(popped) == 0 {
		// nothing left to hand out; make the queue final so that
		// TryPush stops accepting new work.
		jobs.done = true
		return nil, false
	}
	jobs.list = nil
	return popped, true
}