2019-07-22 14:34:12 +01:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package metainfo
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-02-25 14:16:44 +00:00
|
|
|
"fmt"
|
2019-07-22 14:34:12 +01:00
|
|
|
"time"
|
|
|
|
|
2020-02-25 14:16:44 +00:00
|
|
|
"github.com/spacemonkeygo/monkit/v3"
|
2019-07-22 14:34:12 +01:00
|
|
|
"github.com/zeebo/errs"
|
2019-12-19 18:33:59 +00:00
|
|
|
"golang.org/x/time/rate"
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
"storj.io/common/storj"
|
2020-12-10 15:09:44 +00:00
|
|
|
"storj.io/common/uuid"
|
2020-08-31 11:14:20 +01:00
|
|
|
"storj.io/storj/satellite/metainfo/metabase"
|
2019-07-22 14:34:12 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// LoopError is a standard error class for this component.
	LoopError = errs.Class("metainfo loop error")

	// LoopClosedError is returned by Join when the loop has already been closed.
	LoopClosedError = LoopError.New("loop closed")
)
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
// Object is the object info passed to Observer by metainfo loop.
//
// The trailing comments on each field list the observers that consume it.
type Object struct {
	Location     metabase.ObjectLocation // tally
	StreamID     uuid.UUID               // metrics, repair
	SegmentCount int                     // metrics

	MetadataSize int // tally

	// expirationDate is the zero time when the object never expires; see Expired.
	expirationDate time.Time // tally
}
|
|
|
|
|
|
|
|
// Expired checks if object is expired relative to now.
|
|
|
|
func (object *Object) Expired(now time.Time) bool {
|
|
|
|
return !object.expirationDate.IsZero() && object.expirationDate.Before(now)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Segment is the segment info passed to Observer by metainfo loop.
//
// The trailing comments on each field list the observers that consume it.
type Segment struct {
	Location       metabase.SegmentLocation // tally, repair, graceful exit, audit
	StreamID       uuid.UUID                // audit
	DataSize       int                      // tally, graceful exit
	Inline         bool                     // metrics
	Redundancy     storj.RedundancyScheme   // tally, graceful exit, repair
	RootPieceID    storj.PieceID            // gc, graceful exit
	Pieces         metabase.Pieces          // tally, audit, gc, graceful exit, repair
	CreationDate   time.Time                // repair
	ExpirationDate time.Time                // tally, repair
	LastRepaired   time.Time                // repair
}
|
|
|
|
|
|
|
|
// Expired checks if segment is expired relative to now.
|
|
|
|
func (segment *Segment) Expired(now time.Time) bool {
|
2020-12-14 12:54:22 +00:00
|
|
|
return !segment.ExpirationDate.IsZero() && segment.ExpirationDate.Before(now)
|
2020-10-27 06:59:14 +00:00
|
|
|
}
|
|
|
|
|
2019-07-22 14:34:12 +01:00
|
|
|
// Observer is an interface defining an observer that can subscribe to the metainfo loop.
//
// architecture: Observer
type Observer interface {
	// Object is called once for each object entry encountered during iteration.
	Object(context.Context, *Object) error
	// RemoteSegment is called for each non-inline segment of an object.
	RemoteSegment(context.Context, *Segment) error
	// InlineSegment is called for each inline segment of an object.
	InlineSegment(context.Context, *Segment) error
}
|
|
|
|
|
2020-02-13 11:01:39 +00:00
|
|
|
// NullObserver is an observer that does nothing. This is useful for joining
// and ensuring the metainfo loop runs once before you use a real observer.
type NullObserver struct{}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Object implements the Observer interface; it ignores the object and returns nil.
func (NullObserver) Object(context.Context, *Object) error {
	return nil
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// RemoteSegment implements the Observer interface; it ignores the segment and returns nil.
func (NullObserver) RemoteSegment(context.Context, *Segment) error {
	return nil
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// InlineSegment implements the Observer interface; it ignores the segment and returns nil.
func (NullObserver) InlineSegment(context.Context, *Segment) error {
	return nil
}
|
|
|
|
|
2019-07-22 14:34:12 +01:00
|
|
|
// observerContext wraps an Observer together with the context it joined with,
// a completion channel, and per-entry-type duration distributions used for
// instrumentation.
type observerContext struct {
	observer Observer

	ctx  context.Context // the context the observer joined with; checked between entries
	done chan error      // carries the observer's final error, closed on normal completion

	// Duration distributions for time spent handling each entry type,
	// tagged with the concrete observer type name.
	object *monkit.DurationDist
	remote *monkit.DurationDist
	inline *monkit.DurationDist
}
|
|
|
|
|
|
|
|
func newObserverContext(ctx context.Context, obs Observer) *observerContext {
|
|
|
|
name := fmt.Sprintf("%T", obs)
|
|
|
|
key := monkit.NewSeriesKey("observer").WithTag("name", name)
|
|
|
|
|
|
|
|
return &observerContext{
|
|
|
|
observer: obs,
|
|
|
|
|
|
|
|
ctx: ctx,
|
|
|
|
done: make(chan error),
|
|
|
|
|
|
|
|
object: monkit.NewDurationDist(key.WithTag("pointer_type", "object")),
|
|
|
|
inline: monkit.NewDurationDist(key.WithTag("pointer_type", "inline")),
|
|
|
|
remote: monkit.NewDurationDist(key.WithTag("pointer_type", "remote")),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
func (observer *observerContext) Object(ctx context.Context, object *Object) error {
|
2020-02-25 14:16:44 +00:00
|
|
|
start := time.Now()
|
|
|
|
defer func() { observer.object.Insert(time.Since(start)) }()
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
return observer.observer.Object(ctx, object)
|
2020-02-25 14:16:44 +00:00
|
|
|
}
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
func (observer *observerContext) RemoteSegment(ctx context.Context, segment *Segment) error {
|
2020-02-25 14:16:44 +00:00
|
|
|
start := time.Now()
|
|
|
|
defer func() { observer.remote.Insert(time.Since(start)) }()
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
return observer.observer.RemoteSegment(ctx, segment)
|
2020-02-25 14:16:44 +00:00
|
|
|
}
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
func (observer *observerContext) InlineSegment(ctx context.Context, segment *Segment) error {
|
2020-02-25 14:16:44 +00:00
|
|
|
start := time.Now()
|
|
|
|
defer func() { observer.inline.Insert(time.Since(start)) }()
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
return observer.observer.InlineSegment(ctx, segment)
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func (observer *observerContext) HandleError(err error) bool {
|
|
|
|
if err != nil {
|
|
|
|
observer.done <- err
|
|
|
|
observer.Finish()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (observer *observerContext) Finish() {
|
|
|
|
close(observer.done)
|
2020-02-25 14:16:44 +00:00
|
|
|
|
|
|
|
name := fmt.Sprintf("%T", observer.observer)
|
|
|
|
stats := allObserverStatsCollectors.GetStats(name)
|
|
|
|
stats.Observe(observer)
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Wait blocks until the observer finishes, returning the error delivered via
// HandleError, or nil when the done channel is closed by Finish.
func (observer *observerContext) Wait() error {
	return <-observer.done
}
|
|
|
|
|
|
|
|
// LoopConfig contains configurable values for the metainfo loop.
type LoopConfig struct {
	CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s"`
	RateLimit        float64       `help:"rate limit (default is 0 which is unlimited segments per second)" default:"0"`
	ListLimit        int           `help:"how many items to query in a batch" default:"2500"`
}
|
|
|
|
|
|
|
|
// Loop is a metainfo loop service.
//
// architecture: Service
type Loop struct {
	config     LoopConfig
	db         PointerDB
	bucketsDB  BucketsDB
	metabaseDB MetabaseDB
	join       chan []*observerContext // incoming batches of observers waiting for the next iteration
	done       chan struct{}           // closed by Close to signal shutdown
}
|
|
|
|
|
|
|
|
// NewLoop creates a new metainfo loop service.
|
2020-12-09 08:50:04 +00:00
|
|
|
func NewLoop(config LoopConfig, db PointerDB, bucketsDB BucketsDB, metabaseDB MetabaseDB) *Loop {
|
2019-07-22 14:34:12 +01:00
|
|
|
return &Loop{
|
2020-12-10 15:09:44 +00:00
|
|
|
db: db,
|
|
|
|
bucketsDB: bucketsDB,
|
|
|
|
metabaseDB: metabaseDB,
|
|
|
|
config: config,
|
|
|
|
join: make(chan []*observerContext),
|
|
|
|
done: make(chan struct{}),
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Join will join the looper for one full cycle until completion and then returns.
|
|
|
|
// On ctx cancel the observer will return without completely finishing.
|
|
|
|
// Only on full complete iteration it will return nil.
|
|
|
|
// Safe to be called concurrently.
|
2020-04-16 08:30:20 +01:00
|
|
|
func (loop *Loop) Join(ctx context.Context, observers ...Observer) (err error) {
|
2019-07-22 14:34:12 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-04-16 08:30:20 +01:00
|
|
|
obsContexts := make([]*observerContext, len(observers))
|
|
|
|
for i, obs := range observers {
|
|
|
|
obsContexts[i] = newObserverContext(ctx, obs)
|
|
|
|
}
|
2019-07-22 14:34:12 +01:00
|
|
|
|
|
|
|
select {
|
2020-04-16 08:30:20 +01:00
|
|
|
case loop.join <- obsContexts:
|
2019-07-22 14:34:12 +01:00
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
case <-loop.done:
|
|
|
|
return LoopClosedError
|
|
|
|
}
|
|
|
|
|
2020-04-16 08:30:20 +01:00
|
|
|
var errList errs.Group
|
|
|
|
for _, ctx := range obsContexts {
|
|
|
|
errList.Add(ctx.Wait())
|
|
|
|
}
|
|
|
|
|
|
|
|
return errList.Err()
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Run starts the looping service.
|
|
|
|
// It can only be called once, otherwise a panic will occur.
|
|
|
|
func (loop *Loop) Run(ctx context.Context) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
for {
|
|
|
|
err := loop.runOnce(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-23 20:14:39 +01:00
|
|
|
// Close closes the looping services.
//
// NOTE(review): this only closes the done channel; calling Close twice would
// panic on double close — confirm callers invoke it at most once.
func (loop *Loop) Close() (err error) {
	close(loop.done)
	return nil
}
|
|
|
|
|
2019-07-22 14:34:12 +01:00
|
|
|
// runOnce goes through metainfo one time and sends information to observers.
|
|
|
|
func (loop *Loop) runOnce(ctx context.Context) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
var observers []*observerContext
|
|
|
|
|
|
|
|
// wait for the first observer, or exit because context is canceled
|
|
|
|
select {
|
2020-04-16 08:30:20 +01:00
|
|
|
case list := <-loop.join:
|
|
|
|
observers = append(observers, list...)
|
2019-07-22 14:34:12 +01:00
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
// after the first observer is found, set timer for CoalesceDuration and add any observers that try to join before the timer is up
|
|
|
|
timer := time.NewTimer(loop.config.CoalesceDuration)
|
|
|
|
waitformore:
|
|
|
|
for {
|
|
|
|
select {
|
2020-04-16 08:30:20 +01:00
|
|
|
case list := <-loop.join:
|
|
|
|
observers = append(observers, list...)
|
2019-07-22 14:34:12 +01:00
|
|
|
case <-timer.C:
|
|
|
|
break waitformore
|
|
|
|
case <-ctx.Done():
|
2019-11-18 15:26:48 +00:00
|
|
|
finishObservers(observers)
|
2019-07-22 14:34:12 +01:00
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
}
|
2020-12-09 08:50:04 +00:00
|
|
|
return iterateDatabase(ctx, loop.db, loop.bucketsDB, loop.metabaseDB, observers, loop.config.ListLimit, rate.NewLimiter(rate.Limit(loop.config.RateLimit), 1))
|
2019-11-18 15:26:48 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 20:24:17 +00:00
|
|
|
// IterateDatabase iterates over PointerDB and notifies specified observers about results.
|
2020-05-05 07:51:24 +01:00
|
|
|
//
|
|
|
|
// It uses 10000 as the lookup limit for iterating.
|
2020-12-09 08:50:04 +00:00
|
|
|
func IterateDatabase(ctx context.Context, rateLimit float64, db PointerDB, bucketsDB BucketsDB, metabaseDB MetabaseDB, observers ...Observer) error {
|
2019-11-18 15:26:48 +00:00
|
|
|
obsContexts := make([]*observerContext, len(observers))
|
|
|
|
for i, observer := range observers {
|
2020-02-25 14:16:44 +00:00
|
|
|
obsContexts[i] = newObserverContext(ctx, observer)
|
2019-11-18 15:26:48 +00:00
|
|
|
}
|
2020-12-09 08:50:04 +00:00
|
|
|
return iterateDatabase(ctx, db, bucketsDB, metabaseDB, obsContexts, 10000, rate.NewLimiter(rate.Limit(rateLimit), 1))
|
2019-11-18 15:26:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Wait waits for run to be finished.
// Safe to be called concurrently.
//
// NOTE(review): this blocks on the done channel, which is closed by Close —
// not directly on Run returning. Confirm that semantic matches the doc above.
func (loop *Loop) Wait() {
	<-loop.done
}
|
|
|
|
|
2020-12-15 21:44:57 +00:00
|
|
|
func iterateDatabase(ctx context.Context, db PointerDB, bucketsDB BucketsDB, metabaseDB MetabaseDB, observers []*observerContext, limit int, rateLimiter *rate.Limiter) (err error) {
|
2019-11-18 15:26:48 +00:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
for _, observer := range observers {
|
|
|
|
observer.HandleError(err)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2020-12-15 21:44:57 +00:00
|
|
|
finishObservers(observers)
|
2019-11-18 15:26:48 +00:00
|
|
|
}()
|
|
|
|
|
2021-02-18 11:37:49 +00:00
|
|
|
observers, err = iterateObjects(ctx, metabaseDB, observers, limit, rateLimiter)
|
|
|
|
if err != nil {
|
|
|
|
return LoopError.Wrap(err)
|
2020-12-10 15:09:44 +00:00
|
|
|
}
|
2021-02-18 11:37:49 +00:00
|
|
|
|
2020-12-10 15:09:44 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-02-18 11:37:49 +00:00
|
|
|
// iterateObjects walks every object in metabaseDB in batches of `limit`,
// handing each object — and then each of its segments — to every observer
// that is still active. Observers that error or whose context is done are
// dropped from the active set (via handleObject/handleSegment returning
// false). It returns the observers that survived the full iteration.
//
// The rate limiter bounds overall progress: one token per object and one per
// segment-listing batch.
func iterateObjects(ctx context.Context, metabaseDB MetabaseDB, observers []*observerContext, limit int, rateLimiter *rate.Limiter) (_ []*observerContext, err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO we should improve performance here, this is just most straightforward solution
	err = metabaseDB.FullIterateObjects(ctx, metabase.FullIterateObjects{
		BatchSize: limit,
	}, func(ctx context.Context, it metabase.FullObjectsIterator) error {
		var entry metabase.FullObjectEntry
		for it.Next(ctx, &entry) {
			if err := rateLimiter.Wait(ctx); err != nil {
				// We don't really execute concurrent batches so we should never
				// exceed the burst size of 1 and this should never happen.
				// We can also enter here if the context is cancelled.
				return err
			}

			// Filter observers in place: only those that successfully handled
			// the object stay active. nextObservers reuses observers' backing
			// array, which is safe because it never outpaces the read index.
			nextObservers := observers[:0]
			for _, observer := range observers {
				keepObserver := handleObject(ctx, observer, entry)
				if keepObserver {
					nextObservers = append(nextObservers, observer)
				}
			}

			observers = nextObservers
			if len(observers) == 0 {
				// No one left to notify; stop iterating early.
				return nil
			}

			// if context has been canceled exit. Otherwise, continue
			if err := ctx.Err(); err != nil {
				return err
			}

			// Page through this object's segments, `limit` at a time, using
			// the last seen position as the cursor for the next page.
			more := true
			cursor := metabase.SegmentPosition{}
			for more {
				if err := rateLimiter.Wait(ctx); err != nil {
					// We don't really execute concurrent batches so we should never
					// exceed the burst size of 1 and this should never happen.
					// We can also enter here if the context is cancelled.
					return err
				}

				segments, err := metabaseDB.ListSegments(ctx, metabase.ListSegments{
					StreamID: entry.StreamID,
					Cursor:   cursor,
					Limit:    limit,
				})
				if err != nil {
					return err
				}

				for _, segment := range segments.Segments {
					// Same in-place filtering pattern as for objects above.
					nextObservers := observers[:0]
					location := metabase.SegmentLocation{
						ProjectID:  entry.ProjectID,
						BucketName: entry.BucketName,
						ObjectKey:  entry.ObjectKey,
						Position:   segment.Position,
					}
					for _, observer := range observers {
						keepObserver := handleSegment(ctx, observer, location, segment, entry.ExpiresAt)
						if keepObserver {
							nextObservers = append(nextObservers, observer)
						}
					}

					observers = nextObservers
					if len(observers) == 0 {
						return nil
					}

					// if context has been canceled exit. Otherwise, continue
					if err := ctx.Err(); err != nil {
						return err
					}
				}

				more = segments.More
				if more {
					// Advance the cursor past the last segment we saw.
					lastSegment := segments.Segments[len(segments.Segments)-1]
					cursor = lastSegment.Position
				}
			}
		}
		return nil
	})

	return observers, err
}
|
|
|
|
|
2021-02-18 11:37:49 +00:00
|
|
|
func handleObject(ctx context.Context, observer *observerContext, object metabase.FullObjectEntry) bool {
|
2020-12-10 15:09:44 +00:00
|
|
|
expirationDate := time.Time{}
|
|
|
|
if object.ExpiresAt != nil {
|
|
|
|
expirationDate = *object.ExpiresAt
|
|
|
|
}
|
|
|
|
|
|
|
|
if observer.HandleError(observer.Object(ctx, &Object{
|
2021-02-18 11:37:49 +00:00
|
|
|
Location: object.Location(),
|
2020-12-18 12:27:45 +00:00
|
|
|
StreamID: object.StreamID,
|
2020-12-10 15:09:44 +00:00
|
|
|
SegmentCount: int(object.SegmentCount),
|
2020-12-17 13:04:42 +00:00
|
|
|
MetadataSize: len(object.EncryptedMetadata),
|
2020-12-10 15:09:44 +00:00
|
|
|
expirationDate: expirationDate,
|
|
|
|
})) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-observer.ctx.Done():
|
|
|
|
observer.HandleError(observer.ctx.Err())
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2020-12-10 20:49:23 +00:00
|
|
|
func handleSegment(ctx context.Context, observer *observerContext, location metabase.SegmentLocation, segment metabase.Segment, expiresAt *time.Time) bool {
|
2020-12-10 15:09:44 +00:00
|
|
|
loopSegment := &Segment{
|
|
|
|
Location: location,
|
2020-12-10 20:49:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if expiresAt != nil {
|
2020-12-14 12:54:22 +00:00
|
|
|
loopSegment.ExpirationDate = *expiresAt
|
2020-12-10 15:09:44 +00:00
|
|
|
}
|
|
|
|
|
2020-12-14 11:04:38 +00:00
|
|
|
loopSegment.StreamID = segment.StreamID
|
2020-12-15 21:44:57 +00:00
|
|
|
loopSegment.DataSize = int(segment.EncryptedSize)
|
2020-12-10 15:09:44 +00:00
|
|
|
if segment.Inline() {
|
|
|
|
loopSegment.Inline = true
|
|
|
|
if observer.HandleError(observer.InlineSegment(ctx, loopSegment)) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
loopSegment.RootPieceID = segment.RootPieceID
|
|
|
|
loopSegment.Redundancy = segment.Redundancy
|
|
|
|
loopSegment.Pieces = segment.Pieces
|
|
|
|
if observer.HandleError(observer.RemoteSegment(ctx, loopSegment)) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-observer.ctx.Done():
|
|
|
|
observer.HandleError(observer.ctx.Err())
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
|
2019-11-18 15:26:48 +00:00
|
|
|
func finishObservers(observers []*observerContext) {
|
|
|
|
for _, observer := range observers {
|
|
|
|
observer.Finish()
|
2019-07-22 14:34:12 +01:00
|
|
|
}
|
|
|
|
}
|