// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package checker

import (
	"context"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"storj.io/common/errs2"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metabase/metaloop"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/repair"
	"storj.io/storj/satellite/repair/irreparable"
	"storj.io/storj/satellite/repair/queue"
)

// Error is a standard error class for this package.
var (
	Error = errs.Class("repair checker")
	mon   = monkit.Package()
)

// Checker contains the information needed to do checks for missing pieces.
//
// architecture: Chore
type Checker struct {
	logger          *zap.Logger
	repairQueue     queue.RepairQueue
	irrdb           irreparable.DB
	metabase        metainfo.MetabaseDB
	metaLoop        *metaloop.Service
	nodestate       *ReliabilityCache
	statsCollector  *statsCollector
	repairOverrides RepairOverridesMap
	nodeFailureRate float64
	Loop            *sync2.Cycle
	IrreparableLoop *sync2.Cycle
}

// NewChecker creates a new instance of checker.
func NewChecker(logger *zap.Logger, repairQueue queue.RepairQueue, irrdb irreparable.DB, metabase metainfo.MetabaseDB, metaLoop *metaloop.Service, overlay *overlay.Service, config Config) *Checker {
	return &Checker{
		logger: logger,

		repairQueue:     repairQueue,
		irrdb:           irrdb,
		metabase:        metabase,
		metaLoop:        metaLoop,
		nodestate:       NewReliabilityCache(overlay, config.ReliabilityCacheStaleness),
		statsCollector:  newStatsCollector(),
		repairOverrides: config.RepairOverrides.GetMap(),
		nodeFailureRate: config.NodeFailureRate,

		Loop:            sync2.NewCycle(config.Interval),
		IrreparableLoop: sync2.NewCycle(config.IrreparableInterval),
	}
}

// Run the checker loop.
func (checker *Checker) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	group, ctx := errgroup.WithContext(ctx)

	group.Go(func() error {
		return checker.Loop.Run(ctx, checker.IdentifyInjuredSegments)
	})

	group.Go(func() error {
		return checker.IrreparableLoop.Run(ctx, checker.IrreparableProcess)
	})

	return group.Wait()
}
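
// Usage sketch (hedged: names such as repairQueue, irrdb, metabaseDB, metaLoop,
// overlayService, group, and config stand in for the satellite's real wiring):
// the checker is constructed once, Run is started in the background, and Close
// stops the repair loop on shutdown.
//
//	checker := NewChecker(log, repairQueue, irrdb, metabaseDB, metaLoop, overlayService, config)
//	group.Go(func() error { return checker.Run(ctx) })
//	defer func() { _ = checker.Close() }()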

// getNodesEstimate returns an estimate of the total number of nodes. It is guaranteed
// to return a number greater than 0 when the error is nil.
//
// We can't calculate this upon first starting a Checker, because there may not be any
// nodes yet. We expect that there will be nodes before there are segments, though.
func (checker *Checker) getNodesEstimate(ctx context.Context) (int, error) {
	// this should be safe to call frequently; it is an efficient caching lookup.
	totalNumNodes, err := checker.nodestate.NumNodes(ctx)
	if err != nil {
		// We could proceed here by returning the last good value, or by returning a fallback
		// constant estimate, like "20000", and we'd probably be fine, but it would be better
		// not to have that happen silently for too long. Also, if we can't get this from the
		// database, we probably can't modify the injured segments queue, so it won't help to
		// proceed with this repair operation.
		return 0, err
	}
	if totalNumNodes == 0 {
		return 0, Error.New("segment health is meaningless: there are no nodes")
	}
	return totalNumNodes, nil
}

// RefreshReliabilityCache forces refreshing the node online status cache.
func (checker *Checker) RefreshReliabilityCache(ctx context.Context) error {
	return checker.nodestate.Refresh(ctx)
}

// Close halts the Checker loop.
func (checker *Checker) Close() error {
	checker.Loop.Close()
	return nil
}

// IdentifyInjuredSegments checks for missing pieces using the metainfo loop and the overlay cache.
func (checker *Checker) IdentifyInjuredSegments(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	startTime := time.Now()

	observer := &checkerObserver{
		repairQueue:      checker.repairQueue,
		irrdb:            checker.irrdb,
		nodestate:        checker.nodestate,
		statsCollector:   checker.statsCollector,
		monStats:         aggregateStats{},
		repairOverrides:  checker.repairOverrides,
		nodeFailureRate:  checker.nodeFailureRate,
		getNodesEstimate: checker.getNodesEstimate,
		log:              checker.logger,
	}
	err = checker.metaLoop.Join(ctx, observer)
	if err != nil {
		if !errs2.IsCanceled(err) {
			checker.logger.Error("IdentifyInjuredSegments error", zap.Error(err))
		}
		return err
	}

	// remove all segments which were not seen as unhealthy by this checker iteration
	healthyDeleted, err := checker.repairQueue.Clean(ctx, startTime)
	if err != nil {
		return Error.Wrap(err)
	}

	checker.statsCollector.collectAggregates()

	mon.IntVal("remote_files_checked").Observe(observer.monStats.objectsChecked)                               //mon:locked
	mon.IntVal("remote_segments_checked").Observe(observer.monStats.remoteSegmentsChecked)                     //mon:locked
	mon.IntVal("remote_segments_failed_to_check").Observe(observer.monStats.remoteSegmentsFailedToCheck)       //mon:locked
	mon.IntVal("remote_segments_needing_repair").Observe(observer.monStats.remoteSegmentsNeedingRepair)        //mon:locked
	mon.IntVal("new_remote_segments_needing_repair").Observe(observer.monStats.newRemoteSegmentsNeedingRepair) //mon:locked
	mon.IntVal("remote_segments_lost").Observe(observer.monStats.remoteSegmentsLost)                           //mon:locked
	mon.IntVal("remote_files_lost").Observe(int64(len(observer.monStats.remoteSegmentInfo)))                   //mon:locked
	mon.IntVal("remote_segments_over_threshold_1").Observe(observer.monStats.remoteSegmentsOverThreshold[0])   //mon:locked
	mon.IntVal("remote_segments_over_threshold_2").Observe(observer.monStats.remoteSegmentsOverThreshold[1])   //mon:locked
	mon.IntVal("remote_segments_over_threshold_3").Observe(observer.monStats.remoteSegmentsOverThreshold[2])   //mon:locked
	mon.IntVal("remote_segments_over_threshold_4").Observe(observer.monStats.remoteSegmentsOverThreshold[3])   //mon:locked
	mon.IntVal("remote_segments_over_threshold_5").Observe(observer.monStats.remoteSegmentsOverThreshold[4])   //mon:locked
	mon.IntVal("healthy_segments_removed_from_queue").Observe(healthyDeleted)                                  //mon:locked

	allUnhealthy := observer.monStats.remoteSegmentsNeedingRepair + observer.monStats.remoteSegmentsFailedToCheck
	allChecked := observer.monStats.remoteSegmentsChecked
	allHealthy := allChecked - allUnhealthy
	mon.FloatVal("remote_segments_healthy_percentage").Observe(100 * float64(allHealthy) / float64(allChecked)) //mon:locked

	return nil
}
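
// For instance (hypothetical numbers): if an iteration checked 10,000 remote
// segments, found 40 needing repair, and failed to check 10, then
// allUnhealthy = 50, allHealthy = 9,950, and remote_segments_healthy_percentage
// is observed as 100 * 9950 / 10000 = 99.5.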

// containsObjectLocation checks whether the given object location is present in the slice.
func containsObjectLocation(a []metabase.ObjectLocation, x metabase.ObjectLocation) bool {
	for _, n := range a {
		if x == n {
			return true
		}
	}
	return false
}

func (checker *Checker) updateIrreparableSegmentStatus(ctx context.Context, key metabase.SegmentKey, redundancy storj.RedundancyScheme, creationDate time.Time, pieces metabase.Pieces) (err error) {
	defer mon.Task()(&ctx)(&err)

	if len(pieces) == 0 {
		checker.logger.Debug("no pieces on remote segment")
		return nil
	}

	missingPieces, err := checker.nodestate.MissingPieces(ctx, creationDate, pieces)
	if err != nil {
		return errs.Combine(Error.New("error getting missing pieces"), err)
	}

	numHealthy := int32(len(pieces) - len(missingPieces))

	repairThreshold := int32(redundancy.RepairShares)
	pbRedundancy := &pb.RedundancyScheme{
		MinReq:           int32(redundancy.RequiredShares),
		RepairThreshold:  int32(redundancy.RepairShares),
		SuccessThreshold: int32(redundancy.OptimalShares),
		Total:            int32(redundancy.TotalShares),
	}
	overrideValue := checker.repairOverrides.GetOverrideValuePB(pbRedundancy)
	if overrideValue != 0 {
		repairThreshold = overrideValue
	}

	totalNumNodes, err := checker.getNodesEstimate(ctx)
	if err != nil {
		return Error.New("could not get estimate of total number of nodes: %w", err)
	}

	// We repair when the number of healthy pieces is less than or equal to the repair threshold
	// and greater than or equal to the minimum required pieces in the redundancy scheme,
	// except for the case when the repair and success thresholds are the same (a case usually seen during testing).
	//
	// If the segment is suddenly entirely healthy again, we don't need to repair and we don't need to
	// keep it in the irreparabledb queue either.
	if numHealthy >= int32(redundancy.RequiredShares) && numHealthy <= repairThreshold && numHealthy < int32(redundancy.OptimalShares) {
		segmentHealth := repair.SegmentHealth(int(numHealthy), int(redundancy.RequiredShares), totalNumNodes, checker.nodeFailureRate)
		_, err = checker.repairQueue.Insert(ctx, &internalpb.InjuredSegment{
			Path:         key,
			LostPieces:   missingPieces,
			InsertedTime: time.Now().UTC(),
		}, segmentHealth)
		if err != nil {
			return errs.Combine(Error.New("error adding injured segment to queue"), err)
		}

		// Delete returns nil both when something was deleted and when the element didn't exist.
		err = checker.irrdb.Delete(ctx, key)
		if err != nil {
			checker.logger.Error("error deleting entry from irreparable db: ", zap.Error(err))
		}
	} else if numHealthy < int32(redundancy.RequiredShares) && numHealthy < repairThreshold {

		// make an entry into the irreparable table
		segmentInfo := &internalpb.IrreparableSegment{
			Path:               key,
			LostPieces:         int32(len(missingPieces)),
			LastRepairAttempt:  time.Now().Unix(),
			RepairAttemptCount: int64(1),
		}

		// add the entry if new or update the attempt count if it already exists
		err := checker.irrdb.IncrementRepairAttempts(ctx, segmentInfo)
		if err != nil {
			return errs.Combine(Error.New("error handling irreparable segment to queue"), err)
		}
	} else if numHealthy > repairThreshold || numHealthy >= int32(redundancy.OptimalShares) {
		err = checker.irrdb.Delete(ctx, key)
		if err != nil {
			return Error.New("error removing segment from irreparable queue: %v", err)
		}
	}
	return nil
}
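
// In short, updateIrreparableSegmentStatus classifies a previously-irreparable
// segment three ways: back into the repair queue (healthy pieces between the
// required minimum and the repair threshold), kept in the irreparable table
// with its attempt count bumped (below the required minimum), or dropped from
// the irreparable table entirely (above the repair threshold, i.e. healthy again).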

var _ metaloop.Observer = (*checkerObserver)(nil)

// checkerObserver implements the metainfo loop Observer interface.
//
// architecture: Observer
type checkerObserver struct {
	repairQueue      queue.RepairQueue
	irrdb            irreparable.DB
	nodestate        *ReliabilityCache
	statsCollector   *statsCollector
	monStats         aggregateStats // TODO(cam): once we verify statsCollector reports data correctly, remove this
	repairOverrides  RepairOverridesMap
	nodeFailureRate  float64
	getNodesEstimate func(ctx context.Context) (int, error)
	log              *zap.Logger

	// we need to delay counting objects to ensure they get associated with the correct redundancy only once
	objectCounted bool
}

func (obs *checkerObserver) getStatsByRS(redundancy storj.RedundancyScheme) *stats {
	rsString := getRSString(obs.loadRedundancy(redundancy))
	return obs.statsCollector.getStatsByRS(rsString)
}

func (obs *checkerObserver) loadRedundancy(redundancy storj.RedundancyScheme) (int, int, int, int) {
	repair := int(redundancy.RepairShares)
	overrideValue := obs.repairOverrides.GetOverrideValue(redundancy)
	if overrideValue != 0 {
		repair = int(overrideValue)
	}
	return int(redundancy.RequiredShares), repair, int(redundancy.OptimalShares), int(redundancy.TotalShares)
}
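
// For example (hypothetical values): with a scheme of RequiredShares=29,
// RepairShares=35, OptimalShares=80, TotalShares=110 and a configured repair
// override of 52 for that scheme, loadRedundancy returns (29, 52, 80, 110),
// so the checker treats 52 as the effective repair threshold.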

// LoopStarted is called at each start of a loop.
func (obs *checkerObserver) LoopStarted(context.Context, metaloop.LoopInfo) (err error) {
	return nil
}

func (obs *checkerObserver) RemoteSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
	defer mon.Task()(&ctx)(&err)

	// ignore segment if expired
	if segment.Expired(time.Now()) {
		return nil
	}

	stats := obs.getStatsByRS(segment.Redundancy)

	if !obs.objectCounted {
		obs.objectCounted = true
		stats.iterationAggregates.objectsChecked++
	}

	obs.monStats.remoteSegmentsChecked++
	stats.iterationAggregates.remoteSegmentsChecked++

	// ensure we get values, even if only zero values, so that redash can have an alert based on this
	mon.Counter("checker_segments_below_min_req").Inc(0) //mon:locked
	stats.segmentsBelowMinReq.Inc(0)

	pieces := segment.Pieces
	if len(pieces) == 0 {
		obs.log.Debug("no pieces on remote segment")
		return nil
	}

	pbPieces := make([]*pb.RemotePiece, len(pieces))
	for i, piece := range pieces {
		pbPieces[i] = &pb.RemotePiece{
			PieceNum: int32(piece.Number),
			NodeId:   piece.StorageNode,
		}
	}

	totalNumNodes, err := obs.getNodesEstimate(ctx)
	if err != nil {
		return Error.New("could not get estimate of total number of nodes: %w", err)
	}

	createdAt := time.Time{}
	if segment.CreatedAt != nil {
		createdAt = *segment.CreatedAt
	}
	repairedAt := time.Time{}
	if segment.RepairedAt != nil {
		repairedAt = *segment.RepairedAt
	}
	missingPieces, err := obs.nodestate.MissingPieces(ctx, createdAt, segment.Pieces)
	if err != nil {
		obs.monStats.remoteSegmentsFailedToCheck++
		stats.iterationAggregates.remoteSegmentsFailedToCheck++
		return errs.Combine(Error.New("error getting missing pieces"), err)
	}

	numHealthy := len(pieces) - len(missingPieces)
	mon.IntVal("checker_segment_total_count").Observe(int64(len(pieces))) //mon:locked
	stats.segmentTotalCount.Observe(int64(len(pieces)))
	mon.IntVal("checker_segment_healthy_count").Observe(int64(numHealthy)) //mon:locked
	stats.segmentHealthyCount.Observe(int64(numHealthy))

	segmentAge := time.Since(createdAt)
	mon.IntVal("checker_segment_age").Observe(int64(segmentAge.Seconds())) //mon:locked
	stats.segmentAge.Observe(int64(segmentAge.Seconds()))

	required, repairThreshold, successThreshold, _ := obs.loadRedundancy(segment.Redundancy)
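
	// SegmentHealth uses the survivability model: it estimates how many iterations
	// the segment can be expected to survive, which is why it takes the current
	// estimate of the total number of active nodes and the assumed node failure rate.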
	segmentHealth := repair.SegmentHealth(numHealthy, required, totalNumNodes, obs.nodeFailureRate)
	mon.FloatVal("checker_segment_health").Observe(segmentHealth) //mon:locked
	stats.segmentHealth.Observe(segmentHealth)

	key := segment.Location.Encode()
	// We repair when the number of healthy pieces is less than or equal to the repair threshold
	// and greater than or equal to the minimum required pieces in the redundancy scheme,
	// except for the case when the repair and success thresholds are the same (a case usually seen during testing).
	if numHealthy >= required && numHealthy <= repairThreshold && numHealthy < successThreshold {
		mon.FloatVal("checker_injured_segment_health").Observe(segmentHealth) //mon:locked
		stats.injuredSegmentHealth.Observe(segmentHealth)
		obs.monStats.remoteSegmentsNeedingRepair++
		stats.iterationAggregates.remoteSegmentsNeedingRepair++
		alreadyInserted, err := obs.repairQueue.Insert(ctx, &internalpb.InjuredSegment{
			Path:         key,
			LostPieces:   missingPieces,
			InsertedTime: time.Now().UTC(),
		}, segmentHealth)
		if err != nil {
			obs.log.Error("error adding injured segment to queue", zap.Error(err))
			return nil
		}

		if !alreadyInserted {
			obs.monStats.newRemoteSegmentsNeedingRepair++
			stats.iterationAggregates.newRemoteSegmentsNeedingRepair++
		}

		// Delete returns nil both when something was deleted and when the element didn't exist.
		err = obs.irrdb.Delete(ctx, key)
		if err != nil {
			obs.log.Error("error deleting entry from irreparable db", zap.Error(err))
			return nil
		}
	} else if numHealthy < required && numHealthy < repairThreshold {
		lostSegInfo := segment.Location.Object()
		if !containsObjectLocation(obs.monStats.remoteSegmentInfo, lostSegInfo) {
			obs.monStats.remoteSegmentInfo = append(obs.monStats.remoteSegmentInfo, lostSegInfo)
		}
		if !containsObjectLocation(stats.iterationAggregates.remoteSegmentInfo, lostSegInfo) {
			stats.iterationAggregates.remoteSegmentInfo = append(stats.iterationAggregates.remoteSegmentInfo, lostSegInfo)
		}

		var segmentAge time.Duration
		if createdAt.Before(repairedAt) {
			segmentAge = time.Since(repairedAt)
		} else {
			segmentAge = time.Since(createdAt)
		}
		mon.IntVal("checker_segment_time_until_irreparable").Observe(int64(segmentAge.Seconds())) //mon:locked
		stats.segmentTimeUntilIrreparable.Observe(int64(segmentAge.Seconds()))

		obs.monStats.remoteSegmentsLost++
		stats.iterationAggregates.remoteSegmentsLost++

		mon.Counter("checker_segments_below_min_req").Inc(1) //mon:locked
		stats.segmentsBelowMinReq.Inc(1)

		// make an entry into the irreparable table
		segmentInfo := &internalpb.IrreparableSegment{
			Path:               key,
			LostPieces:         int32(len(missingPieces)),
			LastRepairAttempt:  time.Now().Unix(),
			RepairAttemptCount: int64(1),
		}

		// add the entry if new or update the attempt count if it already exists
		err := obs.irrdb.IncrementRepairAttempts(ctx, segmentInfo)
		if err != nil {
			obs.log.Error("error handling irreparable segment to queue", zap.Error(err))
			return nil
		}
	} else {
		if numHealthy > repairThreshold && numHealthy <= (repairThreshold+len(obs.monStats.remoteSegmentsOverThreshold)) {
			// record metrics for segments right above the repair threshold:
			// numHealthy=repairThreshold+1 through numHealthy=repairThreshold+5
			for i := range obs.monStats.remoteSegmentsOverThreshold {
				if numHealthy == (repairThreshold + i + 1) {
					obs.monStats.remoteSegmentsOverThreshold[i]++
					break
				}
			}
		}
		if numHealthy > repairThreshold && numHealthy <= (repairThreshold+len(stats.iterationAggregates.remoteSegmentsOverThreshold)) {
			// record metrics for segments right above the repair threshold:
			// numHealthy=repairThreshold+1 through numHealthy=repairThreshold+5
			for i := range stats.iterationAggregates.remoteSegmentsOverThreshold {
				if numHealthy == (repairThreshold + i + 1) {
					stats.iterationAggregates.remoteSegmentsOverThreshold[i]++
					break
				}
			}
		}
	}

	return nil
}
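
// As an illustration (hypothetical numbers): with an effective repairThreshold
// of 52, a segment with numHealthy=54 lands in over-threshold bucket 2, so
// remote_segments_over_threshold_2 is incremented for this iteration; anything
// above repairThreshold+5 is simply counted as healthy.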

func (obs *checkerObserver) Object(ctx context.Context, object *metaloop.Object) (err error) {
	defer mon.Task()(&ctx)(&err)

	obs.monStats.objectsChecked++

	// TODO: check for expired objects

	if object.SegmentCount == 0 {
		stats := obs.getStatsByRS(storj.RedundancyScheme{})
		stats.iterationAggregates.objectsChecked++
		return nil
	}
	obs.objectCounted = false

	return nil
}

func (obs *checkerObserver) InlineSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: check for expired segments

	if !obs.objectCounted {
		// Note: this may give false stats when an object starts with an inline segment.
		obs.objectCounted = true
		stats := obs.getStatsByRS(storj.RedundancyScheme{})
		stats.iterationAggregates.objectsChecked++
	}

	return nil
}
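
// The objectCounted handshake between Object, RemoteSegment, and InlineSegment
// works as follows: Object resets the flag for each new object that has segments,
// and whichever segment callback runs first for that object sets the flag and
// attributes the object to the stats bucket of that segment's redundancy scheme
// (the zero scheme for inline segments), so each object is counted once per iteration.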

// IrreparableProcess iterates over all items in the irreparabledb. If an item can
// now be repaired then it is added to a worker queue.
func (checker *Checker) IrreparableProcess(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	const limit = 1000
	lastSeenSegmentKey := metabase.SegmentKey{}

	for {
		segments, err := checker.irrdb.GetLimited(ctx, limit, lastSeenSegmentKey)
		if err != nil {
			return errs.Combine(Error.New("error reading segment from the queue"), err)
		}

		// zero segments returned with nil err
		if len(segments) == 0 {
			break
		}

		lastSeenSegmentKey = metabase.SegmentKey(segments[len(segments)-1].Path)

		for _, segment := range segments {
			var redundancy storj.RedundancyScheme
			var pieces metabase.Pieces
			var createdAt time.Time
			if segment.SegmentDetail == (&pb.Pointer{}) {
				// TODO: IrreparableDB will be removed in the future, so we shouldn't care too much about performance.
				location, err := metabase.ParseSegmentKey(metabase.SegmentKey(segment.GetPath()))
				if err != nil {
					return err
				}
				object, err := checker.metabase.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
					ObjectLocation: location.Object(),
				})
				if err != nil {
					return err
				}

				createdAt = object.CreatedAt

				segment, err := checker.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
					StreamID: object.StreamID,
					Position: location.Position,
				})
				if err != nil {
					return err
				}
				redundancy = segment.Redundancy
			} else {
				// skip inline segments
				if segment.SegmentDetail.Remote == nil {
					return nil
				}

				createdAt = segment.SegmentDetail.CreationDate

				pbRedundancy := segment.SegmentDetail.Remote.Redundancy
				redundancy = storj.RedundancyScheme{
					RequiredShares: int16(pbRedundancy.MinReq),
					RepairShares:   int16(pbRedundancy.RepairThreshold),
					OptimalShares:  int16(pbRedundancy.SuccessThreshold),
					TotalShares:    int16(pbRedundancy.Total),
					ShareSize:      pbRedundancy.ErasureShareSize,
				}
				// preallocate capacity only, so the appends below don't leave zero-value pieces at the front
				pieces = make(metabase.Pieces, 0, len(segment.SegmentDetail.Remote.RemotePieces))
				for _, piece := range segment.SegmentDetail.Remote.RemotePieces {
					pieces = append(pieces, metabase.Piece{
						Number:      uint16(piece.PieceNum),
						StorageNode: piece.NodeId,
					})
				}
			}

			err = checker.updateIrreparableSegmentStatus(ctx,
				metabase.SegmentKey(segment.GetPath()),
				redundancy,
				createdAt,
				pieces,
			)
			if err != nil {
				checker.logger.Error("irreparable segment checker failed: ", zap.Error(err))
			}
		}
	}

	return nil
}