// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package repairer

import (
	"context"
	"errors"
	"fmt"
	"math"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/rpc"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/repair/checker"
	"storj.io/storj/satellite/repair/queue"
	"storj.io/storj/satellite/reputation"
	"storj.io/uplink/private/eestream"
)

var (
	metainfoGetError       = errs.Class("metainfo db get")
	metainfoPutError       = errs.Class("metainfo db put")
	invalidRepairError     = errs.Class("invalid repair")
	overlayQueryError      = errs.Class("overlay query failure")
	orderLimitFailureError = errs.Class("order limits failure")
	repairReconstructError = errs.Class("repair reconstruction failure")
	repairPutError         = errs.Class("repair could not store repaired pieces")
)

// irreparableError identifies situations where a segment could not be repaired due to reasons
// which are hopefully transient (e.g. too many pieces unavailable). The segment should be added
// to the irreparableDB.
type irreparableError struct {
	piecesAvailable int32
	piecesRequired  int32
	errlist         []error
}

func (ie *irreparableError) Error() string {
	return fmt.Sprintf("%d available pieces < %d required", ie.piecesAvailable, ie.piecesRequired)
}

// SegmentRepairer for segments.
type SegmentRepairer struct {
	log            *zap.Logger
	statsCollector *statsCollector
	metabase       *metabase.DB
	orders         *orders.Service
	overlay        *overlay.Service
	reputation     *reputation.Service
	ec             *ECRepairer
	timeout        time.Duration

	// multiplierOptimalThreshold is multiplied by the optimal threshold to
	// determine the maximum number of nodes to upload repaired pieces to.
	multiplierOptimalThreshold float64

	// repairOverrides is the set of values configured by the checker to override the repair threshold for various RS schemes.
	repairOverrides checker.RepairOverridesMap

	nowFn func() time.Time
}

// NewSegmentRepairer creates a new instance of SegmentRepairer.
//
// excessOptimalThreshold is the fractional excess to apply over the optimal
// threshold to determine the maximum number of nodes to upload repaired pieces
// to (e.g. 0.05 allows 5% more nodes than the optimal threshold); when
// negative, 0 is applied.
func NewSegmentRepairer(
	log *zap.Logger, metabase *metabase.DB, orders *orders.Service,
	overlay *overlay.Service, reputation *reputation.Service, dialer rpc.Dialer,
	timeout time.Duration, excessOptimalThreshold float64,
	repairOverrides checker.RepairOverrides, downloadTimeout time.Duration,
	inMemoryRepair bool, satelliteSignee signing.Signee,
) *SegmentRepairer {
	if excessOptimalThreshold < 0 {
		excessOptimalThreshold = 0
	}

	return &SegmentRepairer{
		log:                        log,
		statsCollector:             newStatsCollector(),
		metabase:                   metabase,
		orders:                     orders,
		overlay:                    overlay,
		reputation:                 reputation,
		ec:                         NewECRepairer(log.Named("ec repairer"), dialer, satelliteSignee, downloadTimeout, inMemoryRepair),
		timeout:                    timeout,
		multiplierOptimalThreshold: 1 + excessOptimalThreshold,
		repairOverrides:            repairOverrides.GetMap(),
		nowFn:                      time.Now,
	}
}
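
// Example wiring (an illustrative sketch only; the variable names and the
// concrete timeout/threshold values below are assumptions, not recommendations):
//
//	repairer := NewSegmentRepairer(
//		log, metabaseDB, ordersService, overlayService, reputationService,
//		dialer, 5*time.Minute, 0.05, repairOverrides, 5*time.Minute,
//		false, satelliteSignee,
//	)
//	shouldDelete, err := repairer.Repair(ctx, injuredSegment)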

// Repair retrieves an at-risk segment and repairs and stores lost pieces on new nodes.
// Note that shouldDelete is used even in the case where err is not nil.
// Note that it will update audit status as failed for nodes that failed piece hash verification during repair downloading.
func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue.InjuredSegment) (shouldDelete bool, err error) {
	defer mon.Task()(&ctx, queueSegment.StreamID.String(), queueSegment.Position.Encode())(&err)

	segment, err := repairer.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
		StreamID: queueSegment.StreamID,
		Position: queueSegment.Position,
	})
	if err != nil {
		if metabase.ErrSegmentNotFound.Has(err) {
			mon.Meter("repair_unnecessary").Mark(1)            //mon:locked
			mon.Meter("segment_deleted_before_repair").Mark(1) //mon:locked
			repairer.log.Debug("segment was deleted")
			return true, nil
		}
		return false, metainfoGetError.Wrap(err)
	}

	if segment.Inline() {
		return true, invalidRepairError.New("cannot repair inline segment")
	}

	redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
	if err != nil {
		return true, invalidRepairError.New("invalid redundancy strategy: %w", err)
	}
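
	// Tag repair metrics with the segment's redundancy scheme so repair
	// statistics can be broken down per RS configuration.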
	stats := repairer.getStatsByRS(&pb.RedundancyScheme{
		Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
		ErasureShareSize: segment.Redundancy.ShareSize,
		MinReq:           int32(segment.Redundancy.RequiredShares),
		RepairThreshold:  int32(segment.Redundancy.RepairShares),
		SuccessThreshold: int32(segment.Redundancy.OptimalShares),
		Total:            int32(segment.Redundancy.TotalShares),
	})

	mon.Meter("repair_attempts").Mark(1) //mon:locked
	stats.repairAttempts.Mark(1)
	mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
	stats.repairSegmentSize.Observe(int64(segment.EncryptedSize))

	var excludeNodeIDs storj.NodeIDList
	pieces := segment.Pieces
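	// Ask the overlay which of the segment's pieces live on nodes it currently
	// considers unreliable or offline.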
	missingPieces, err := repairer.overlay.GetMissingPieces(ctx, pieces)
	if err != nil {
		return false, overlayQueryError.New("error identifying missing pieces: %w", err)
	}

	numHealthy := len(pieces) - len(missingPieces)
	// irreparable segment
	if numHealthy < int(segment.Redundancy.RequiredShares) {
		mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
		stats.repairerSegmentsBelowMinReq.Inc(1)
		mon.Meter("repair_nodes_unavailable").Mark(1) //mon:locked
		stats.repairerNodesUnavailable.Mark(1)

		repairer.log.Warn("irreparable segment",
			zap.String("StreamID", queueSegment.StreamID.String()),
			zap.Uint64("Position", queueSegment.Position.Encode()),
			zap.Int("piecesAvailable", numHealthy),
			zap.Int16("piecesRequired", segment.Redundancy.RequiredShares),
		)
		return false, nil
	}

	// ensure we get values, even if only zero values, so that redash can have an alert based on this
	mon.Counter("repairer_segments_below_min_req").Inc(0) //mon:locked
	stats.repairerSegmentsBelowMinReq.Inc(0)

	repairThreshold := int32(segment.Redundancy.RepairShares)

	pbRedundancy := &pb.RedundancyScheme{
		MinReq:           int32(segment.Redundancy.RequiredShares),
		RepairThreshold:  int32(segment.Redundancy.RepairShares),
		SuccessThreshold: int32(segment.Redundancy.OptimalShares),
		Total:            int32(segment.Redundancy.TotalShares),
	}
	overrideValue := repairer.repairOverrides.GetOverrideValuePB(pbRedundancy)
	if overrideValue != 0 {
		repairThreshold = overrideValue
	}

	// repair not needed
	if numHealthy > int(repairThreshold) {
		mon.Meter("repair_unnecessary").Mark(1) //mon:locked
		stats.repairUnnecessary.Mark(1)
		repairer.log.Debug("segment above repair threshold", zap.Int("numHealthy", numHealthy), zap.Int32("repairThreshold", repairThreshold))
		return true, nil
	}

	healthyRatioBeforeRepair := 0.0
	if segment.Redundancy.TotalShares != 0 {
		healthyRatioBeforeRepair = float64(numHealthy) / float64(segment.Redundancy.TotalShares)
	}
	mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair) //mon:locked
	stats.healthyRatioBeforeRepair.Observe(healthyRatioBeforeRepair)
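
	// Partition the segment's pieces into healthy and unhealthy sets, and
	// remember every piece's node so it is excluded when selecting new nodes.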
	lostPiecesSet := sliceToSet(missingPieces)

	var healthyPieces, unhealthyPieces metabase.Pieces
	healthyMap := make(map[uint16]bool)
	// Populate healthyPieces with all pieces from the segment except those in lostPiecesSet
	for _, piece := range pieces {
		excludeNodeIDs = append(excludeNodeIDs, piece.StorageNode)
		if !lostPiecesSet[piece.Number] {
			healthyPieces = append(healthyPieces, piece)
			healthyMap[piece.Number] = true
		} else {
			unhealthyPieces = append(unhealthyPieces, piece)
		}
	}

	// Create the order limits for the GET_REPAIR action
	getOrderLimits, getPrivateKey, cachedIPsAndPorts, err := repairer.orders.CreateGetRepairOrderLimits(ctx, metabase.BucketLocation{}, segment, healthyPieces)
	if err != nil {
		if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
			mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
			stats.repairerSegmentsBelowMinReq.Inc(1)
			mon.Meter("repair_nodes_unavailable").Mark(1) //mon:locked
			stats.repairerNodesUnavailable.Mark(1)

			repairer.log.Warn("irreparable segment",
				zap.String("StreamID", queueSegment.StreamID.String()),
				zap.Uint64("Position", queueSegment.Position.Encode()),
				zap.Error(err),
			)
		}
		return false, orderLimitFailureError.New("could not create GET_REPAIR order limits: %w", err)
	}

	// Double check for healthy pieces which became unhealthy inside CreateGetRepairOrderLimits
	// Remove them from healthyPieces and add them to unhealthyPieces
	var newHealthyPieces metabase.Pieces
	for _, piece := range healthyPieces {
		if getOrderLimits[piece.Number] == nil {
			unhealthyPieces = append(unhealthyPieces, piece)
		} else {
			newHealthyPieces = append(newHealthyPieces, piece)
		}
	}
	healthyPieces = newHealthyPieces
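
	// Aim above the optimal threshold (scaled by multiplierOptimalThreshold) when
	// requesting new nodes, so a few failed uploads can still leave the segment
	// at or above the optimal threshold.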
	var requestCount int
	var minSuccessfulNeeded int
	{
		totalNeeded := math.Ceil(float64(redundancy.OptimalThreshold()) * repairer.multiplierOptimalThreshold)
		requestCount = int(totalNeeded) - len(healthyPieces)
		minSuccessfulNeeded = redundancy.OptimalThreshold() - len(healthyPieces)
	}

	// Request Overlay for n-h new storage nodes
	request := overlay.FindStorageNodesRequest{
		RequestedCount: requestCount,
		ExcludedIDs:    excludeNodeIDs,
	}
	newNodes, err := repairer.overlay.FindStorageNodesForUpload(ctx, request)
	if err != nil {
		return false, overlayQueryError.Wrap(err)
	}

	// Create the order limits for the PUT_REPAIR action
	putLimits, putPrivateKey, err := repairer.orders.CreatePutRepairOrderLimits(ctx, metabase.BucketLocation{}, segment, getOrderLimits, newNodes, repairer.multiplierOptimalThreshold)
	if err != nil {
		return false, orderLimitFailureError.New("could not create PUT_REPAIR order limits: %w", err)
	}

	// Download the segment using just the healthy pieces
	segmentReader, pbFailedPieces, err := repairer.ec.Get(ctx, getOrderLimits, cachedIPsAndPorts, getPrivateKey, redundancy, int64(segment.EncryptedSize))

	// Populate node IDs that failed piece hashes verification
	var failedNodeIDs storj.NodeIDList
	for _, piece := range pbFailedPieces {
		failedNodeIDs = append(failedNodeIDs, piece.NodeId)
	}
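
	// NOTE: the err returned by ec.Get is deliberately not inspected until after
	// the failed-piece bookkeeping below, so hash-verification failures are
	// recorded even when the download itself failed.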

	// TODO refactor repairer.ec.Get?
	failedPieces := make(metabase.Pieces, len(pbFailedPieces))
	for i, piece := range pbFailedPieces {
		failedPieces[i] = metabase.Piece{
			Number:      uint16(piece.PieceNum),
			StorageNode: piece.NodeId,
		}
	}

	// ensure we get values, even if only zero values, so that redash can have an alert based on this
	mon.Meter("repair_too_many_nodes_failed").Mark(0) //mon:locked
	stats.repairTooManyNodesFailed.Mark(0)

	// update audit status for nodes that failed piece hash verification during downloading
	failedNum, updateErr := repairer.updateAuditFailStatus(ctx, failedNodeIDs)
	if updateErr != nil || failedNum > 0 {
		// failed updates should not affect repair, therefore we will not return the error
		repairer.log.Debug("failed to update audit fail status", zap.Int("Failed Update Number", failedNum), zap.Error(updateErr))
	}

	if err != nil {
		// If the context was closed during the Get phase, it will appear here as though
		// we just failed to download enough pieces to reconstruct the segment. Check for
		// a closed context before doing any further error processing.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return false, ctxErr
		}
		// If Get failed because of input validation, then it will keep failing. But if it
		// gave us irreparableError, then we failed to download enough pieces and must try
		// to wait for nodes to come back online.
		var irreparableErr *irreparableError
		if errors.As(err, &irreparableErr) {
			mon.Meter("repair_too_many_nodes_failed").Mark(1) //mon:locked
			stats.repairTooManyNodesFailed.Mark(1)

			repairer.log.Warn("irreparable segment",
				zap.String("StreamID", queueSegment.StreamID.String()),
				zap.Uint64("Position", queueSegment.Position.Encode()),
				zap.Int32("piecesAvailable", irreparableErr.piecesAvailable),
				zap.Int32("piecesRequired", irreparableErr.piecesRequired),
				zap.Error(errs.Combine(irreparableErr.errlist...)),
			)
			return false, nil
		}
		// The segment's redundancy strategy is invalid, or else there was an internal error.
		return true, repairReconstructError.New("segment could not be reconstructed: %w", err)
	}
	defer func() { err = errs.Combine(err, segmentReader.Close()) }()

	// Upload the repaired pieces
	successfulNodes, _, err := repairer.ec.Repair(ctx, putLimits, putPrivateKey, redundancy, segmentReader, repairer.timeout, minSuccessfulNeeded)
	if err != nil {
		return false, repairPutError.Wrap(err)
	}

	pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
	var bytesRepaired int64

	// Add the successfully uploaded pieces to repairedPieces
	var repairedPieces metabase.Pieces
	repairedMap := make(map[uint16]bool)
	for i, node := range successfulNodes {
		if node == nil {
			continue
		}
		bytesRepaired += pieceSize
		piece := metabase.Piece{
			Number:      uint16(i),
			StorageNode: node.Id,
		}
		repairedPieces = append(repairedPieces, piece)
		repairedMap[uint16(i)] = true
	}

	mon.Meter("repair_bytes_uploaded").Mark64(bytesRepaired) //mon:locked
	healthyAfterRepair := len(healthyPieces) + len(repairedPieces)
	switch {
	case healthyAfterRepair <= int(segment.Redundancy.RepairShares):
		// Important: this indicates a failure to PUT enough pieces to the network to pass
		// the repair threshold, and _not_ a failure to reconstruct the segment. But we
		// put at least one piece, else ec.Repair() would have returned an error. So the
		// repair "succeeded" in that the segment is now healthier than it was, but it is
		// not as healthy as we want it to be.
		mon.Meter("repair_failed").Mark(1) //mon:locked
		stats.repairFailed.Mark(1)
	case healthyAfterRepair < int(segment.Redundancy.OptimalShares):
		mon.Meter("repair_partial").Mark(1) //mon:locked
		stats.repairPartial.Mark(1)
	default:
		mon.Meter("repair_success").Mark(1) //mon:locked
		stats.repairSuccess.Mark(1)
	}

	healthyRatioAfterRepair := 0.0
	if segment.Redundancy.TotalShares != 0 {
		healthyRatioAfterRepair = float64(healthyAfterRepair) / float64(segment.Redundancy.TotalShares)
	}

	mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair) //mon:locked
	stats.healthyRatioAfterRepair.Observe(healthyRatioAfterRepair)

	var toRemove metabase.Pieces
	if healthyAfterRepair >= int(segment.Redundancy.OptimalShares) {
		// if full repair, remove all unhealthy pieces
		toRemove = unhealthyPieces
	} else {
		// if partial repair, leave unrepaired unhealthy pieces in the segment
		for _, piece := range unhealthyPieces {
			if repairedMap[piece.Number] {
				// add only repaired pieces to the removal list; unrepaired
				// unhealthy pieces stay in the segment
				toRemove = append(toRemove, piece)
			}
		}
	}

	// add pieces that failed piece hashes verification to the removal list
	toRemove = append(toRemove, failedPieces...)

	newPieces, err := segment.Pieces.Update(repairedPieces, toRemove)
	if err != nil {
		return false, repairPutError.Wrap(err)
	}
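
	// OldPieces is passed so the metabase can detect concurrent modification of
	// the segment and reject this update (an assumption based on the field's
	// name and usage, not verified against the metabase implementation).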
	err = repairer.metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
		StreamID: segment.StreamID,
		Position: segment.Position,

		OldPieces:     segment.Pieces,
		NewRedundancy: segment.Redundancy,
		NewPieces:     newPieces,

		NewRepairedAt: time.Now(),
	})
	if err != nil {
		return false, metainfoPutError.Wrap(err)
	}

	repairedAt := time.Time{}
	if segment.RepairedAt != nil {
		repairedAt = *segment.RepairedAt
	}

	var segmentAge time.Duration
	if segment.CreatedAt.Before(repairedAt) {
		segmentAge = time.Since(repairedAt)
	} else {
		segmentAge = time.Since(segment.CreatedAt)
	}

	// TODO what to do with RepairCount
	var repairCount int64
	// pointer.RepairCount++

	mon.IntVal("segment_time_until_repair").Observe(int64(segmentAge.Seconds())) //mon:locked
	stats.segmentTimeUntilRepair.Observe(int64(segmentAge.Seconds()))
	mon.IntVal("segment_repair_count").Observe(repairCount) //mon:locked
	stats.segmentRepairCount.Observe(repairCount)

	return true, nil
}

func (repairer *SegmentRepairer) getStatsByRS(redundancy *pb.RedundancyScheme) *stats {
	rsString := getRSString(repairer.loadRedundancy(redundancy))
	return repairer.statsCollector.getStatsByRS(rsString)
}

func (repairer *SegmentRepairer) loadRedundancy(redundancy *pb.RedundancyScheme) (int, int, int, int) {
	repair := int(redundancy.RepairThreshold)
	overrideValue := repairer.repairOverrides.GetOverrideValuePB(redundancy)
	if overrideValue != 0 {
		repair = int(overrideValue)
	}
	return int(redundancy.MinReq), repair, int(redundancy.SuccessThreshold), int(redundancy.Total)
}

func (repairer *SegmentRepairer) updateAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failedNum int, err error) {
	var errGroup errs.Group
	for _, nodeID := range failedAuditNodeIDs {
		err := repairer.reputation.ApplyAudit(ctx, nodeID, reputation.AuditFailure)
		if err != nil {
			failedNum++
			errGroup.Add(err)
			continue
		}
	}
	if failedNum > 0 {
		return failedNum, errs.Combine(Error.New("failed to update some audit fail statuses in overlay"), errGroup.Err())
	}
	return 0, nil
}

// SetNow allows tests to have the server act as if the current time is whatever they want.
func (repairer *SegmentRepairer) SetNow(nowFn func() time.Time) {
	repairer.nowFn = nowFn
}

// sliceToSet converts the given slice to a set.
func sliceToSet(slice []uint16) map[uint16]bool {
	set := make(map[uint16]bool, len(slice))
	for _, value := range slice {
		set[value] = true
	}
	return set
}