// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package repairer

import (
	"context"
	"fmt"
	"math"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/rpc"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/metainfo/metabase"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
	"storj.io/uplink/private/eestream"
)

var (
	metainfoGetError       = errs.Class("metainfo db get error")
	metainfoPutError       = errs.Class("metainfo db put error")
	invalidRepairError     = errs.Class("invalid repair")
	overlayQueryError      = errs.Class("overlay query failure")
	orderLimitFailureError = errs.Class("order limits failure")
	repairReconstructError = errs.Class("repair reconstruction failure")
	repairPutError         = errs.Class("repair could not store repaired pieces")
)

// irreparableError identifies situations where a segment could not be repaired due to reasons
// which are hopefully transient (e.g. too many pieces unavailable). The segment should be added
// to the irreparableDB.
type irreparableError struct {
	path            storj.Path
	piecesAvailable int32
	piecesRequired  int32
	segmentInfo     *pb.Pointer
}

func (ie *irreparableError) Error() string {
	return fmt.Sprintf("%d available pieces < %d required", ie.piecesAvailable, ie.piecesRequired)
}

// SegmentRepairer for segments.
type SegmentRepairer struct {
	log      *zap.Logger
	metainfo *metainfo.Service
	orders   *orders.Service
	overlay  *overlay.Service
	ec       *ECRepairer
	timeout  time.Duration

	// multiplierOptimalThreshold is the factor applied to the optimal
	// threshold to compute the maximum number of nodes to upload
	// repaired pieces to.
	multiplierOptimalThreshold float64

	// repairOverride is the value handed over from the checker to override the repair threshold.
	repairOverride int
}

// NewSegmentRepairer creates a new instance of SegmentRepairer.
//
// excessOptimalThreshold is the percentage applied over the optimal
// threshold to determine the maximum number of nodes to upload repaired
// pieces to; when negative, 0 is used instead.
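//
// As an illustration only (hypothetical numbers): an optimal threshold of 80
// combined with an excessOptimalThreshold of 0.05 allows repaired pieces to be
// uploaded to at most ceil(80 * 1.05) = 84 nodes.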
func NewSegmentRepairer(
	log *zap.Logger, metainfo *metainfo.Service, orders *orders.Service,
	overlay *overlay.Service, dialer rpc.Dialer, timeout time.Duration,
	excessOptimalThreshold float64, repairOverride int,
	downloadTimeout time.Duration, inMemoryRepair bool,
	satelliteSignee signing.Signee,
) *SegmentRepairer {

	if excessOptimalThreshold < 0 {
		excessOptimalThreshold = 0
	}

	return &SegmentRepairer{
		log:                        log,
		metainfo:                   metainfo,
		orders:                     orders,
		overlay:                    overlay,
		ec:                         NewECRepairer(log.Named("ec repairer"), dialer, satelliteSignee, downloadTimeout, inMemoryRepair),
		timeout:                    timeout,
		multiplierOptimalThreshold: 1 + excessOptimalThreshold,
		repairOverride:             repairOverride,
	}
}

// Repair retrieves an at-risk segment and repairs and stores lost pieces on new nodes.
// Note that shouldDelete is used even in the case where err is not nil.
// Note that it will update audit status as failed for nodes that failed piece hash verification during repair downloading.
func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (shouldDelete bool, err error) {
	defer mon.Task()(&ctx, path)(&err)

	// Read the segment pointer from the metainfo
	pointer, err := repairer.metainfo.Get(ctx, metabase.SegmentKey(path))
	if err != nil {
		if storj.ErrObjectNotFound.Has(err) {
			mon.Meter("repair_unnecessary").Mark(1)            //mon:locked
			mon.Meter("segment_deleted_before_repair").Mark(1) //mon:locked
			repairer.log.Debug("segment was deleted")
			return true, nil
		}
		return false, metainfoGetError.Wrap(err)
	}

	if pointer.GetType() != pb.Pointer_REMOTE {
		return true, invalidRepairError.New("cannot repair inline segment")
	}

	if !pointer.ExpirationDate.IsZero() && pointer.ExpirationDate.Before(time.Now().UTC()) {
		mon.Meter("repair_expired").Mark(1) //mon:locked
		return true, nil
	}

	mon.Meter("repair_attempts").Mark(1)                                //mon:locked
	mon.IntVal("repair_segment_size").Observe(pointer.GetSegmentSize()) //mon:locked

	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
	if err != nil {
		return true, invalidRepairError.New("invalid redundancy strategy: %w", err)
	}

	var excludeNodeIDs storj.NodeIDList
	var healthyPieces, unhealthyPieces []*pb.RemotePiece
	healthyMap := make(map[int32]bool)
	pieces := pointer.GetRemote().GetRemotePieces()
	missingPieces, err := repairer.overlay.GetMissingPieces(ctx, pieces)
	if err != nil {
		return false, overlayQueryError.New("error identifying missing pieces: %w", err)
	}

	numHealthy := len(pieces) - len(missingPieces)
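	// As an illustration only (hypothetical RS numbers): with a 29/35/80/110
	// redundancy scheme, a segment with fewer than 29 healthy pieces can no
	// longer be reconstructed and is reported as irreparable below.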
	// irreparable segment: not enough healthy pieces remain to reconstruct it
	if int32(numHealthy) < pointer.Remote.Redundancy.MinReq {
		mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
		mon.Meter("repair_nodes_unavailable").Mark(1)         //mon:locked
		return true, &irreparableError{
			path:            path,
			piecesAvailable: int32(numHealthy),
			piecesRequired:  pointer.Remote.Redundancy.MinReq,
			segmentInfo:     pointer,
		}
	}

	// ensure we get values, even if only zero values, so that redash can have an alert based on this
	mon.Counter("repairer_segments_below_min_req").Inc(0) //mon:locked

	repairThreshold := pointer.Remote.Redundancy.RepairThreshold
	if repairer.repairOverride != 0 {
		repairThreshold = int32(repairer.repairOverride)
	}

	// repair not needed
	if int32(numHealthy) > repairThreshold {
		mon.Meter("repair_unnecessary").Mark(1) //mon:locked
		repairer.log.Debug("segment above repair threshold", zap.Int("numHealthy", numHealthy), zap.Int32("repairThreshold", repairThreshold))
		return true, nil
	}

	healthyRatioBeforeRepair := 0.0
	if pointer.Remote.Redundancy.Total != 0 {
		healthyRatioBeforeRepair = float64(numHealthy) / float64(pointer.Remote.Redundancy.Total)
	}
	mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair) //mon:locked

	lostPiecesSet := sliceToSet(missingPieces)

	// Populate healthyPieces with all pieces from the pointer except those correlating to indices in lostPieces
	for _, piece := range pieces {
		excludeNodeIDs = append(excludeNodeIDs, piece.NodeId)
		if !lostPiecesSet[piece.GetPieceNum()] {
			healthyPieces = append(healthyPieces, piece)
			healthyMap[piece.GetPieceNum()] = true
		} else {
			unhealthyPieces = append(unhealthyPieces, piece)
		}
	}

	segmentLocation, err := metabase.ParseSegmentKey(metabase.SegmentKey(path))
	if err != nil {
		return false, invalidRepairError.New("could not parse segment key: %w", err)
	}
	bucket := segmentLocation.Bucket()

	// Create the order limits for the GET_REPAIR action
	getOrderLimits, getPrivateKey, err := repairer.orders.CreateGetRepairOrderLimits(ctx, bucket, pointer, healthyPieces)
	if err != nil {
		return false, orderLimitFailureError.New("could not create GET_REPAIR order limits: %w", err)
	}

	// Double check for healthy pieces which became unhealthy inside CreateGetRepairOrderLimits
	// Remove them from healthyPieces and add them to unhealthyPieces
	var newHealthyPieces []*pb.RemotePiece
	for _, piece := range healthyPieces {
		if getOrderLimits[piece.GetPieceNum()] == nil {
			unhealthyPieces = append(unhealthyPieces, piece)
		} else {
			newHealthyPieces = append(newHealthyPieces, piece)
		}
	}
	healthyPieces = newHealthyPieces

	var requestCount int
	var minSuccessfulNeeded int
	{
		totalNeeded := math.Ceil(float64(redundancy.OptimalThreshold()) * repairer.multiplierOptimalThreshold)
		requestCount = int(totalNeeded) - len(healthyPieces)
		minSuccessfulNeeded = redundancy.OptimalThreshold() - len(healthyPieces)
	}
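	// As an illustration only (hypothetical numbers): with an optimal threshold of 80,
	// a multiplier of 1.05, and 40 healthy pieces, requestCount = ceil(80*1.05) - 40 = 44
	// new nodes are requested, and minSuccessfulNeeded = 80 - 40 = 40 uploads must succeed.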

	// Request Overlay for n-h new storage nodes
	request := overlay.FindStorageNodesRequest{
		RequestedCount: requestCount,
		ExcludedIDs:    excludeNodeIDs,
	}
	newNodes, err := repairer.overlay.FindStorageNodesForUpload(ctx, request)
	if err != nil {
		return false, overlayQueryError.Wrap(err)
	}

	// Create the order limits for the PUT_REPAIR action
	putLimits, putPrivateKey, err := repairer.orders.CreatePutRepairOrderLimits(ctx, bucket, pointer, getOrderLimits, newNodes, repairer.multiplierOptimalThreshold)
	if err != nil {
		return false, orderLimitFailureError.New("could not create PUT_REPAIR order limits: %w", err)
	}

	// Download the segment using just the healthy pieces
	segmentReader, failedPieces, err := repairer.ec.Get(ctx, getOrderLimits, getPrivateKey, redundancy, pointer.GetSegmentSize(), path)

	// Populate node IDs that failed piece hashes verification
	var failedNodeIDs storj.NodeIDList
	for _, piece := range failedPieces {
		failedNodeIDs = append(failedNodeIDs, piece.NodeId)
	}

	// update audit status for nodes that failed piece hash verification during downloading
	failedNum, updateErr := repairer.updateAuditFailStatus(ctx, failedNodeIDs)
	if updateErr != nil || failedNum > 0 {
		// failed updates should not affect repair, therefore we will not return the error
		repairer.log.Debug("failed to update audit fail status", zap.Int("Failed Update Number", failedNum), zap.Error(updateErr))
	}
	if err != nil {
		// If Get failed because of input validation, then it will keep failing. But if it
		// gave us irreparableError, then we failed to download enough pieces and must try
		// to wait for nodes to come back online.
		if irreparableErr, ok := err.(*irreparableError); ok {
			mon.Meter("repair_too_many_nodes_failed").Mark(1) //mon:locked
			irreparableErr.segmentInfo = pointer
			return true, irreparableErr
		}
		// The segment's redundancy strategy is invalid, or else there was an internal error.
		return true, repairReconstructError.New("segment could not be reconstructed: %w", err)
	}
	defer func() { err = errs.Combine(err, segmentReader.Close()) }()

	// Upload the repaired pieces
	successfulNodes, hashes, err := repairer.ec.Repair(ctx, putLimits, putPrivateKey, redundancy, segmentReader, repairer.timeout, path, minSuccessfulNeeded)
	if err != nil {
		return false, repairPutError.Wrap(err)
	}

	// Add the successfully uploaded pieces to repairedPieces
	var repairedPieces []*pb.RemotePiece
	repairedMap := make(map[int32]bool)
	for i, node := range successfulNodes {
		if node == nil {
			continue
		}
		piece := pb.RemotePiece{
			PieceNum: int32(i),
			NodeId:   node.Id,
			Hash:     hashes[i],
		}
		repairedPieces = append(repairedPieces, &piece)
		repairedMap[int32(i)] = true
	}

	healthyAfterRepair := int32(len(healthyPieces) + len(repairedPieces))
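	// Classify the outcome (illustrative numbers only): with repair threshold 35 and
	// success threshold 80, ending up with 30 healthy pieces counts as repair_failed,
	// 50 as repair_partial, and 80 or more as repair_success.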
	switch {
	case healthyAfterRepair <= pointer.Remote.Redundancy.RepairThreshold:
		// Important: this indicates a failure to PUT enough pieces to the network to pass
		// the repair threshold, and _not_ a failure to reconstruct the segment. But we
		// put at least one piece, else ec.Repair() would have returned an error. So the
		// repair "succeeded" in that the segment is now healthier than it was, but it is
		// not as healthy as we want it to be.
		mon.Meter("repair_failed").Mark(1) //mon:locked
	case healthyAfterRepair < pointer.Remote.Redundancy.SuccessThreshold:
		mon.Meter("repair_partial").Mark(1) //mon:locked
	default:
		mon.Meter("repair_success").Mark(1) //mon:locked
	}

	healthyRatioAfterRepair := 0.0
	if pointer.Remote.Redundancy.Total != 0 {
		healthyRatioAfterRepair = float64(healthyAfterRepair) / float64(pointer.Remote.Redundancy.Total)
	}
	mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair) //mon:locked

	var toRemove []*pb.RemotePiece
	if healthyAfterRepair >= pointer.Remote.Redundancy.SuccessThreshold {
		// if full repair, remove all unhealthy pieces
		toRemove = unhealthyPieces
	} else {
		// if partial repair, leave unrepaired unhealthy pieces in the pointer
		for _, piece := range unhealthyPieces {
			if repairedMap[piece.GetPieceNum()] {
				// add only repaired pieces in the slice, unrepaired
				// unhealthy pieces are not removed from the pointer
				toRemove = append(toRemove, piece)
			}
		}
	}

	// add pieces that failed piece hashes verification to the removal list
	toRemove = append(toRemove, failedPieces...)

	var segmentAge time.Duration
	if pointer.CreationDate.Before(pointer.LastRepaired) {
		segmentAge = time.Since(pointer.LastRepaired)
	} else {
		segmentAge = time.Since(pointer.CreationDate)
	}

	pointer.LastRepaired = time.Now().UTC()
	pointer.RepairCount++

	// Update the segment pointer in the metainfo
	_, err = repairer.metainfo.UpdatePieces(ctx, metabase.SegmentKey(path), pointer, repairedPieces, toRemove)
	if err != nil {
		return false, metainfoPutError.Wrap(err)
	}

	mon.IntVal("segment_time_until_repair").Observe(int64(segmentAge.Seconds())) //mon:locked
	mon.IntVal("segment_repair_count").Observe(int64(pointer.RepairCount))       //mon:locked

	return true, nil
}
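
// updateAuditFailStatus records an audit failure in the overlay for each of the given node IDs.
// It returns the number of nodes whose audit status could not be updated, along with any error encountered.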
func (repairer *SegmentRepairer) updateAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failedNum int, err error) {
	updateRequests := make([]*overlay.UpdateRequest, len(failedAuditNodeIDs))
	for i, nodeID := range failedAuditNodeIDs {
		updateRequests[i] = &overlay.UpdateRequest{
			NodeID:       nodeID,
			AuditOutcome: overlay.AuditFailure,
		}
	}
	if len(updateRequests) > 0 {
		failed, err := repairer.overlay.BatchUpdateStats(ctx, updateRequests)
		if err != nil || len(failed) > 0 {
			return len(failed), errs.Combine(Error.New("failed to update some audit fail statuses in overlay"), err)
		}
	}
	return 0, nil
}

// sliceToSet converts the given slice to a set.
func sliceToSet(slice []int32) map[int32]bool {
	set := make(map[int32]bool, len(slice))
	for _, value := range slice {
		set[value] = true
	}
	return set
}