// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package segments

import (
	"context"
	"time"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/orders"
)

// Repairer for segments
type Repairer struct {
	metainfo *metainfo.Service
	orders   *orders.Service
	cache    *overlay.Cache
	ec       ecclient.Client
	identity *identity.FullIdentity
	timeout  time.Duration
}

// NewSegmentRepairer creates a new instance of SegmentRepairer
func NewSegmentRepairer(metainfo *metainfo.Service, orders *orders.Service, cache *overlay.Cache, ec ecclient.Client, identity *identity.FullIdentity, timeout time.Duration) *Repairer {
	return &Repairer{
		metainfo: metainfo,
		orders:   orders,
		cache:    cache,
		ec:       ec.WithForceErrorDetection(true),
		identity: identity,
		timeout:  timeout,
	}
}

// Repair retrieves an at-risk segment and repairs and stores lost pieces on new nodes
func (repairer *Repairer) Repair(ctx context.Context, path storj.Path) (err error) {
	defer mon.Task()(&ctx)(&err)

	// Read the segment pointer from the metainfo
	pointer, err := repairer.metainfo.Get(ctx, path)
	if err != nil {
		return Error.Wrap(err)
	}

	if pointer.GetType() != pb.Pointer_REMOTE {
		return Error.New("cannot repair inline segment %s", path)
	}

	mon.Meter("repair_attempts").Mark(1)
	mon.IntVal("repair_segment_size").Observe(pointer.GetSegmentSize())
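
	// Rebuild the erasure-coding redundancy strategy recorded in the pointer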
	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
	if err != nil {
		return Error.Wrap(err)
	}

	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
	expiration := pointer.GetExpirationDate()

	var excludeNodeIDs storj.NodeIDList
	var healthyPieces, unhealthyPieces []*pb.RemotePiece
	healthyMap := make(map[int32]bool)
	pieces := pointer.GetRemote().GetRemotePieces()
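	// Ask the overlay cache which pieces are on nodes that are no longer considered healthy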
	missingPieces, err := repairer.cache.GetMissingPieces(ctx, pieces)
	if err != nil {
		return Error.New("error getting missing pieces %s", err)
	}

	numHealthy := len(pieces) - len(missingPieces)
|
2019-06-14 10:16:31 +01:00
|
|
|
// irreparable piece, we need k+1 to detect corrupted pieces
|
|
|
|
if int32(numHealthy) < pointer.Remote.Redundancy.MinReq+1 {
|
2019-05-28 15:10:26 +01:00
|
|
|
mon.Meter("repair_nodes_unavailable").Mark(1)
|
2019-06-14 10:16:31 +01:00
|
|
|
return Error.New("segment %v cannot be repaired: only %d healthy pieces, %d required", path, numHealthy, pointer.Remote.Redundancy.MinReq+1)
|
2019-05-16 14:49:10 +01:00
|
|
|
}

	// repair not needed
	if int32(numHealthy) > pointer.Remote.Redundancy.RepairThreshold {
		mon.Meter("repair_unnecessary").Mark(1)
		return Error.New("segment %v with %d pieces above repair threshold %d", path, numHealthy, pointer.Remote.Redundancy.RepairThreshold)
	}
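
	// Record the segment's health relative to its total redundancy before repairing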
	healthyRatioBeforeRepair := 0.0
	if pointer.Remote.Redundancy.Total != 0 {
		healthyRatioBeforeRepair = float64(numHealthy) / float64(pointer.Remote.Redundancy.Total)
	}
	mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair)
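
	// Collect the lost piece numbers in a set for fast lookup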
	lostPiecesSet := sliceToSet(missingPieces)

	// Collect every piece's node into excludeNodeIDs, and sort the pieces into
	// healthyPieces and unhealthyPieces based on lostPiecesSet
	for _, piece := range pieces {
		excludeNodeIDs = append(excludeNodeIDs, piece.NodeId)
		if _, ok := lostPiecesSet[piece.GetPieceNum()]; !ok {
			healthyPieces = append(healthyPieces, piece)
			healthyMap[piece.GetPieceNum()] = true
		} else {
			unhealthyPieces = append(unhealthyPieces, piece)
		}
	}
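
	// Derive the bucket ID from the segment path; it is needed to create the order limits below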
	bucketID, err := createBucketID(path)
	if err != nil {
		return Error.Wrap(err)
	}

	// Create the order limits for the GET_REPAIR action
	getOrderLimits, err := repairer.orders.CreateGetRepairOrderLimits(ctx, repairer.identity.PeerIdentity(), bucketID, pointer, healthyPieces)
	if err != nil {
		return Error.Wrap(err)
	}

	// Request the overlay for (total - healthy) new storage nodes, excluding nodes that already hold pieces of this segment
	request := overlay.FindStorageNodesRequest{
		RequestedCount: redundancy.TotalCount() - len(healthyPieces),
		FreeBandwidth:  pieceSize,
		FreeDisk:       pieceSize,
		ExcludedNodes:  excludeNodeIDs,
	}
	newNodes, err := repairer.cache.FindStorageNodes(ctx, request)
	if err != nil {
		return Error.Wrap(err)
	}

	// Create the order limits for the PUT_REPAIR action
	putLimits, err := repairer.orders.CreatePutRepairOrderLimits(ctx, repairer.identity.PeerIdentity(), bucketID, pointer, getOrderLimits, newNodes)
	if err != nil {
		return Error.Wrap(err)
	}

	// Download the segment using just the healthy pieces
	rr, err := repairer.ec.Get(ctx, getOrderLimits, redundancy, pointer.GetSegmentSize())
	if err != nil {
		return Error.Wrap(err)
	}
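
	// Open a reader over the entire downloaded segment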
	r, err := rr.Range(ctx, 0, rr.Size())
	if err != nil {
		return Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, r.Close()) }()

	// Upload the repaired pieces
	successfulNodes, hashes, err := repairer.ec.Repair(ctx, putLimits, redundancy, r, convertTime(expiration), repairer.timeout, path)
	if err != nil {
		return Error.Wrap(err)
	}

	// Add the successfully uploaded pieces to the healthyPieces
	for i, node := range successfulNodes {
		if node == nil {
			continue
		}
		healthyPieces = append(healthyPieces, &pb.RemotePiece{
			PieceNum: int32(i),
			NodeId:   node.Id,
			Hash:     hashes[i],
		})
		healthyMap[int32(i)] = true
	}
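
	// Report the repair outcome relative to the repair and success thresholds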
	healthyLength := int32(len(healthyPieces))
	switch {
	case healthyLength <= pointer.Remote.Redundancy.RepairThreshold:
		mon.Meter("repair_failed").Mark(1)
	case healthyLength < pointer.Remote.Redundancy.SuccessThreshold:
		mon.Meter("repair_partial").Mark(1)
	default:
		mon.Meter("repair_success").Mark(1)
	}

	healthyRatioAfterRepair := 0.0
	if pointer.Remote.Redundancy.Total != 0 {
		healthyRatioAfterRepair = float64(healthyLength) / float64(pointer.Remote.Redundancy.Total)
	}
	mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair)

	// if partial repair, include "unhealthy" pieces that are not duplicates
	if healthyLength < pointer.Remote.Redundancy.SuccessThreshold {
		for _, p := range unhealthyPieces {
			if _, ok := healthyMap[p.GetPieceNum()]; !ok {
				healthyPieces = append(healthyPieces, p)
			}
		}
	}

	// Update the remote pieces in the pointer
	pointer.GetRemote().RemotePieces = healthyPieces

	// Update the segment pointer in the metainfo
	return repairer.metainfo.Put(ctx, path, pointer)
}

// sliceToSet converts the given slice to a set
func sliceToSet(slice []int32) map[int32]struct{} {
	set := make(map[int32]struct{}, len(slice))
	for _, value := range slice {
		set[value] = struct{}{}
	}
	return set
}
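
// createBucketID builds a bucket identifier by joining the first and third
// components of the segment path (the project ID and the bucket name).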
func createBucketID(path storj.Path) ([]byte, error) {
	comps := storj.SplitPath(path)
	if len(comps) < 3 {
		return nil, Error.New("no bucket component in path: %s", path)
	}
	return []byte(storj.JoinPaths(comps[0], comps[2])), nil
}