2019-09-06 20:20:36 +01:00
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package repairer
import (
"bytes"
"context"
2020-07-16 16:50:15 +01:00
"errors"
2022-08-01 10:30:33 +01:00
"hash"
2019-09-06 20:20:36 +01:00
"io"
"sort"
"sync"
"sync/atomic"
"time"
2020-03-27 19:00:57 +00:00
"github.com/calebcase/tmpfile"
2019-09-06 20:20:36 +01:00
"github.com/vivint/infectious"
"github.com/zeebo/errs"
"go.uber.org/zap"
2019-12-27 11:48:47 +00:00
"storj.io/common/errs2"
"storj.io/common/pb"
"storj.io/common/rpc"
2022-10-19 14:32:24 +01:00
"storj.io/common/rpc/rpcpool"
2019-12-27 11:48:47 +00:00
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/common/sync2"
2021-08-03 14:21:27 +01:00
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
2021-11-08 20:51:04 +00:00
"storj.io/storj/satellite/overlay"
2020-02-21 14:07:29 +00:00
"storj.io/uplink/private/eestream"
"storj.io/uplink/private/piecestore"
2019-09-06 20:20:36 +01:00
)
// NOTE(review): the following commit message was interleaved into the source
// by extraction tooling; preserved verbatim as a comment for history.
//
// satellite/repair: avoid retrying GET_REPAIR incorrectly
//
// We retry a GET_REPAIR operation in one case, and one case only (as far
// as I can determine): when we are trying to connect to a node using its
// last known working IP and port combination rather than its supplied
// hostname, and we think the operation failed the first time because of a
// Dial failure.
//
// However, logs collected from storage node operators along with logs
// collected from satellites are strongly indicating that we are retrying
// GET_REPAIR operations in some cases even when we succeeded in connecting
// to the node the first time. This results in the node complaining loudly
// about being given a duplicate order limit (as it should), whereupon the
// satellite counts that as an unknown error and potentially penalizes the
// node.
//
// See discussion at
// https://forum.storj.io/t/get-repair-error-used-serial-already-exists-in-store/17922/36
//
// Investigation into this problem has revealed that
// `!piecestore.CloseError.Has(err)` may not be the best way of determining
// whether a problem occurred during Dial. In fact, it is probably
// downright Wrong. Handling of errors on a stream is somewhat complicated,
// but it would appear that there are several paths by which an RPC error
// originating on the remote side might show up during the Close() call,
// and would thus be labeled as a "CloseError".
//
// This change creates a new error class, repairer.ErrDialFailed, with
// which we will now wrap errors that _really definitely_ occurred during
// a Dial call. We will use this class to determine whether or not to retry
// a GET_REPAIR operation. The error will still also be wrapped with
// whatever wrapper classes it used to be wrapped with, so the potential
// for breakage here should be minimal.
//
// Refs: https://github.com/storj/storj/issues/4687
// Change-Id: Ifdd3deadc8258f34cf3fbc42aff393fa545794eb
var (
	// ErrPieceHashVerifyFailed is the errs class when a piece hash downloaded from storagenode fails to match the original hash.
	ErrPieceHashVerifyFailed = errs.Class("piece hashes don't match")
	// ErrDialFailed is the errs class when a failure happens during Dial.
	// It lets callers (see Get) distinguish genuine dial failures — which are
	// safe to retry against a different address — from stream/close errors,
	// which are not.
	ErrDialFailed = errs.Class("dial failure")
)
2019-09-13 17:21:20 +01:00
2019-09-06 20:20:36 +01:00
// ECRepairer allows the repairer to download, verify, and upload pieces from storagenodes.
type ECRepairer struct {
	log *zap.Logger
	// dialer is used to establish RPC connections to storage nodes.
	dialer rpc.Dialer
	// satelliteSignee verifies that order limits returned by storage nodes
	// were signed by this satellite.
	satelliteSignee signing.Signee
	// downloadTimeout bounds each single-piece download attempt.
	downloadTimeout time.Duration
	// inmemory selects buffering downloaded pieces in memory instead of
	// spilling them to temporary files on disk.
	inmemory bool
}
// NewECRepairer creates a new repairer for interfacing with storagenodes.
2020-03-27 19:00:57 +00:00
func NewECRepairer ( log * zap . Logger , dialer rpc . Dialer , satelliteSignee signing . Signee , downloadTimeout time . Duration , inmemory bool ) * ECRepairer {
2019-09-06 20:20:36 +01:00
return & ECRepairer {
log : log ,
2019-09-19 05:46:39 +01:00
dialer : dialer ,
2019-09-06 20:20:36 +01:00
satelliteSignee : satelliteSignee ,
2019-10-30 20:31:08 +00:00
downloadTimeout : downloadTimeout ,
2020-03-27 19:00:57 +00:00
inmemory : inmemory ,
2019-09-06 20:20:36 +01:00
}
}
2020-05-19 16:49:13 +01:00
func ( ec * ECRepairer ) dialPiecestore ( ctx context . Context , n storj . NodeURL ) ( * piecestore . Client , error ) {
2022-10-19 14:32:24 +01:00
client , err := piecestore . Dial ( rpcpool . WithForceDial ( ctx ) , ec . dialer , n , piecestore . DefaultConfig )
satellite/repair: avoid retrying GET_REPAIR incorrectly
We retry a GET_REPAIR operation in one case, and one case only (as far
as I can determine): when we are trying to connect to a node using its
last known working IP and port combination rather than its supplied
hostname, and we think the operation failed the first time because of a
Dial failure.
However, logs collected from storage node operators along with logs
collected from satellites are strongly indicating that we are retrying
GET_REPAIR operations in some cases even when we succeeded in connecting
to the node the first time. This results in the node complaining loudly
about being given a duplicate order limit (as it should), whereupon the
satellite counts that as an unknown error and potentially penalizes the
node.
See discussion at
https://forum.storj.io/t/get-repair-error-used-serial-already-exists-in-store/17922/36
.
Investigation into this problem has revealed that
`!piecestore.CloseError.Has(err)` may not be the best way of determining
whether a problem occurred during Dial. In fact, it is probably
downright Wrong. Handling of errors on a stream is somewhat complicated,
but it would appear that there are several paths by which an RPC error
originating on the remote side might show up during the Close() call,
and would thus be labeled as a "CloseError".
This change creates a new error class, repairer.ErrDialFailed, with
which we will now wrap errors that _really definitely_ occurred during
a Dial call. We will use this class to determine whether or not to retry
a GET_REPAIR operation. The error will still also be wrapped with
whatever wrapper classes it used to be wrapped with, so the potential
for breakage here should be minimal.
Refs: https://github.com/storj/storj/issues/4687
Change-Id: Ifdd3deadc8258f34cf3fbc42aff393fa545794eb
2022-07-06 04:02:49 +01:00
return client , ErrDialFailed . Wrap ( err )
2019-09-06 20:20:36 +01:00
}
// Get downloads pieces from storagenodes using the provided order limits, and decodes those pieces into a segment.
// It attempts to download from the minimum required number based on the redundancy scheme. It will further wait
// for additional error/failure results up to minFailures, for testing purposes. Under normal conditions,
// minFailures will be 0.
//
// After downloading a piece, the ECRepairer will verify the hash and original order limit for that piece.
// If verification fails, another piece will be downloaded until we reach the minimum required or run out of order limits.
// If piece hash verification fails, it will return all failed node IDs.
func (ec *ECRepairer) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, cachedNodesInfo map[storj.NodeID]overlay.NodeReputation, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, dataSize int64, minFailures int) (_ io.ReadCloser, _ FetchResultReport, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(limits) != es.TotalCount() {
		return nil, FetchResultReport{}, Error.New("number of limits slice (%d) does not match total count (%d) of erasure scheme", len(limits), es.TotalCount())
	}

	nonNilLimits := nonNilCount(limits)
	if nonNilLimits < es.RequiredCount()+minFailures {
		return nil, FetchResultReport{}, Error.New("number of non-nil limits (%d) is less than requested result count (%d)", nonNilCount(limits), es.RequiredCount()+minFailures)
	}

	pieceSize := eestream.CalcPieceSize(dataSize, es)

	// Shared state below is guarded by cond.L; workers take the lock before
	// reading or writing any of these counters/collections.
	errorCount := 0
	var successfulPieces, inProgress int
	unusedLimits := nonNilLimits
	pieceReaders := make(map[int]io.ReadCloser)
	var pieces FetchResultReport

	// At most RequiredCount downloads run concurrently; the cond variable
	// coordinates which workers proceed, wait, or give up.
	limiter := sync2.NewLimiter(es.RequiredCount())
	cond := sync.NewCond(&sync.Mutex{})

	for currentLimitIndex, limit := range limits {
		if limit == nil {
			continue
		}
		// shadow loop variables for capture by the closure (pre-Go 1.22 semantics)
		currentLimitIndex, limit := currentLimitIndex, limit
		limiter.Go(ctx, func() {
			cond.L.Lock()
			// Signal runs after Unlock (defers execute LIFO), waking one
			// waiter whenever this worker finishes or bails out.
			defer cond.Signal()
			defer cond.L.Unlock()

			for {
				if successfulPieces >= es.RequiredCount() && errorCount >= minFailures {
					// already downloaded required number of pieces
					cond.Broadcast()
					return
				}
				if successfulPieces+inProgress+unusedLimits < es.RequiredCount() || errorCount+inProgress+unusedLimits < minFailures {
					// not enough available limits left to get required number of pieces
					cond.Broadcast()
					return
				}

				if successfulPieces+inProgress >= es.RequiredCount() {
					// enough downloads in flight already; wait for one to finish
					cond.Wait()
					continue
				}

				// claim a limit and perform the download outside the lock
				unusedLimits--
				inProgress++
				cond.L.Unlock()

				info := cachedNodesInfo[limit.GetLimit().StorageNodeId]
				address := limit.GetStorageNodeAddress().GetAddress()
				var triedLastIPPort bool
				if info.LastIPPort != "" && info.LastIPPort != address {
					// prefer the last known working ip:port over the supplied hostname
					address = info.LastIPPort
					triedLastIPPort = true
				}

				pieceReadCloser, _, _, err := ec.downloadAndVerifyPiece(ctx, limit, address, privateKey, "", pieceSize)
				// if piecestore dial with last ip:port failed try again with node address
				// (only genuine Dial failures are retried; see ErrDialFailed)
				if triedLastIPPort && ErrDialFailed.Has(err) {
					if pieceReadCloser != nil {
						_ = pieceReadCloser.Close()
					}
					pieceReadCloser, _, _, err = ec.downloadAndVerifyPiece(ctx, limit, limit.GetStorageNodeAddress().GetAddress(), privateKey, "", pieceSize)
				}

				cond.L.Lock()
				inProgress--
				piece := metabase.Piece{
					Number:      uint16(currentLimitIndex),
					StorageNode: limit.GetLimit().StorageNodeId,
				}

				if err != nil {
					if pieceReadCloser != nil {
						_ = pieceReadCloser.Close()
					}

					// gather nodes where the calculated piece hash doesn't match the uplink signed piece hash
					if ErrPieceHashVerifyFailed.Has(err) {
						ec.log.Info("audit failed",
							zap.Stringer("node ID", limit.GetLimit().StorageNodeId),
							zap.Stringer("Piece ID", limit.Limit.PieceId),
							zap.String("reason", err.Error()))

						pieces.Failed = append(pieces.Failed, PieceFetchResult{Piece: piece, Err: err})
						errorCount++
						return
					}

					// classify the failure into an audit outcome and record it in
					// the corresponding bucket of the fetch report
					pieceAudit := audit.PieceAuditFromErr(err)
					switch pieceAudit {
					case audit.PieceAuditFailure:
						ec.log.Debug("Failed to download piece for repair: piece not found (audit failed)",
							zap.Stringer("Node ID", limit.GetLimit().StorageNodeId),
							zap.Stringer("Piece ID", limit.Limit.PieceId),
							zap.Error(err))
						pieces.Failed = append(pieces.Failed, PieceFetchResult{Piece: piece, Err: err})
						errorCount++

					case audit.PieceAuditOffline:
						ec.log.Debug("Failed to download piece for repair: dial timeout (offline)",
							zap.Stringer("Node ID", limit.GetLimit().StorageNodeId),
							zap.Stringer("Piece ID", limit.Limit.PieceId),
							zap.Error(err))
						pieces.Offline = append(pieces.Offline, PieceFetchResult{Piece: piece, Err: err})
						errorCount++

					case audit.PieceAuditContained:
						ec.log.Info("Failed to download piece for repair: download timeout (contained)",
							zap.Stringer("Node ID", limit.GetLimit().StorageNodeId),
							zap.Stringer("Piece ID", limit.Limit.PieceId),
							zap.Error(err))
						pieces.Contained = append(pieces.Contained, PieceFetchResult{Piece: piece, Err: err})
						errorCount++

					case audit.PieceAuditUnknown:
						ec.log.Info("Failed to download piece for repair: unknown transport error (skipped)",
							zap.Stringer("Node ID", limit.GetLimit().StorageNodeId),
							zap.Stringer("Piece ID", limit.Limit.PieceId),
							zap.Error(err))
						pieces.Unknown = append(pieces.Unknown, PieceFetchResult{Piece: piece, Err: err})
						errorCount++
					}

					return
				}

				pieceReaders[currentLimitIndex] = pieceReadCloser
				pieces.Successful = append(pieces.Successful, PieceFetchResult{Piece: piece})
				successfulPieces++

				return
			}
		})
	}

	limiter.Wait()

	if successfulPieces < es.RequiredCount() {
		mon.Meter("download_failed_not_enough_pieces_repair").Mark(1) //mon:locked
		return nil, pieces, &irreparableError{
			piecesAvailable: int32(successfulPieces),
			piecesRequired:  int32(es.RequiredCount()),
		}
	}
	if errorCount < minFailures {
		return nil, pieces, Error.New("expected %d failures, but only observed %d", minFailures, errorCount)
	}

	fec, err := infectious.NewFEC(es.RequiredCount(), es.TotalCount())
	if err != nil {
		return nil, pieces, Error.Wrap(err)
	}

	esScheme := eestream.NewUnsafeRSScheme(fec, es.ErasureShareSize())
	expectedSize := pieceSize * int64(es.RequiredCount())

	// cancel is handed to DecodeReaders2, which is responsible for releasing
	// the derived context when the reader is closed.
	ctx, cancel := context.WithCancel(ctx)
	decodeReader := eestream.DecodeReaders2(ctx, cancel, pieceReaders, esScheme, expectedSize, 0, false)

	return decodeReader, pieces, nil
}
2022-08-01 10:30:33 +01:00
// lazyHashWriter is a writer which can get the hash algorithm just before the first write.
type lazyHashWriter struct {
hasher hash . Hash
downloader * piecestore . Download
}
func ( l * lazyHashWriter ) Write ( p [ ] byte ) ( n int , err error ) {
// hash is available only after receiving the first message.
if l . hasher == nil {
h , _ := l . downloader . GetHashAndLimit ( )
l . hasher = pb . NewHashFromAlgorithm ( h . HashAlgorithm )
}
return l . hasher . Write ( p )
}
// Sum delegates hash calculation to the real hash algorithm.
func ( l * lazyHashWriter ) Sum ( b [ ] byte ) [ ] byte {
if l . hasher == nil {
return [ ] byte { }
}
return l . hasher . Sum ( b )
}
var _ io . Writer = & lazyHashWriter { }
// downloadAndVerifyPiece downloads a piece from a storagenode,
// expects the original order limit to have the correct piece public key,
// and expects the hash of the data to match the signed hash provided by the storagenode.
//
// On a non-nil pieceReadCloser the caller owns the closer and must Close it,
// even when err is non-nil (the data may still be wanted despite a
// verification failure).
func (ec *ECRepairer) downloadAndVerifyPiece(ctx context.Context, limit *pb.AddressedOrderLimit, address string, privateKey storj.PiecePrivateKey, tmpDir string, pieceSize int64) (pieceReadCloser io.ReadCloser, hash *pb.PieceHash, originalLimit *pb.OrderLimit, err error) {
	defer mon.Task()(&ctx)(&err)

	// contact node; both the dial and the download share the same timeout
	downloadCtx, cancel := context.WithTimeout(ctx, ec.downloadTimeout)
	defer cancel()

	ps, err := ec.dialPiecestore(downloadCtx, storj.NodeURL{
		ID:      limit.GetLimit().StorageNodeId,
		Address: address,
	})
	if err != nil {
		return nil, nil, nil, err
	}
	// fold Close errors into the named return err
	defer func() { err = errs.Combine(err, ps.Close()) }()

	downloader, err := ps.Download(downloadCtx, limit.GetLimit(), privateKey, 0, pieceSize)
	if err != nil {
		return nil, nil, nil, err
	}
	defer func() { err = errs.Combine(err, downloader.Close()) }()

	// tee the download through a lazily-initialized hasher so we can verify
	// the piece hash after the transfer completes
	hashWriter := &lazyHashWriter{
		downloader: downloader,
	}
	downloadReader := io.TeeReader(downloader, hashWriter)
	var downloadedPieceSize int64

	if ec.inmemory {
		// buffer the whole piece in memory
		pieceBytes, err := io.ReadAll(downloadReader)
		if err != nil {
			return nil, nil, nil, err
		}
		downloadedPieceSize = int64(len(pieceBytes))
		pieceReadCloser = io.NopCloser(bytes.NewReader(pieceBytes))
	} else {
		// spill the piece to an unlinked temporary file
		tempfile, err := tmpfile.New(tmpDir, "satellite-repair-*")
		if err != nil {
			return nil, nil, nil, err
		}
		// no defer tempfile.Close() here; caller is responsible for closing
		// the file, even if an error results (the caller might want the data
		// even if there is a verification error).

		downloadedPieceSize, err = io.Copy(tempfile, downloadReader)
		if err != nil {
			return tempfile, nil, nil, err
		}

		// seek to beginning of file so the repair job starts at the beginning of the piece
		_, err = tempfile.Seek(0, io.SeekStart)
		if err != nil {
			return tempfile, nil, nil, err
		}
		pieceReadCloser = tempfile
	}

	mon.Meter("repair_bytes_downloaded").Mark64(downloadedPieceSize) //mon:locked

	if downloadedPieceSize != pieceSize {
		return pieceReadCloser, nil, nil, Error.New("didn't download the correct amount of data, want %d, got %d", pieceSize, downloadedPieceSize)
	}

	// get signed piece hash and original order limit
	hash, originalLimit = downloader.GetHashAndLimit()
	if hash == nil {
		return pieceReadCloser, hash, originalLimit, Error.New("hash was not sent from storagenode")
	}
	if originalLimit == nil {
		return pieceReadCloser, hash, originalLimit, Error.New("original order limit was not sent from storagenode")
	}

	// verify order limit from storage node is signed by the satellite
	if err := verifyOrderLimitSignature(ctx, ec.satelliteSignee, originalLimit); err != nil {
		return pieceReadCloser, hash, originalLimit, err
	}

	// verify the hashes from storage node
	calculatedHash := hashWriter.Sum(nil)
	if err := verifyPieceHash(ctx, originalLimit, hash, calculatedHash); err != nil {
		// wrap with ErrPieceHashVerifyFailed so Get can classify this failure
		return pieceReadCloser, hash, originalLimit, ErrPieceHashVerifyFailed.Wrap(err)
	}

	return pieceReadCloser, hash, originalLimit, nil
}
func verifyPieceHash ( ctx context . Context , limit * pb . OrderLimit , hash * pb . PieceHash , expectedHash [ ] byte ) ( err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
if limit == nil || hash == nil || len ( expectedHash ) == 0 {
return Error . New ( "invalid arguments" )
}
if limit . PieceId != hash . PieceId {
return Error . New ( "piece id changed" )
}
if ! bytes . Equal ( hash . Hash , expectedHash ) {
2021-04-21 21:41:19 +01:00
return Error . New ( "hash from storage node, %x, does not match calculated hash, %x" , hash . Hash , expectedHash )
2019-09-06 20:20:36 +01:00
}
if err := signing . VerifyUplinkPieceHashSignature ( ctx , limit . UplinkPublicKey , hash ) ; err != nil {
return Error . New ( "invalid piece hash signature" )
}
return nil
}
// verifyOrderLimitSignature checks that the order limit returned by a storage
// node was actually signed by the given satellite signee.
func verifyOrderLimitSignature(ctx context.Context, satellite signing.Signee, limit *pb.OrderLimit) (err error) {
	if err := signing.VerifyOrderLimitSignature(ctx, satellite, limit); err != nil {
		return Error.New("invalid order limit signature: %v", err)
	}
	return nil
}
// Repair takes a provided segment, encodes it with the provided redundancy strategy,
// and uploads the pieces in need of repair to new nodes provided by order limits.
2021-06-17 16:05:04 +01:00
func ( ec * ECRepairer ) Repair ( ctx context . Context , limits [ ] * pb . AddressedOrderLimit , privateKey storj . PiecePrivateKey , rs eestream . RedundancyStrategy , data io . Reader , timeout time . Duration , successfulNeeded int ) ( successfulNodes [ ] * pb . Node , successfulHashes [ ] * pb . PieceHash , err error ) {
2019-09-06 20:20:36 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
pieceCount := len ( limits )
if pieceCount != rs . TotalCount ( ) {
return nil , nil , Error . New ( "size of limits slice (%d) does not match total count (%d) of erasure scheme" , pieceCount , rs . TotalCount ( ) )
}
if ! unique ( limits ) {
return nil , nil , Error . New ( "duplicated nodes are not allowed" )
}
2022-10-11 12:39:08 +01:00
readers , err := eestream . EncodeReader2 ( ctx , io . NopCloser ( data ) , rs )
2019-09-06 20:20:36 +01:00
if err != nil {
return nil , nil , err
}
// info contains data about a single piece transfer
type info struct {
i int
err error
hash * pb . PieceHash
}
// this channel is used to synchronize concurrently uploaded pieces with the overall repair
infos := make ( chan info , pieceCount )
psCtx , cancel := context . WithCancel ( ctx )
defer cancel ( )
for i , addressedLimit := range limits {
go func ( i int , addressedLimit * pb . AddressedOrderLimit ) {
2021-06-17 16:05:04 +01:00
hash , err := ec . putPiece ( psCtx , ctx , addressedLimit , privateKey , readers [ i ] )
2019-09-06 20:20:36 +01:00
infos <- info { i : i , err : err , hash : hash }
} ( i , addressedLimit )
}
2020-03-30 10:59:56 +01:00
ec . log . Debug ( "Starting a timer for repair so that the number of pieces will be closer to the success threshold" ,
2019-09-06 20:20:36 +01:00
zap . Duration ( "Timer" , timeout ) ,
zap . Int ( "Node Count" , nonNilCount ( limits ) ) ,
zap . Int ( "Optimal Threshold" , rs . OptimalThreshold ( ) ) ,
)
var successfulCount , failureCount , cancellationCount int32
timer := time . AfterFunc ( timeout , func ( ) {
2020-07-16 16:50:15 +01:00
if ! errors . Is ( ctx . Err ( ) , context . Canceled ) {
2020-03-30 10:59:56 +01:00
ec . log . Debug ( "Timer expired. Canceling the long tail..." ,
2019-09-06 20:20:36 +01:00
zap . Int32 ( "Successfully repaired" , atomic . LoadInt32 ( & successfulCount ) ) ,
)
cancel ( )
}
} )
successfulNodes = make ( [ ] * pb . Node , pieceCount )
successfulHashes = make ( [ ] * pb . PieceHash , pieceCount )
for range limits {
info := <- infos
if limits [ info . i ] == nil {
continue
}
if info . err != nil {
if ! errs2 . IsCanceled ( info . err ) {
failureCount ++
2020-04-15 20:32:22 +01:00
ec . log . Warn ( "Repair to a storage node failed" ,
zap . Stringer ( "Node ID" , limits [ info . i ] . GetLimit ( ) . StorageNodeId ) ,
zap . Error ( info . err ) ,
)
2019-09-06 20:20:36 +01:00
} else {
cancellationCount ++
2020-04-15 20:32:22 +01:00
ec . log . Debug ( "Repair to storage node cancelled" ,
zap . Stringer ( "Node ID" , limits [ info . i ] . GetLimit ( ) . StorageNodeId ) ,
zap . Error ( info . err ) ,
)
2019-09-06 20:20:36 +01:00
}
continue
}
successfulNodes [ info . i ] = & pb . Node {
Id : limits [ info . i ] . GetLimit ( ) . StorageNodeId ,
Address : limits [ info . i ] . GetStorageNodeAddress ( ) ,
}
successfulHashes [ info . i ] = info . hash
successfulCount ++
2020-05-28 21:19:44 +01:00
if successfulCount >= int32 ( successfulNeeded ) {
ec . log . Debug ( "Number of successful uploads met. Canceling the long tail..." ,
zap . Int32 ( "Successfully repaired" , atomic . LoadInt32 ( & successfulCount ) ) ,
)
cancel ( )
}
2019-09-06 20:20:36 +01:00
}
// Ensure timer is stopped
_ = timer . Stop ( )
// TODO: clean up the partially uploaded segment's pieces
defer func ( ) {
select {
case <- ctx . Done ( ) :
err = Error . New ( "repair cancelled" )
default :
}
} ( )
if successfulCount == 0 {
2019-10-16 16:28:56 +01:00
return nil , nil , Error . New ( "repair to all nodes failed" )
2019-09-06 20:20:36 +01:00
}
2020-04-15 20:32:22 +01:00
ec . log . Debug ( "Successfully repaired" ,
2019-09-06 20:20:36 +01:00
zap . Int32 ( "Success Count" , atomic . LoadInt32 ( & successfulCount ) ) ,
)
2020-10-13 13:13:41 +01:00
mon . IntVal ( "repair_segment_pieces_total" ) . Observe ( int64 ( pieceCount ) ) //mon:locked
mon . IntVal ( "repair_segment_pieces_successful" ) . Observe ( int64 ( successfulCount ) ) //mon:locked
mon . IntVal ( "repair_segment_pieces_failed" ) . Observe ( int64 ( failureCount ) ) //mon:locked
mon . IntVal ( "repair_segment_pieces_canceled" ) . Observe ( int64 ( cancellationCount ) ) //mon:locked
2019-09-06 20:20:36 +01:00
return successfulNodes , successfulHashes , nil
}
// putPiece uploads one erasure share to the node named in limit.
//
// ctx governs this individual upload (it is the long-tail-cancelable psCtx);
// parent is the overall repair context, used only to distinguish a user-level
// cancellation from a long-tail cut. A nil limit drains and discards data and
// returns (nil, nil). Always closes data.
func (ec *ECRepairer) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, err error) {
	defer mon.Task()(&ctx)(&err)

	nodeName := "nil"
	if limit != nil {
		// first 8 characters of the node ID, for per-node task monitoring
		nodeName = limit.GetLimit().StorageNodeId.String()[0:8]
	}
	defer mon.Task()(&ctx, "node: "+nodeName)(&err)
	defer func() { err = errs.Combine(err, data.Close()) }()

	if limit == nil {
		// drain the reader so the erasure encoder is not blocked
		_, _ = io.Copy(io.Discard, data)
		return nil, nil
	}

	storageNodeID := limit.GetLimit().StorageNodeId
	pieceID := limit.GetLimit().PieceId
	ps, err := ec.dialPiecestore(ctx, storj.NodeURL{
		ID:      storageNodeID,
		Address: limit.GetStorageNodeAddress().Address,
	})
	if err != nil {
		ec.log.Debug("Failed dialing for putting piece to node",
			zap.Stringer("Piece ID", pieceID),
			zap.Stringer("Node ID", storageNodeID),
			zap.Error(err),
		)
		return nil, err
	}
	defer func() { err = errs.Combine(err, ps.Close()) }()

	hash, err = ps.UploadReader(ctx, limit.GetLimit(), privateKey, data)
	if err != nil {
		if errors.Is(ctx.Err(), context.Canceled) {
			// Canceled context means the piece upload was interrupted by user or due
			// to slow connection. No error logging for this case.
			if errors.Is(parent.Err(), context.Canceled) {
				ec.log.Debug("Upload to node canceled by user",
					zap.Stringer("Node ID", storageNodeID))
			} else {
				ec.log.Debug("Node cut from upload due to slow connection",
					zap.Stringer("Node ID", storageNodeID))
			}

			// make sure context.Canceled is the primary error in the error chain
			// for later errors.Is/errs2.IsCanceled checking
			err = errs.Combine(context.Canceled, err)
		} else {
			nodeAddress := "nil"
			if limit.GetStorageNodeAddress() != nil {
				nodeAddress = limit.GetStorageNodeAddress().GetAddress()
			}

			ec.log.Debug("Failed uploading piece to node",
				zap.Stringer("Piece ID", pieceID),
				zap.Stringer("Node ID", storageNodeID),
				zap.String("Node Address", nodeAddress),
				zap.Error(err),
			)
		}
	}

	return hash, err
}
// nonNilCount reports how many entries of limits are non-nil.
func nonNilCount(limits []*pb.AddressedOrderLimit) int {
	count := 0
	for i := range limits {
		if limits[i] != nil {
			count++
		}
	}
	return count
}
// unique reports whether every non-zero storage node ID in limits appears at
// most once. Nil limits (and any zero node IDs) are ignored and may repeat.
func unique(limits []*pb.AddressedOrderLimit) bool {
	if len(limits) < 2 {
		return true
	}

	seen := make(map[storj.NodeID]struct{}, len(limits))
	for _, addressedLimit := range limits {
		if addressedLimit == nil {
			continue
		}
		id := addressedLimit.GetLimit().StorageNodeId
		if id == (storj.NodeID{}) {
			// zero IDs are allowed to repeat, matching the original
			// sort-and-scan behavior which skipped zero-valued neighbors
			continue
		}
		if _, ok := seen[id]; ok {
			return false
		}
		seen[id] = struct{}{}
	}
	return true
}