// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package gracefulexit

import (
	"bytes"
	"context"
	"io"
	"os"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/rpc"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/piecestore"
	"storj.io/storj/storagenode/satellites"
	"storj.io/storj/uplink/ecclient"
)

// Worker is responsible for completing the graceful exit for a given satellite.
type Worker struct {
	log                *zap.Logger
	store              *pieces.Store
	satelliteDB        satellites.DB
	dialer             rpc.Dialer
	limiter            *sync2.Limiter
	satelliteID        storj.NodeID
	satelliteAddr      string
	ecclient           ecclient.Client
	minBytesPerSecond  memory.Size
	minDownloadTimeout time.Duration
}

// NewWorker instantiates Worker.
func NewWorker(log *zap.Logger, store *pieces.Store, satelliteDB satellites.DB, dialer rpc.Dialer, satelliteID storj.NodeID, satelliteAddr string, config Config) *Worker {
	return &Worker{
		log:                log,
		store:              store,
		satelliteDB:        satelliteDB,
		dialer:             dialer,
		limiter:            sync2.NewLimiter(config.NumConcurrentTransfers),
		satelliteID:        satelliteID,
		satelliteAddr:      satelliteAddr,
		ecclient:           ecclient.NewClient(log, dialer, 0),
		minBytesPerSecond:  config.MinBytesPerSecond,
		minDownloadTimeout: config.MinDownloadTimeout,
	}
}

// Run calls the satellite endpoint, transfers pieces, validates, and responds with success or failure.
// It also marks the satellite finished once all the pieces have been transferred.
func (worker *Worker) Run(ctx context.Context, done func()) (err error) {
	defer mon.Task()(&ctx)(&err)
	defer done()

	worker.log.Debug("running worker")

	conn, err := worker.dialer.DialAddressID(ctx, worker.satelliteAddr, worker.satelliteID)
	if err != nil {
		return errs.Wrap(err)
	}
	defer func() {
		err = errs.Combine(err, conn.Close())
	}()

	client := pb.NewDRPCSatelliteGracefulExitClient(conn.Raw())

	c, err := client.Process(ctx)
	if err != nil {
		return errs.Wrap(err)
	}

	for {
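		// Each satellite message either schedules work (a piece transfer or deletion)
		// or ends the exit (not ready, completed, or failed).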
		response, err := c.Recv()
		if errs.Is(err, io.EOF) {
			// Done
			return nil
		}
		if err != nil {
			// TODO what happened
			return errs.Wrap(err)
		}

		switch msg := response.GetMessage().(type) {
		case *pb.SatelliteMessage_NotReady:
			return nil

		case *pb.SatelliteMessage_TransferPiece:
			transferPieceMsg := msg.TransferPiece
			worker.limiter.Go(ctx, func() {
				err := worker.transferPiece(ctx, transferPieceMsg, c)
				if err != nil {
					worker.log.Error("failed to transfer piece.",
						zap.Stringer("Satellite ID", worker.satelliteID),
						zap.Error(errs.Wrap(err)))
				}
			})

		case *pb.SatelliteMessage_DeletePiece:
			deletePieceMsg := msg.DeletePiece
			worker.limiter.Go(ctx, func() {
				pieceID := deletePieceMsg.OriginalPieceId
				err := worker.deleteOnePieceOrAll(ctx, &pieceID)
				if err != nil {
					worker.log.Error("failed to delete piece.",
						zap.Stringer("Satellite ID", worker.satelliteID),
						zap.Stringer("Piece ID", pieceID),
						zap.Error(errs.Wrap(err)))
				}
			})

		case *pb.SatelliteMessage_ExitFailed:
			worker.log.Error("graceful exit failed.",
				zap.Stringer("Satellite ID", worker.satelliteID),
				zap.Stringer("reason", msg.ExitFailed.Reason))

			exitFailedBytes, err := proto.Marshal(msg.ExitFailed)
			if err != nil {
				worker.log.Error("failed to marshal exit failed message.")
			}
			err = worker.satelliteDB.CompleteGracefulExit(ctx, worker.satelliteID, time.Now(), satellites.ExitFailed, exitFailedBytes)
			return errs.Wrap(err)

		case *pb.SatelliteMessage_ExitCompleted:
			worker.log.Info("graceful exit completed.", zap.Stringer("Satellite ID", worker.satelliteID))

			exitCompletedBytes, err := proto.Marshal(msg.ExitCompleted)
			if err != nil {
				worker.log.Error("failed to marshal exit completed message.")
			}

			err = worker.satelliteDB.CompleteGracefulExit(ctx, worker.satelliteID, time.Now(), satellites.ExitSucceeded, exitCompletedBytes)
			if err != nil {
				return errs.Wrap(err)
			}
			// delete all remaining pieces
			err = worker.deleteOnePieceOrAll(ctx, nil)
			return errs.Wrap(err)

		default:
			// TODO handle err
			worker.log.Error("unknown graceful exit message.", zap.Stringer("Satellite ID", worker.satelliteID))
		}
	}
}
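
// gracefulExitStream is the subset of the graceful exit process stream used by
// transferPiece to receive satellite messages and send back transfer results.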
type gracefulExitStream interface {
	Context() context.Context
	Send(*pb.StorageNodeMessage) error
	Recv() (*pb.SatelliteMessage, error)
}
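
// transferPiece reads a piece stored for the exiting satellite, uploads it to the
// replacement node named in the addressed order limit, verifies the returned hash
// and signature, and reports success or failure back on the stream.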
func (worker *Worker) transferPiece(ctx context.Context, transferPiece *pb.TransferPiece, c gracefulExitStream) error {
	pieceID := transferPiece.OriginalPieceId
	reader, err := worker.store.Reader(ctx, worker.satelliteID, pieceID)
	if err != nil {
		transferErr := pb.TransferFailed_UNKNOWN
		if errs.Is(err, os.ErrNotExist) {
			transferErr = pb.TransferFailed_NOT_FOUND
		}
		worker.log.Error("failed to get piece reader.",
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", pieceID),
			zap.Error(errs.Wrap(err)))
		worker.handleFailure(ctx, transferErr, pieceID, c.Send)
		return err
	}

	addrLimit := transferPiece.GetAddressedOrderLimit()
	pk := transferPiece.PrivateKey

	originalHash, originalOrderLimit, err := worker.store.GetHashAndLimit(ctx, worker.satelliteID, pieceID, reader)
	if err != nil {
		worker.log.Error("failed to get piece hash and order limit.",
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", pieceID),
			zap.Error(errs.Wrap(err)))
		worker.handleFailure(ctx, pb.TransferFailed_UNKNOWN, pieceID, c.Send)
		return err
	}

	if worker.minBytesPerSecond == 0 {
		// set minBytesPerSecond to default 128B if set to 0
		worker.minBytesPerSecond = 128 * memory.B
	}
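	// Give the upload enough time to move the piece at minBytesPerSecond,
	// but never less than minDownloadTimeout.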
	maxTransferTime := time.Duration(int64(time.Second) * originalHash.PieceSize / worker.minBytesPerSecond.Int64())
	if maxTransferTime < worker.minDownloadTimeout {
		maxTransferTime = worker.minDownloadTimeout
	}
	putCtx, cancel := context.WithTimeout(ctx, maxTransferTime)
	defer cancel()

	pieceHash, peerID, err := worker.ecclient.PutPiece(putCtx, ctx, addrLimit, pk, reader)
	if err != nil {
		if piecestore.ErrVerifyUntrusted.Has(err) {
			worker.log.Error("failed hash verification.",
				zap.Stringer("Satellite ID", worker.satelliteID),
				zap.Stringer("Piece ID", pieceID),
				zap.Error(errs.Wrap(err)))
			worker.handleFailure(ctx, pb.TransferFailed_HASH_VERIFICATION, pieceID, c.Send)
		} else {
			worker.log.Error("failed to put piece.",
				zap.Stringer("Satellite ID", worker.satelliteID),
				zap.Stringer("Piece ID", pieceID),
				zap.Error(errs.Wrap(err)))
			// TODO look at error type to decide on the transfer error
			worker.handleFailure(ctx, pb.TransferFailed_STORAGE_NODE_UNAVAILABLE, pieceID, c.Send)
		}
		return err
	}

	if !bytes.Equal(originalHash.Hash, pieceHash.Hash) {
		worker.log.Error("piece hash from new storagenode does not match",
			zap.Stringer("Storagenode ID", addrLimit.Limit.StorageNodeId),
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", pieceID))
		worker.handleFailure(ctx, pb.TransferFailed_HASH_VERIFICATION, pieceID, c.Send)
		return Error.New("piece hash from new storagenode does not match")
	}
	if pieceHash.PieceId != addrLimit.Limit.PieceId {
		worker.log.Error("piece id from new storagenode does not match order limit",
			zap.Stringer("Storagenode ID", addrLimit.Limit.StorageNodeId),
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", pieceID))
		worker.handleFailure(ctx, pb.TransferFailed_HASH_VERIFICATION, pieceID, c.Send)
		return Error.New("piece id from new storagenode does not match order limit")
	}

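	// Confirm the replacement node actually signed the piece hash it returned.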
	signee := signing.SigneeFromPeerIdentity(peerID)
	err = signing.VerifyPieceHashSignature(ctx, signee, pieceHash)
	if err != nil {
		worker.log.Error("invalid piece hash signature from new storagenode",
			zap.Stringer("Storagenode ID", addrLimit.Limit.StorageNodeId),
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", pieceID),
			zap.Error(errs.Wrap(err)))
		worker.handleFailure(ctx, pb.TransferFailed_HASH_VERIFICATION, pieceID, c.Send)
		return err
	}

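	// Report the completed transfer, including the original hash and order limit
	// alongside the replacement hash, so the satellite can validate it.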
	success := &pb.StorageNodeMessage{
		Message: &pb.StorageNodeMessage_Succeeded{
			Succeeded: &pb.TransferSucceeded{
				OriginalPieceId:      transferPiece.OriginalPieceId,
				OriginalPieceHash:    &originalHash,
				OriginalOrderLimit:   &originalOrderLimit,
				ReplacementPieceHash: pieceHash,
			},
		},
	}
	worker.log.Info("piece transferred to new storagenode",
		zap.Stringer("Storagenode ID", addrLimit.Limit.StorageNodeId),
		zap.Stringer("Satellite ID", worker.satelliteID),
		zap.Stringer("Piece ID", pieceID))
	return c.Send(success)
}

// deleteOnePieceOrAll deletes pieces stored for a satellite. When no piece ID is specified, all pieces stored for that satellite are deleted.
func (worker *Worker) deleteOnePieceOrAll(ctx context.Context, pieceID *storj.PieceID) error {
	// get piece size
	pieceMap := make(map[pb.PieceID]int64)
	ctxWithCancel, cancel := context.WithCancel(ctx)
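	// Walk this satellite's pieces to collect sizes; when a single piece ID is
	// requested, cancel the walk as soon as that piece has been found.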
	err := worker.store.WalkSatellitePieces(ctxWithCancel, worker.satelliteID, func(piece pieces.StoredPieceAccess) error {
		_, size, err := piece.Size(ctxWithCancel)
		if err != nil {
			worker.log.Debug("failed to retrieve piece info", zap.Stringer("Satellite ID", worker.satelliteID), zap.Error(err))
		}
		if pieceID == nil {
			pieceMap[piece.PieceID()] = size
			return nil
		}
		if piece.PieceID() == *pieceID {
			pieceMap[*pieceID] = size
			cancel()
		}
		return nil
	})

	if err != nil && !errs.Is(err, context.Canceled) {
		worker.log.Debug("failed to retrieve piece info", zap.Stringer("Satellite ID", worker.satelliteID), zap.Error(err))
	}

	var totalDeleted int64
	for id, size := range pieceMap {
		if size == 0 {
			continue
		}
		err := worker.store.Delete(ctx, worker.satelliteID, id)
		if err != nil {
			worker.log.Debug("failed to delete a piece",
				zap.Stringer("Satellite ID", worker.satelliteID),
				zap.Stringer("Piece ID", id),
				zap.Error(err))
			err = worker.store.DeleteFailed(ctx, pieces.ExpiredInfo{
				SatelliteID: worker.satelliteID,
				PieceID:     id,
				InPieceInfo: true,
			}, time.Now().UTC())
			if err != nil {
				worker.log.Debug("failed to mark a deletion failure for a piece",
					zap.Stringer("Satellite ID", worker.satelliteID),
					zap.Stringer("Piece ID", id),
					zap.Error(err))
			}
			continue
		}
		worker.log.Debug("delete piece",
			zap.Stringer("Satellite ID", worker.satelliteID),
			zap.Stringer("Piece ID", id))
		totalDeleted += size
	}

	// update transfer progress
	return worker.satelliteDB.UpdateGracefulExit(ctx, worker.satelliteID, totalDeleted)
}
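
// handleFailure notifies the satellite that transferring pieceID failed with the
// given transfer error code.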
func (worker *Worker) handleFailure(ctx context.Context, transferError pb.TransferFailed_Error, pieceID pb.PieceID, send func(*pb.StorageNodeMessage) error) {
	failure := &pb.StorageNodeMessage{
		Message: &pb.StorageNodeMessage_Failed{
			Failed: &pb.TransferFailed{
				OriginalPieceId: pieceID,
				Error:           transferError,
			},
		},
	}

	sendErr := send(failure)
	if sendErr != nil {
		worker.log.Error("unable to send failure.", zap.Stringer("Satellite ID", worker.satelliteID))
	}
}

// Close halts the worker, waiting for any in-flight piece transfers and deletions to finish.
func (worker *Worker) Close() error {
	worker.limiter.Wait()
	return nil
}