Use SignedHash on client/uplink side (#1354)
* psclient receives the storage node's hash and compares it to its own hash for verification
* uplink sends a delete request when the hashes don't match
* valid hashes are propagated up to segments.Store for future sending to the satellite
This commit is contained in:
parent bad733cfac
commit 81408a3c9e
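In short, the uplink now hashes the piece data it uploads, checks that digest against the SignedHash reported back by the storage node, and treats a mismatch as a failed upload (deleting the piece it just stored). Below is a minimal, self-contained sketch of that verification idea using only the standard library; `verifyUpload` and `errHashMismatch` are illustrative names, not identifiers from this change.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"strings"
)

// errHashMismatch mirrors the idea behind ErrHashDoesNotMatch in this change.
var errHashMismatch = errors.New("hash does not match")

// verifyUpload is illustrative: it hashes the uploaded data locally and
// compares the digest with the hash reported back by the storage node.
func verifyUpload(data io.Reader, nodeReportedHash []byte) ([]byte, error) {
	h := sha256.New()
	if _, err := io.Copy(h, data); err != nil {
		return nil, err
	}
	clientHash := h.Sum(nil)
	if !bytes.Equal(clientHash, nodeReportedHash) {
		// The real client reacts to a mismatch by deleting the piece on the node.
		return nil, errHashMismatch
	}
	return clientHash, nil
}

func main() {
	payload := "piece data"
	good := sha256.Sum256([]byte(payload))

	if _, err := verifyUpload(strings.NewReader(payload), good[:]); err != nil {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("hashes match, piece accepted")
	}

	if _, err := verifyUpload(strings.NewReader(payload), []byte("bogus")); err != nil {
		fmt.Println("expected mismatch:", err)
	}
}
```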
@@ -23,10 +23,13 @@ import (
 	"storj.io/storj/pkg/transport"
 )
 
-// ClientError is any error returned by the client
-var ClientError = errs.Class("piecestore client error")
-
 var (
+	// ClientError is any error returned by the client
+	ClientError = errs.Class("piecestore client error")
+
+	// ErrHashDoesNotMatch indicates hash comparison failed
+	ErrHashDoesNotMatch = ClientError.New("hash does not match")
+
 	defaultBandwidthMsgSize = 32 * memory.KB
 	maxBandwidthMsgSize     = 64 * memory.KB
 )
@@ -43,7 +46,7 @@ func init() {
 // Client is an interface describing the functions for interacting with piecestore nodes
 type Client interface {
 	Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, error)
-	Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) error
+	Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) (*pb.SignedHash, error)
 	Get(ctx context.Context, id PieceID, size int64, ba *pb.OrderLimit) (ranger.Ranger, error)
 	Delete(ctx context.Context, pieceID PieceID, satelliteID storj.NodeID) error
 	io.Closer
@@ -117,10 +120,10 @@ func (ps *PieceStore) Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, e
 }
 
 // Put uploads a Piece to a piece store Server
-func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, pba *pb.OrderLimit) error {
+func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, pba *pb.OrderLimit) (*pb.SignedHash, error) {
 	stream, err := ps.client.Store(ctx)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// Making a clone, otherwise there will be a data race
@@ -142,25 +145,32 @@ func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl t
 			zap.S().Errorf("error closing stream %s :: %v.Send() = %v", closeErr, stream, closeErr)
 		}
 
-		return fmt.Errorf("%v.Send() = %v", stream, err)
+		return nil, fmt.Errorf("%v.Send() = %v", stream, err)
 	}
 
-	writer := &StreamWriter{signer: ps, stream: stream, rba: rba}
-
-	defer func() {
-		if err := writer.Close(); err != nil && err != io.EOF {
-			zap.S().Debugf("failed to close writer: %s\n", err)
-		}
-	}()
+	writer := NewStreamWriter(stream, ps, rba)
 
 	bufw := bufio.NewWriterSize(writer, 32*1024)
 
 	_, err = io.Copy(bufw, data)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	return bufw.Flush()
+	err = bufw.Flush()
+	if err != nil {
+		return nil, err
+	}
+
+	err = writer.Close()
+	if err == ErrHashDoesNotMatch {
+		return nil, errs.Combine(err, ps.Delete(ctx, id, rba.PayerAllocation.SatelliteId))
+	}
+	if err != nil && err != io.EOF {
+		return nil, ClientError.New("failure during closing writer: %v", err)
+	}
+
+	return writer.storagenodeHash, nil
 }
 
 // Get begins downloading a Piece from a piece store Server
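From a caller's perspective, the important part of the hunk above is that `Put` now returns the node's `*pb.SignedHash` and performs the mismatch handling (including the delete request) internally. A hedged sketch of that contract, with `Client` and `SignedHash` as local stand-ins for the real psclient/pb types:

```go
package psclientsketch

import (
	"context"
	"io"
	"time"
)

// SignedHash stands in for pb.SignedHash.
type SignedHash struct {
	Hash      []byte
	Signature []byte
}

// Client stands in for the psclient.Client interface after this change.
type Client interface {
	Put(ctx context.Context, id string, data io.Reader, ttl time.Time) (*SignedHash, error)
}

// storePiece shows that hash verification and the delete-on-mismatch request
// now happen inside Put itself; callers only keep the returned SignedHash
// (eventually destined for the satellite) or propagate the error.
func storePiece(ctx context.Context, c Client, id string, data io.Reader) (*SignedHash, error) {
	hash, err := c.Put(ctx, id, data, time.Now().Add(time.Hour))
	if err != nil {
		return nil, err
	}
	return hash, nil
}
```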
@@ -4,7 +4,10 @@
 package psclient
 
 import (
+	"bytes"
+	"crypto/sha256"
 	"fmt"
+	"hash"
 
 	"go.uber.org/zap"
 
@@ -14,12 +17,24 @@ import (
 	"storj.io/storj/pkg/utils"
 )
 
-// StreamWriter creates a StreamWriter for writing data to the piece store server
+// StreamWriter handles uplink or satellite writing data to the piece store server
 type StreamWriter struct {
-	stream       pb.PieceStoreRoutes_StoreClient
-	signer       *PieceStore // We need this for signing
-	totalWritten int64
-	rba          *pb.Order
+	stream          pb.PieceStoreRoutes_StoreClient
+	signer          *PieceStore // We need this for signing
+	totalWritten    int64
+	rba             *pb.Order
+	hash            hash.Hash
+	storagenodeHash *pb.SignedHash
 }
 
+// NewStreamWriter creates a StreamWriter for writing data to the piece store server
+func NewStreamWriter(stream pb.PieceStoreRoutes_StoreClient, signer *PieceStore, rba *pb.Order) *StreamWriter {
+	return &StreamWriter{
+		stream: stream,
+		signer: signer,
+		rba:    rba,
+		hash:   sha256.New(),
+	}
+}
+
 // Write Piece data to a piece store server upload stream
@@ -41,6 +56,11 @@ func (s *StreamWriter) Write(b []byte) (int, error) {
 	if err := s.stream.Send(msg); err != nil {
 		return 0, fmt.Errorf("%v.Send() = %v", s.stream, err)
 	}
+
+	_, err = s.hash.Write(b)
+	if err != nil {
+		return 0, err
+	}
 	return len(b), nil
 }
 
@@ -51,6 +71,17 @@ func (s *StreamWriter) Close() error {
 		return err
 	}
 
+	if err := auth.VerifyMsg(reply.SignedHash, s.signer.remoteID); err != nil {
+		return ClientError.Wrap(err)
+	}
+
+	clientHash := s.hash.Sum(nil)
+	if bytes.Compare(reply.SignedHash.Hash, clientHash) != 0 {
+		return ErrHashDoesNotMatch
+	}
+
+	s.storagenodeHash = reply.SignedHash
+
 	zap.S().Debugf("Stream close and recv summary: %v", reply)
 
 	return nil
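The two hunks above are the core of the change: `Write` feeds every chunk into a running SHA-256, and `Close` verifies the node's signature and then compares digests (the diff uses `bytes.Compare(...) != 0`, which is equivalent to `!bytes.Equal(...)`). A minimal, runnable sketch of that write-then-verify pattern, with a callback standing in for the `CloseAndRecv` reply and `hashingWriter` as a hypothetical name:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashingWriter mimics the StreamWriter change: every Write also feeds a
// running SHA-256, and Close compares the local digest to the digest the
// remote side claims to have stored. remoteHash would come from the
// storage node's SignedHash in the real client.
type hashingWriter struct {
	dst        io.Writer
	hash       hash.Hash
	remoteHash func() []byte // stand-in for the hash in the CloseAndRecv reply
}

func newHashingWriter(dst io.Writer, remoteHash func() []byte) *hashingWriter {
	return &hashingWriter{dst: dst, hash: sha256.New(), remoteHash: remoteHash}
}

func (w *hashingWriter) Write(p []byte) (int, error) {
	n, err := w.dst.Write(p)
	if err != nil {
		return n, err
	}
	// hash.Hash.Write never returns an error, but the real code still checks it.
	w.hash.Write(p)
	return n, nil
}

func (w *hashingWriter) Close() error {
	if !bytes.Equal(w.hash.Sum(nil), w.remoteHash()) {
		return errors.New("hash does not match")
	}
	return nil
}

func main() {
	var stored bytes.Buffer
	w := newHashingWriter(&stored, func() []byte {
		// Simulate an honest node: it reports the hash of what it received.
		sum := sha256.Sum256(stored.Bytes())
		return sum[:]
	})

	if _, err := io.Copy(w, strings.NewReader("piece data")); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("close:", w.Close()) // prints: close: <nil>
}
```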
@@ -28,7 +28,7 @@ var mon = monkit.Package()
 
 // Client defines an interface for storing erasure coded data to piece store nodes
 type Client interface {
-	Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, err error)
+	Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, successfulHashes []*pb.SignedHash, err error)
 	Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme, pieceID psclient.PieceID, size int64, pba *pb.OrderLimit) (ranger.Ranger, error)
 	Delete(ctx context.Context, nodes []*pb.Node, pieceID psclient.PieceID, satelliteID storj.NodeID) error
 }
@@ -56,29 +56,30 @@ func (ec *ecClient) newPSClient(ctx context.Context, n *pb.Node) (psclient.Clien
 	return ec.newPSClientFunc(ctx, ec.transport, n, 0)
 }
 
-func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, err error) {
+func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, successfulHashes []*pb.SignedHash, err error) {
 	defer mon.Task()(&ctx)(&err)
 	if len(nodes) != rs.TotalCount() {
-		return nil, Error.New("size of nodes slice (%d) does not match total count (%d) of erasure scheme", len(nodes), rs.TotalCount())
+		return nil, nil, Error.New("size of nodes slice (%d) does not match total count (%d) of erasure scheme", len(nodes), rs.TotalCount())
 	}
 
 	if nonNilCount(nodes) < rs.RepairThreshold() {
-		return nil, Error.New("number of non-nil nodes (%d) is less than repair threshold (%d) of erasure scheme", nonNilCount(nodes), rs.RepairThreshold())
+		return nil, nil, Error.New("number of non-nil nodes (%d) is less than repair threshold (%d) of erasure scheme", nonNilCount(nodes), rs.RepairThreshold())
 	}
 
 	if !unique(nodes) {
-		return nil, Error.New("duplicated nodes are not allowed")
+		return nil, nil, Error.New("duplicated nodes are not allowed")
 	}
 
 	padded := eestream.PadReader(ioutil.NopCloser(data), rs.StripeSize())
 	readers, err := eestream.EncodeReader(ctx, padded, rs)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	type info struct {
-		i   int
-		err error
+		i    int
+		err  error
+		hash *pb.SignedHash
 	}
 	infos := make(chan info, len(nodes))
 
@@ -93,12 +94,13 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
 		}
 
 		go func(i int, node *pb.Node) {
-			err := ec.putPiece(psCtx, ctx, node, pieceID, readers[i], expiration, pba)
-			infos <- info{i: i, err: err}
+			hash, err := ec.putPiece(psCtx, ctx, node, pieceID, readers[i], expiration, pba)
+			infos <- info{i: i, err: err, hash: hash}
 		}(i, node)
 	}
 
 	successfulNodes = make([]*pb.Node, len(nodes))
+	successfulHashes = make([]*pb.SignedHash, len(nodes))
 	var successfulCount int32
 	var timer *time.Timer
 
@@ -106,6 +108,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
 		info := <-infos
 		if info.err == nil {
 			successfulNodes[info.i] = nodes[info.i]
+			successfulHashes[info.i] = info.hash
 
 			switch int(atomic.AddInt32(&successfulCount, 1)) {
 			case rs.RepairThreshold():
@@ -140,32 +143,32 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
 	}()
 
 	if int(atomic.LoadInt32(&successfulCount)) < rs.RepairThreshold() {
-		return nil, Error.New("successful puts (%d) less than repair threshold (%d)", successfulCount, rs.RepairThreshold())
+		return nil, nil, Error.New("successful puts (%d) less than repair threshold (%d)", successfulCount, rs.RepairThreshold())
 	}
 
-	return successfulNodes, nil
+	return successfulNodes, successfulHashes, nil
 }
 
-func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID psclient.PieceID, data io.ReadCloser, expiration time.Time, pba *pb.OrderLimit) (err error) {
+func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID psclient.PieceID, data io.ReadCloser, expiration time.Time, pba *pb.OrderLimit) (hash *pb.SignedHash, err error) {
 	defer func() { err = errs.Combine(err, data.Close()) }()
 
 	if node == nil {
 		_, err = io.Copy(ioutil.Discard, data)
-		return err
+		return nil, err
 	}
 	derivedPieceID, err := pieceID.Derive(node.Id.Bytes())
 
 	if err != nil {
 		zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
-		return err
+		return nil, err
 	}
 	ps, err := ec.newPSClient(ctx, node)
 	if err != nil {
 		zap.S().Errorf("Failed dialing for putting piece %s -> %s to node %s: %v",
 			pieceID, derivedPieceID, node.Id, err)
-		return err
+		return nil, err
 	}
-	err = ps.Put(ctx, derivedPieceID, data, expiration, pba)
+	hash, err = ps.Put(ctx, derivedPieceID, data, expiration, pba)
 	defer func() { err = errs.Combine(err, ps.Close()) }()
 	// Canceled context means the piece upload was interrupted by user or due
 	// to slow connection. No error logging for this case.
@@ -185,7 +188,7 @@ func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID
 			pieceID, derivedPieceID, node.Id, nodeAddress, err)
 	}
 
-	return err
+	return hash, err
 }
 
 func (ec *ecClient) Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme,
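The `ecClient.Put`/`putPiece` hunks above fan out one upload per node and now carry the returned hash through the `info` channel into a `successfulHashes` slice that stays index-aligned with `successfulNodes`. A simplified, runnable sketch of that collection pattern follows; the real code reads exactly `len(nodes)` results from a buffered channel instead of using a `WaitGroup`, and the hashes are `*pb.SignedHash` rather than byte slices.

```go
package main

import (
	"fmt"
	"sync"
)

// result mirrors the extended info struct in ecClient.Put: each goroutine
// reports its index, an error, and now also the hash returned by the upload.
type result struct {
	i    int
	err  error
	hash []byte // stand-in for *pb.SignedHash
}

func main() {
	pieces := []string{"piece-0", "piece-1", "piece-2"}

	results := make(chan result, len(pieces))
	var wg sync.WaitGroup
	for i, p := range pieces {
		wg.Add(1)
		go func(i int, p string) {
			defer wg.Done()
			// In the real client this is putPiece, which now returns the
			// storage node's signed hash alongside the error.
			results <- result{i: i, hash: []byte(p + "-hash")}
		}(i, p)
	}
	wg.Wait()
	close(results)

	// Successful hashes are kept in a slice indexed by the original piece
	// position, just like successfulNodes/successfulHashes in the diff.
	successfulHashes := make([][]byte, len(pieces))
	for r := range results {
		if r.err == nil {
			successfulHashes[r.i] = r.hash
		}
	}
	fmt.Printf("%q\n", successfulHashes)
}
```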
@@ -156,7 +156,7 @@ TestLoop:
 			}
 			ps := NewMockPSClient(ctrl)
 			gomock.InOrder(
-				ps.EXPECT().Put(gomock.Any(), derivedID, gomock.Any(), ttl, &pb.OrderLimit{}).Return(errs[n]).
+				ps.EXPECT().Put(gomock.Any(), derivedID, gomock.Any(), ttl, &pb.OrderLimit{}).Return(&pb.SignedHash{}, errs[n]).
 					Do(func(ctx context.Context, id psclient.PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) {
 						// simulate that the mocked piece store client is reading the data
 						_, err := io.Copy(ioutil.Discard, data)
@@ -173,7 +173,7 @@ TestLoop:
 		r := io.LimitReader(rand.Reader, int64(size))
 		ec := ecClient{newPSClientFunc: mockNewPSClient(clients)}
 
-		successfulNodes, err := ec.Put(ctx, tt.nodes, rs, id, r, ttl, &pb.OrderLimit{})
+		successfulNodes, successfulHashes, err := ec.Put(ctx, tt.nodes, rs, id, r, ttl, &pb.OrderLimit{})
 
 		if tt.errString != "" {
 			assert.EqualError(t, err, tt.errString, errTag)
@@ -187,10 +187,14 @@ TestLoop:
 		for i := range tt.nodes {
 			if tt.errs[i] != nil {
 				assert.Nil(t, successfulNodes[i], errTag)
+				assert.Nil(t, successfulHashes[i], errTag)
 			} else if successfulNodes[i] == nil && tt.nodes[i] != nil {
 				slowNodes++
 			} else {
 				assert.Equal(t, tt.nodes[i], successfulNodes[i], errTag)
+				if successfulNodes[i] != nil {
+					assert.NotNil(t, successfulHashes[i], errTag)
+				}
 			}
 		}
 
@@ -55,7 +55,7 @@ func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *go
 }
 
 // Get mocks base method
-func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 psclient.PieceID, arg4 int64, arg5 *pb.OrderLimit) (ranger.Ranger, error) {
+func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 psclient.PieceID, arg4 int64, arg5 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4, arg5)
 	ret0, _ := ret[0].(ranger.Ranger)
@@ -70,12 +70,13 @@ func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4, arg5 interfa
 }
 
 // Put mocks base method
-func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 psclient.PieceID, arg4 io.Reader, arg5 time.Time, arg6 *pb.OrderLimit) ([]*pb.Node, error) {
+func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 psclient.PieceID, arg4 io.Reader, arg5 time.Time, arg6 *pb.PayerBandwidthAllocation) ([]*pb.Node, []*pb.SignedHash, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
 	ret0, _ := ret[0].([]*pb.Node)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
+	ret1, _ := ret[1].([]*pb.SignedHash)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
 }
 
 // Put indicates an expected call of Put
@@ -68,7 +68,7 @@ func (mr *MockPSClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock
 }
 
 // Get mocks base method
-func (m *MockPSClient) Get(arg0 context.Context, arg1 psclient.PieceID, arg2 int64, arg3 *pb.OrderLimit) (ranger.Ranger, error) {
+func (m *MockPSClient) Get(arg0 context.Context, arg1 psclient.PieceID, arg2 int64, arg3 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(ranger.Ranger)
@@ -98,11 +98,12 @@ func (mr *MockPSClientMockRecorder) Meta(arg0, arg1 interface{}) *gomock.Call {
 }
 
 // Put mocks base method
-func (m *MockPSClient) Put(arg0 context.Context, arg1 psclient.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.OrderLimit) error {
+func (m *MockPSClient) Put(arg0 context.Context, arg1 psclient.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.PayerBandwidthAllocation) (*pb.SignedHash, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4)
-	ret0, _ := ret[0].(error)
-	return ret0
+	ret0, _ := ret[0].(*pb.SignedHash)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
 }
 
 // Put indicates an expected call of Put
@@ -141,7 +141,7 @@ func (s *Repairer) Repair(ctx context.Context, path storj.Path, lostPieces []int
 		return Error.Wrap(err)
 	}
 	// Upload the repaired pieces to the repairNodes
-	successfulNodes, err := s.ec.Put(ctx, repairNodes, rs, pid, r, convertTime(pr.GetExpirationDate()), pbaPut)
+	successfulNodes, _, err := s.ec.Put(ctx, repairNodes, rs, pid, r, convertTime(pr.GetExpirationDate()), pbaPut)
 	if err != nil {
 		return Error.Wrap(err)
 	}
@@ -108,7 +108,7 @@ func TestSegmentStoreRepairRemote(t *testing.T) {
 				mockPDB.EXPECT().PayerBandwidthAllocation(gomock.Any(), gomock.Any()),
 				mockEC.EXPECT().Put(
 					gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
-				).Return(tt.newNodes, nil),
+				).Return(tt.newNodes, nil, nil),
 				mockPDB.EXPECT().Put(
 					gomock.Any(), gomock.Any(), gomock.Any(),
 				).Return(nil),
@@ -142,7 +142,7 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.
 		return Meta{}, Error.Wrap(err)
 	}
 
-	successfulNodes, err := s.ec.Put(ctx, nodes, s.rs, pieceID, sizedReader, expiration, pba)
+	successfulNodes, _, err := s.ec.Put(ctx, nodes, s.rs, pieceID, sizedReader, expiration, pba)
 	if err != nil {
 		return Meta{}, Error.Wrap(err)
 	}
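The repairer and segment store callers above currently discard the new return value with `_`, but the commit message notes the hashes are meant to be propagated up for future sending to the satellite. A hedged sketch of how a caller could keep them, with `Node`, `SignedHash`, and `Uploader` as hypothetical stand-ins rather than types from the repo:

```go
package segmentsketch

import (
	"context"
	"io"
	"time"
)

// Node stands in for pb.Node.
type Node struct{ ID string }

// SignedHash stands in for pb.SignedHash.
type SignedHash struct {
	Hash      []byte
	Signature []byte
}

// Uploader stands in for the erasure-coding client interface after this change.
type Uploader interface {
	Put(ctx context.Context, nodes []*Node, data io.Reader, expiration time.Time) (successfulNodes []*Node, successfulHashes []*SignedHash, err error)
}

// putSegment keeps the per-node hashes so they could later be forwarded to
// the satellite, as the commit message describes; the store.go hunk above
// still discards them for now.
func putSegment(ctx context.Context, ec Uploader, nodes []*Node, data io.Reader) ([]*Node, []*SignedHash, error) {
	successfulNodes, successfulHashes, err := ec.Put(ctx, nodes, data, time.Now().Add(24*time.Hour))
	if err != nil {
		return nil, nil, err
	}
	return successfulNodes, successfulHashes, nil
}
```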