satellite/metainfo: comment tests for rename

Comment out tests that contain fields which need to be
renamed on the uplink side without breaking compatibility.
After the rename, the tests will be moved back out of comments.

Change-Id: I439783c62678c32805a85aa52bef1d2b767543a1
Qweder93 2021-12-10 14:29:43 +02:00 committed by Nikolai Siedov
parent dc6128e9e2
commit 15c2b2cc10
2 changed files with 1248 additions and 1259 deletions
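
The commit message does not name the fields being renamed. As a rough sketch of the compatibility approach it describes, a common pattern is to ship the new field alongside the deprecated one, so old and new uplinks interoperate during the transition. Only EncryptedPath appears in this diff; the replacement name EncryptedObjectKey below is an assumption for illustration, not taken from this commit:

package main

import "fmt"

// BeginObjectParams sketches a request struct mid-rename: the deprecated
// field is kept next to its replacement so existing callers keep compiling
// while new callers adopt the new name. (The new field name is assumed for
// illustration; this commit does not name it.)
type BeginObjectParams struct {
    // Deprecated: use EncryptedObjectKey instead.
    EncryptedPath      []byte
    EncryptedObjectKey []byte
}

// objectKey prefers the new field and falls back to the deprecated one,
// so requests from both generations of clients resolve to the same value.
func objectKey(p BeginObjectParams) []byte {
    if len(p.EncryptedObjectKey) > 0 {
        return p.EncryptedObjectKey
    }
    return p.EncryptedPath
}

func main() {
    oldCaller := BeginObjectParams{EncryptedPath: []byte("a/b/testobject")}
    newCaller := BeginObjectParams{EncryptedObjectKey: []byte("a/b/testobject")}
    fmt.Printf("old caller resolves to: %s\n", objectKey(oldCaller))
    fmt.Printf("new caller resolves to: %s\n", objectKey(newCaller))
}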


@@ -6,23 +6,18 @@ package metainfo_test
import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "storj.io/common/errs2"
    "storj.io/common/memory"
    "storj.io/common/pb"
    "storj.io/common/rpc/rpcstatus"
    "storj.io/common/signing"
    "storj.io/common/storj"
    "storj.io/common/testcontext"
    "storj.io/common/testrand"
    "storj.io/storj/private/testplanet"
    "storj.io/storj/satellite/metabase"
    "storj.io/uplink"
    "storj.io/uplink/private/metaclient"
)

func TestEndpoint_DeleteCommittedObject(t *testing.T) {
@@ -398,186 +393,186 @@ func TestDeleteBucket(t *testing.T) {
    })
}

func TestCommitSegment_Validation(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
        Reconfigure: testplanet.Reconfigure{
            Satellite: testplanet.Combine(
                testplanet.ReconfigureRS(1, 1, 1, 1),
            ),
        },
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
        client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
        require.NoError(t, err)
        defer ctx.Check(client.Close)

        err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
        require.NoError(t, err)

        beginObjectResponse, err := client.BeginObject(ctx, metaclient.BeginObjectParams{
            Bucket: []byte("testbucket"),
            EncryptedPath: []byte("a/b/testobject"),
            EncryptionParameters: storj.EncryptionParameters{
                CipherSuite: storj.EncAESGCM,
                BlockSize: 256,
            },
        })
        require.NoError(t, err)

        response, err := client.BeginSegment(ctx, metaclient.BeginSegmentParams{
            StreamID: beginObjectResponse.StreamID,
            Position: storj.SegmentPosition{
                Index: 0,
            },
            MaxOrderLimit: memory.MiB.Int64(),
        })
        require.NoError(t, err)

        // the number of results of uploaded pieces (0) is below the optimal threshold (1)
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            UploadResult: []*pb.SegmentPieceUploadResult{},
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // piece sizes are invalid: pointer verification: no remote pieces
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {},
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // piece sizes are invalid: pointer verification: size is invalid (-1)
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    Hash: &pb.PieceHash{
                        PieceSize: -1,
                    },
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // piece sizes are invalid: pointer verification: expected size is different from provided (768 != 10000)
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            SizeEncryptedData: 512,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    Hash: &pb.PieceHash{
                        PieceSize: 10000,
                    },
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // piece sizes are invalid: pointer verification: sizes do not match (10000 != 9000)
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    Hash: &pb.PieceHash{
                        PieceSize: 10000,
                    },
                },
                {
                    Hash: &pb.PieceHash{
                        PieceSize: 9000,
                    },
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // pointer verification failed: pointer verification: invalid piece number
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            SizeEncryptedData: 512,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    PieceNum: 10,
                    NodeId: response.Limits[0].Limit.StorageNodeId,
                    Hash: &pb.PieceHash{
                        PieceSize: 768,
                        PieceId: response.Limits[0].Limit.PieceId,
                        Timestamp: time.Now(),
                    },
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // signature verification error (no signature)
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            SizeEncryptedData: 512,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    PieceNum: 0,
                    NodeId: response.Limits[0].Limit.StorageNodeId,
                    Hash: &pb.PieceHash{
                        PieceSize: 768,
                        PieceId: response.Limits[0].Limit.PieceId,
                        Timestamp: time.Now(),
                    },
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        signer := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
        signedHash, err := signing.SignPieceHash(ctx, signer, &pb.PieceHash{
            PieceSize: 768,
            PieceId: response.Limits[0].Limit.PieceId,
            Timestamp: time.Now(),
        })
        require.NoError(t, err)

        // pointer verification failed: pointer verification: nil identity returned
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            SizeEncryptedData: 512,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    PieceNum: 0,
                    NodeId: testrand.NodeID(), // random node ID
                    Hash: signedHash,
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

        // plain segment size 513 is out of range, maximum allowed is 512
        err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
            SegmentID: response.SegmentID,
            PlainSize: 513,
            SizeEncryptedData: 512,
            UploadResult: []*pb.SegmentPieceUploadResult{
                {
                    PieceNum: 0,
                    NodeId: response.Limits[0].Limit.StorageNodeId,
                    Hash: signedHash,
                },
            },
        })
        require.Error(t, err)
        require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
    })
}
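
// Per the commit message, the commented copy below is the same test kept in
// comments while the affected fields are renamed on the uplink side; the
// plan is to move it back out of comments once the rename lands.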
// func TestCommitSegment_Validation(t *testing.T) {
//     testplanet.Run(t, testplanet.Config{
//         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
//         Reconfigure: testplanet.Reconfigure{
//             Satellite: testplanet.Combine(
//                 testplanet.ReconfigureRS(1, 1, 1, 1),
//             ),
//         },
//     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//         apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//         client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//         require.NoError(t, err)
//         defer ctx.Check(client.Close)
//
//         err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
//         require.NoError(t, err)
//
//         beginObjectResponse, err := client.BeginObject(ctx, metaclient.BeginObjectParams{
//             Bucket: []byte("testbucket"),
//             EncryptedPath: []byte("a/b/testobject"),
//             EncryptionParameters: storj.EncryptionParameters{
//                 CipherSuite: storj.EncAESGCM,
//                 BlockSize: 256,
//             },
//         })
//         require.NoError(t, err)
//
//         response, err := client.BeginSegment(ctx, metaclient.BeginSegmentParams{
//             StreamID: beginObjectResponse.StreamID,
//             Position: storj.SegmentPosition{
//                 Index: 0,
//             },
//             MaxOrderLimit: memory.MiB.Int64(),
//         })
//         require.NoError(t, err)
//
//         // the number of results of uploaded pieces (0) is below the optimal threshold (1)
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             UploadResult: []*pb.SegmentPieceUploadResult{},
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // piece sizes are invalid: pointer verification: no remote pieces
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {},
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // piece sizes are invalid: pointer verification: size is invalid (-1)
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     Hash: &pb.PieceHash{
//                         PieceSize: -1,
//                     },
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // piece sizes are invalid: pointer verification: expected size is different from provided (768 != 10000)
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             SizeEncryptedData: 512,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     Hash: &pb.PieceHash{
//                         PieceSize: 10000,
//                     },
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // piece sizes are invalid: pointer verification: sizes do not match (10000 != 9000)
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     Hash: &pb.PieceHash{
//                         PieceSize: 10000,
//                     },
//                 },
//                 {
//                     Hash: &pb.PieceHash{
//                         PieceSize: 9000,
//                     },
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // pointer verification failed: pointer verification: invalid piece number
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             SizeEncryptedData: 512,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     PieceNum: 10,
//                     NodeId: response.Limits[0].Limit.StorageNodeId,
//                     Hash: &pb.PieceHash{
//                         PieceSize: 768,
//                         PieceId: response.Limits[0].Limit.PieceId,
//                         Timestamp: time.Now(),
//                     },
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // signature verification error (no signature)
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             SizeEncryptedData: 512,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     PieceNum: 0,
//                     NodeId: response.Limits[0].Limit.StorageNodeId,
//                     Hash: &pb.PieceHash{
//                         PieceSize: 768,
//                         PieceId: response.Limits[0].Limit.PieceId,
//                         Timestamp: time.Now(),
//                     },
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         signer := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
//         signedHash, err := signing.SignPieceHash(ctx, signer, &pb.PieceHash{
//             PieceSize: 768,
//             PieceId: response.Limits[0].Limit.PieceId,
//             Timestamp: time.Now(),
//         })
//         require.NoError(t, err)
//
//         // pointer verification failed: pointer verification: nil identity returned
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             SizeEncryptedData: 512,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     PieceNum: 0,
//                     NodeId: testrand.NodeID(), // random node ID
//                     Hash: signedHash,
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//
//         // plain segment size 513 is out of range, maximum allowed is 512
//         err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{
//             SegmentID: response.SegmentID,
//             PlainSize: 513,
//             SizeEncryptedData: 512,
//             UploadResult: []*pb.SegmentPieceUploadResult{
//                 {
//                     PieceNum: 0,
//                     NodeId: response.Limits[0].Limit.StorageNodeId,
//                     Hash: signedHash,
//                 },
//             },
//         })
//         require.Error(t, err)
//         require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
//     })
// }
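
// Every case in the test above ends with the same two assertions: the call
// must fail, and errs2.IsRPC must report an InvalidArgument RPC status. The
// commented sketch below shows that pattern in isolation; validate is a
// hypothetical stand-in for the satellite-side endpoint check and is not a
// function from this codebase.
//
// package main
//
// import (
//     "fmt"
//
//     "storj.io/common/errs2"
//     "storj.io/common/pb"
//     "storj.io/common/rpc/rpcstatus"
// )
//
// // validate rejects an empty set of upload results with an InvalidArgument
// // status, mirroring the first case in TestCommitSegment_Validation.
// func validate(uploadResults []*pb.SegmentPieceUploadResult) error {
//     if len(uploadResults) == 0 {
//         return rpcstatus.Error(rpcstatus.InvalidArgument, "no upload results")
//     }
//     return nil
// }
//
// func main() {
//     err := validate(nil)
//     // errs2.IsRPC reports whether err carries the given RPC status code;
//     // this is the same check each test case performs after CommitSegment.
//     fmt.Println(errs2.IsRPC(err, rpcstatus.InvalidArgument)) // prints: true
// }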
func TestEndpoint_UpdateObjectMetadata(t *testing.T) {
    testplanet.Run(t, testplanet.Config{

File diff suppressed because it is too large