satellite/metainfo: add missing metadata validation

We are missing metadata validation for UpdateObjectMetadata and FinishCopyObject requests.

Change-Id: Idca6a4d1fe108e1593405fd3913442f5b69d09e7
parent 3c0fc3a530
commit b722c29e77
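For readers skimming the diff below: the change centralizes metadata validation in a single helper, checkEncryptedMetadataSize, and calls it from CommitObject, UpdateObjectMetadata, and FinishCopyObject before any metabase work. Here is a minimal, dependency-free sketch of the rule that helper enforces; maxMetadataSize and the plain fmt errors are illustrative stand-ins for endpoint.config.MaxMetadataSize and the rpcstatus.InvalidArgument errors used in the real code.

    package main

    import "fmt"

    // encryptedKeySize mirrors the constant this commit introduces in validation.go.
    const encryptedKeySize = 48

    // checkEncryptedMetadataSize is an illustrative restatement of the helper added by
    // this commit; maxMetadataSize stands in for endpoint.config.MaxMetadataSize.
    func checkEncryptedMetadataSize(encryptedMetadata, encryptedKey []byte, maxMetadataSize int) error {
        if len(encryptedMetadata) > maxMetadataSize {
            return fmt.Errorf("encrypted metadata is too large, got %d, maximum allowed is %d", len(encryptedMetadata), maxMetadataSize)
        }
        // the key size is only checked when metadata was actually set
        if len(encryptedMetadata) > 0 && len(encryptedKey) != encryptedKeySize {
            return fmt.Errorf("encrypted metadata key size is invalid, got %d, expected %d", len(encryptedKey), encryptedKeySize)
        }
        return nil
    }

    func main() {
        // oversized metadata or a wrong-sized key is rejected, valid input passes
        fmt.Println(checkEncryptedMetadataSize(make([]byte, 10), make([]byte, 16), 2048)) // error: key too short
        fmt.Println(checkEncryptedMetadataSize(make([]byte, 10), make([]byte, 48), 2048)) // nil
    }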
@@ -98,7 +98,7 @@ func TestBatch(t *testing.T) {
 			requests = append(requests, &metaclient.CommitObjectParams{
 				EncryptedMetadata:             metadata,
 				EncryptedMetadataNonce:        testrand.Nonce(),
-				EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+				EncryptedMetadataEncryptedKey: randomEncryptedKey,
 			})

 			responses, err := metainfoClient.Batch(ctx, requests...)
@@ -176,7 +176,7 @@ func TestBatch(t *testing.T) {
 			StreamID:                      beginObjectResp.StreamID,
 			EncryptedMetadata:             metadata,
 			EncryptedMetadataNonce:        testrand.Nonce(),
-			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+			EncryptedMetadataEncryptedKey: testrand.Bytes(48),
 		})

 		responses, err := metainfoClient.Batch(ctx, requests...)
@@ -15,7 +15,6 @@ import (
 	"storj.io/common/encryption"
 	"storj.io/common/errs2"
 	"storj.io/common/macaroon"
-	"storj.io/common/memory"
 	"storj.io/common/pb"
 	"storj.io/common/rpc/rpcstatus"
 	"storj.io/common/storj"
@@ -209,9 +208,8 @@ func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommit
 		return nil, err
 	}

-	metadataSize := memory.Size(len(req.EncryptedMetadata))
-	if metadataSize > endpoint.config.MaxMetadataSize {
-		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("Metadata is too large, got %v, maximum allowed is %v", metadataSize, endpoint.config.MaxMetadataSize))
+	if err := endpoint.checkEncryptedMetadataSize(req.EncryptedMetadata, req.EncryptedMetadataEncryptedKey); err != nil {
+		return nil, err
 	}

 	id, err := uuid.FromBytes(streamID.StreamId)
@@ -1081,6 +1079,10 @@ func (endpoint *Endpoint) UpdateObjectMetadata(ctx context.Context, req *pb.Obje
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
 	}

+	if err := endpoint.checkEncryptedMetadataSize(req.EncryptedMetadata, req.EncryptedMetadataEncryptedKey); err != nil {
+		return nil, err
+	}
+
 	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
 	if err != nil {
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
@@ -1865,6 +1867,10 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
 	}

+	if err := endpoint.checkEncryptedMetadataSize(req.NewEncryptedMetadata, req.NewEncryptedMetadataKey); err != nil {
+		return nil, err
+	}
+
 	exists, err := endpoint.buckets.HasBucket(ctx, req.NewBucket, keyInfo.ProjectID)
 	if err != nil {
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
@@ -228,7 +228,7 @@ func TestObject_NoStorageNodes(t *testing.T) {
 			StreamID:                      beginObjectResponse.StreamID,
 			EncryptedMetadata:             metadata,
 			EncryptedMetadataNonce:        testrand.Nonce(),
-			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+			EncryptedMetadataEncryptedKey: randomEncryptedKey,
 		})
 		require.Error(t, err)
 		assertInvalidArgument(t, err, true)
@@ -243,7 +243,7 @@ func TestObject_NoStorageNodes(t *testing.T) {
 			StreamID:                      beginObjectResponse.StreamID,
 			EncryptedMetadata:             metadata,
 			EncryptedMetadataNonce:        testrand.Nonce(),
-			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+			EncryptedMetadataEncryptedKey: randomEncryptedKey,
 		})
 		require.NoError(t, err)
 	})
@@ -271,7 +271,7 @@ func TestObject_NoStorageNodes(t *testing.T) {
 		require.NoError(t, err)

 		testEncryptedMetadata := testrand.BytesInt(64)
-		testEncryptedMetadataEncryptedKey := testrand.BytesInt(32)
+		testEncryptedMetadataEncryptedKey := randomEncryptedKey
 		testEncryptedMetadataNonce := testrand.Nonce()

 		// update the object metadata
@@ -618,7 +618,7 @@ func TestEndpoint_Object_With_StorageNodes(t *testing.T) {
 			StreamID:                      beginObjectResponse.StreamID,
 			EncryptedMetadata:             metadata,
 			EncryptedMetadataNonce:        testrand.Nonce(),
-			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+			EncryptedMetadataEncryptedKey: randomEncryptedKey,
 		})
 		require.NoError(t, err)

@@ -1290,6 +1290,38 @@ func TestEndpoint_CopyObject(t *testing.T) {
 			EncryptedKey: []byte("newencryptedkey"),
 		}

+		{
+			// metadata too large
+			_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
+				Header: &pb.RequestHeader{
+					ApiKey: apiKey.SerializeRaw(),
+				},
+				StreamId:                     getResp.Object.StreamId,
+				NewBucket:                    []byte("testbucket"),
+				NewEncryptedObjectKey:        []byte("newobjectkey"),
+				NewEncryptedMetadata:         testrand.Bytes(satelliteSys.Config.Metainfo.MaxMetadataSize + 1),
+				NewEncryptedMetadataKeyNonce: testEncryptedMetadataNonce,
+				NewEncryptedMetadataKey:      []byte("encryptedmetadatakey"),
+				NewSegmentKeys:               []*pb.EncryptedKeyAndNonce{&segmentKeys},
+			})
+			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
+
+			// invalid encrypted metadata key
+			_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
+				Header: &pb.RequestHeader{
+					ApiKey: apiKey.SerializeRaw(),
+				},
+				StreamId:                     getResp.Object.StreamId,
+				NewBucket:                    []byte("testbucket"),
+				NewEncryptedObjectKey:        []byte("newobjectkey"),
+				NewEncryptedMetadata:         testrand.Bytes(satelliteSys.Config.Metainfo.MaxMetadataSize),
+				NewEncryptedMetadataKeyNonce: testEncryptedMetadataNonce,
+				NewEncryptedMetadataKey:      []byte("encryptedmetadatakey"),
+				NewSegmentKeys:               []*pb.EncryptedKeyAndNonce{&segmentKeys},
+			})
+			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
+		}
+
 		_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
 			Header: &pb.RequestHeader{
 				ApiKey: apiKey.SerializeRaw(),
@@ -1557,3 +1589,69 @@ func TestEndpoint_ParallelDeletesSameAncestor(t *testing.T) {
 		require.NoError(t, err)
 	})
 }
+
+func TestEndpoint_UpdateObjectMetadata(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		satellite := planet.Satellites[0]
+		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].SerializeRaw()
+		err := planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "object", testrand.Bytes(256))
+		require.NoError(t, err)
+
+		objects, err := satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
+		require.NoError(t, err)
+
+		validMetadata := testrand.Bytes(satellite.Config.Metainfo.MaxMetadataSize)
+		validKey := randomEncryptedKey
+
+		getObjectResponse, err := satellite.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
+			Header:        &pb.RequestHeader{ApiKey: apiKey},
+			Bucket:        []byte("testbucket"),
+			EncryptedPath: []byte(objects[0].ObjectKey),
+			Version:       int32(objects[0].Version),
+		})
+		require.NoError(t, err)
+
+		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
+			Header:                        &pb.RequestHeader{ApiKey: apiKey},
+			Bucket:                        []byte("testbucket"),
+			EncryptedObjectKey:            []byte(objects[0].ObjectKey),
+			Version:                       int32(objects[0].Version),
+			StreamId:                      getObjectResponse.Object.StreamId,
+			EncryptedMetadata:             validMetadata,
+			EncryptedMetadataEncryptedKey: validKey,
+		})
+		require.NoError(t, err)
+
+		// too large metadata
+		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
+			Header:             &pb.RequestHeader{ApiKey: apiKey},
+			Bucket:             []byte("testbucket"),
+			EncryptedObjectKey: []byte(objects[0].ObjectKey),
+			Version:            int32(objects[0].Version),
+
+			EncryptedMetadata:             testrand.Bytes(satellite.Config.Metainfo.MaxMetadataSize + 1),
+			EncryptedMetadataEncryptedKey: validKey,
+		})
+		require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
+
+		// invalid encrypted metadata key
+		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
+			Header:             &pb.RequestHeader{ApiKey: apiKey},
+			Bucket:             []byte("testbucket"),
+			EncryptedObjectKey: []byte(objects[0].ObjectKey),
+			Version:            int32(objects[0].Version),
+
+			EncryptedMetadata:             validMetadata,
+			EncryptedMetadataEncryptedKey: testrand.Bytes(16),
+		})
+		require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
+
+		// verify that metadata didn't change with rejected requests
+		objects, err = satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
+		require.NoError(t, err)
+		require.Equal(t, validMetadata, objects[0].EncryptedMetadata)
+		require.Equal(t, validKey, objects[0].EncryptedMetadataEncryptedKey)
+	})
+}
@@ -153,7 +153,7 @@ func TestInlineSegment(t *testing.T) {
 			StreamID:                      beginObjectResp.StreamID,
 			EncryptedMetadata:             metadata,
 			EncryptedMetadataNonce:        testrand.Nonce(),
-			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
+			EncryptedMetadataEncryptedKey: randomEncryptedKey,
 		})
 		require.NoError(t, err)

@@ -27,6 +27,8 @@ import (
 	"storj.io/uplink/private/metaclient"
 )

+var randomEncryptedKey = testrand.Bytes(48)
+
 func TestEndpoint_NoStorageNodes(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, UplinkCount: 3,
@@ -18,6 +18,7 @@ import (
 	"storj.io/common/encryption"
 	"storj.io/common/errs2"
 	"storj.io/common/macaroon"
+	"storj.io/common/memory"
 	"storj.io/common/pb"
 	"storj.io/common/rpc/rpcstatus"
 	"storj.io/common/storj"
@@ -28,7 +29,11 @@ import (
 	"storj.io/storj/satellite/metabase"
 )

-var ipRegexp = regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`)
+const encryptedKeySize = 48
+
+var (
+	ipRegexp = regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`)
+)

 func getAPIKey(ctx context.Context, header *pb.RequestHeader) (key *macaroon.APIKey, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -437,3 +442,18 @@ func (endpoint *Endpoint) addStorageUsageUpToLimit(ctx context.Context, projectI

 	return nil
 }
+
+// checkEncryptedMetadataSize checks the sizes of the encrypted metadata and its encrypted key.
+// Metadata encrypted key nonce is serialized to storj.Nonce automatically.
+func (endpoint *Endpoint) checkEncryptedMetadataSize(encryptedMetadata, encryptedKey []byte) error {
+	metadataSize := memory.Size(len(encryptedMetadata))
+	if metadataSize > endpoint.config.MaxMetadataSize {
+		return rpcstatus.Errorf(rpcstatus.InvalidArgument, "Encrypted metadata is too large, got %v, maximum allowed is %v", metadataSize, endpoint.config.MaxMetadataSize)
+	}
+
+	// verify key only if any metadata was set
+	if metadataSize > 0 && len(encryptedKey) != encryptedKeySize {
+		return rpcstatus.Errorf(rpcstatus.InvalidArgument, "Encrypted metadata key size is invalid, got %v, expected %v", len(encryptedKey), encryptedKeySize)
+	}
+	return nil
+}