storj/uplink/metainfo/kvmetainfo/objects_test.go

548 lines
16 KiB
Go
Raw Normal View History

2019-01-24 20:15:10 +00:00
// Copyright (C) 2019 Storj Labs, Inc.
2018-11-15 15:31:33 +00:00
// See LICENSE for copying information.
package kvmetainfo_test
2018-11-15 15:31:33 +00:00
import (
"context"
"encoding/base64"
2018-11-15 15:31:33 +00:00
"fmt"
2018-11-26 07:39:05 +00:00
"io"
"sort"
"strings"
2018-11-15 15:31:33 +00:00
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
2018-11-15 15:31:33 +00:00
"storj.io/storj/pkg/encryption"
"storj.io/storj/pkg/paths"
2018-11-15 15:31:33 +00:00
"storj.io/storj/pkg/storj"
"storj.io/storj/private/memory"
"storj.io/storj/private/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/private/testrand"
"storj.io/storj/uplink/metainfo/kvmetainfo"
"storj.io/storj/uplink/storage/streams"
"storj.io/storj/uplink/stream"
2018-11-15 15:31:33 +00:00
)
2018-11-30 13:50:52 +00:00
// TestFile is the default object path used by the tests in this file.
const TestFile = "test-file"
func TestCreateObject(t *testing.T) {
customRS := storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
RequiredShares: 29,
RepairShares: 35,
OptimalShares: 80,
TotalShares: 95,
ShareSize: 2 * memory.KiB.Int32(),
2018-11-30 13:50:52 +00:00
}
const stripesPerBlock = 2
customEP := storj.EncryptionParameters{
CipherSuite: storj.EncNull,
BlockSize: stripesPerBlock * customRS.StripeSize(),
2018-11-30 13:50:52 +00:00
}
Create and use an encryption.Store (#2293) * add path implementation This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be more clear about what is contained in a storj.Path at all the layers. Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864 * add encryption store This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store. Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497 * add shim around streams store This commit changes no functionality, but just reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting well formed paths that it expects. Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5 * refactor streams to use encryption store This commit changes the streams store to use the path type as well as the encryption store to handle all of it's encryption and decryption. Some changes were made to how the default key is returned in the encryption store to have it include the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report if a consume was valid: that information is no longer necessary. The kvmetainfo tests were changed to appropriately pass the subtests *testing.T rather than having the closure it executes use the parent one. The test framework now correctly reports which test did the failing. 
There are still some latent issues with listing in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect that there are also issues with paths like "/" or "//foo", but that's for another time. Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472 * use an encryption store at the upper layers Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f * fix linting failures Change-Id: Ifb8378879ad308d4d047a0483850156371a41280 * fix linting in encryption test Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa * get rid of kvmetainfo rootKey Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5 * Fix linting failure for return with else Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5 * fix some bugs adding enc store to kvmetainfo Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba * respond to review feedback Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
2019-06-24 20:23:07 +01:00
runTest(t, func(t *testing.T, ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, streams streams.Store) {
2018-11-15 15:31:33 +00:00
bucket, err := db.CreateBucket(ctx, TestBucket, nil)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
2018-11-30 13:50:52 +00:00
for i, tt := range []struct {
create *kvmetainfo.CreateObject
2018-11-30 13:50:52 +00:00
expectedRS storj.RedundancyScheme
expectedEP storj.EncryptionParameters
2018-11-30 13:50:52 +00:00
}{
{
create: nil,
expectedRS: kvmetainfo.DefaultRS,
expectedEP: kvmetainfo.DefaultES,
},
{
create: &kvmetainfo.CreateObject{RedundancyScheme: customRS, EncryptionParameters: customEP},
2018-11-30 13:50:52 +00:00
expectedRS: customRS,
expectedEP: customEP,
},
{
create: &kvmetainfo.CreateObject{RedundancyScheme: customRS},
2018-11-30 13:50:52 +00:00
expectedRS: customRS,
expectedEP: storj.EncryptionParameters{CipherSuite: kvmetainfo.DefaultES.CipherSuite, BlockSize: kvmetainfo.DefaultES.BlockSize},
},
{
create: &kvmetainfo.CreateObject{EncryptionParameters: customEP},
expectedRS: kvmetainfo.DefaultRS,
expectedEP: storj.EncryptionParameters{CipherSuite: customEP.CipherSuite, BlockSize: kvmetainfo.DefaultES.BlockSize},
2018-11-30 13:50:52 +00:00
},
} {
errTag := fmt.Sprintf("%d. %+v", i, tt)
obj, err := db.CreateObject(ctx, bucket, TestFile, tt.create)
require.NoError(t, err)
2018-11-30 13:50:52 +00:00
info := obj.Info()
assert.Equal(t, TestBucket, info.Bucket.Name, errTag)
assert.Equal(t, storj.EncAESGCM, info.Bucket.PathCipher, errTag)
2018-11-30 13:50:52 +00:00
assert.Equal(t, TestFile, info.Path, errTag)
assert.EqualValues(t, 0, info.Size, errTag)
assert.Equal(t, tt.expectedRS, info.RedundancyScheme, errTag)
assert.Equal(t, tt.expectedEP, info.EncryptionParameters, errTag)
2018-11-15 15:31:33 +00:00
}
2018-11-30 13:50:52 +00:00
})
}
2018-11-15 15:31:33 +00:00
2018-11-30 13:50:52 +00:00
func TestGetObject(t *testing.T) {
Create and use an encryption.Store (#2293) * add path implementation This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be more clear about what is contained in a storj.Path at all the layers. Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864 * add encryption store This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store. Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497 * add shim around streams store This commit changes no functionality, but just reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting well formed paths that it expects. Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5 * refactor streams to use encryption store This commit changes the streams store to use the path type as well as the encryption store to handle all of it's encryption and decryption. Some changes were made to how the default key is returned in the encryption store to have it include the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report if a consume was valid: that information is no longer necessary. The kvmetainfo tests were changed to appropriately pass the subtests *testing.T rather than having the closure it executes use the parent one. The test framework now correctly reports which test did the failing. 
There are still some latent issues with listing in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect that there are also issues with paths like "/" or "//foo", but that's for another time. Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472 * use an encryption store at the upper layers Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f * fix linting failures Change-Id: Ifb8378879ad308d4d047a0483850156371a41280 * fix linting in encryption test Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa * get rid of kvmetainfo rootKey Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5 * Fix linting failure for return with else Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5 * fix some bugs adding enc store to kvmetainfo Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba * respond to review feedback Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
2019-06-24 20:23:07 +01:00
runTest(t, func(t *testing.T, ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, streams streams.Store) {
2018-11-30 13:50:52 +00:00
bucket, err := db.CreateBucket(ctx, TestBucket, nil)
require.NoError(t, err)
upload(ctx, t, db, streams, bucket, TestFile, nil)
2018-11-30 13:50:52 +00:00
_, err = db.GetObject(ctx, storj.Bucket{}, "")
2018-11-15 15:31:33 +00:00
assert.True(t, storj.ErrNoBucket.Has(err))
_, err = db.GetObject(ctx, bucket, "")
2018-11-30 13:50:52 +00:00
assert.True(t, storj.ErrNoPath.Has(err))
2018-11-15 15:31:33 +00:00
nonExistingBucket := storj.Bucket{
Name: "non-existing-bucket",
PathCipher: storj.EncNull,
}
_, err = db.GetObject(ctx, nonExistingBucket, TestFile)
assert.True(t, storj.ErrObjectNotFound.Has(err))
2018-11-15 15:31:33 +00:00
_, err = db.GetObject(ctx, bucket, "non-existing-file")
assert.True(t, storj.ErrObjectNotFound.Has(err))
2018-11-15 15:31:33 +00:00
object, err := db.GetObject(ctx, bucket, TestFile)
2018-11-15 15:31:33 +00:00
if assert.NoError(t, err) {
2018-11-30 13:50:52 +00:00
assert.Equal(t, TestFile, object.Path)
assert.Equal(t, TestBucket, object.Bucket.Name)
assert.Equal(t, storj.EncAESGCM, object.Bucket.PathCipher)
2018-11-15 15:31:33 +00:00
}
})
}
func TestGetObjectStream(t *testing.T) {
Create and use an encryption.Store (#2293) * add path implementation This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be more clear about what is contained in a storj.Path at all the layers. Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864 * add encryption store This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store. Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497 * add shim around streams store This commit changes no functionality, but just reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting well formed paths that it expects. Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5 * refactor streams to use encryption store This commit changes the streams store to use the path type as well as the encryption store to handle all of it's encryption and decryption. Some changes were made to how the default key is returned in the encryption store to have it include the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report if a consume was valid: that information is no longer necessary. The kvmetainfo tests were changed to appropriately pass the subtests *testing.T rather than having the closure it executes use the parent one. The test framework now correctly reports which test did the failing. 
There are still some latent issues with listing in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect that there are also issues with paths like "/" or "//foo", but that's for another time. Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472 * use an encryption store at the upper layers Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f * fix linting failures Change-Id: Ifb8378879ad308d4d047a0483850156371a41280 * fix linting in encryption test Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa * get rid of kvmetainfo rootKey Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5 * Fix linting failure for return with else Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5 * fix some bugs adding enc store to kvmetainfo Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba * respond to review feedback Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
2019-06-24 20:23:07 +01:00
runTest(t, func(t *testing.T, ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, streams streams.Store) {
data := testrand.Bytes(32 * memory.KiB)
2018-11-21 14:35:53 +00:00
2018-11-30 13:50:52 +00:00
bucket, err := db.CreateBucket(ctx, TestBucket, nil)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
emptyFile := upload(ctx, t, db, streams, bucket, "empty-file", nil)
smallFile := upload(ctx, t, db, streams, bucket, "small-file", []byte("test"))
largeFile := upload(ctx, t, db, streams, bucket, "large-file", data)
2018-11-30 13:50:52 +00:00
emptyBucket := storj.Bucket{
PathCipher: storj.EncNull,
}
_, err = db.GetObjectStream(ctx, emptyBucket, storj.Object{})
2018-11-15 15:31:33 +00:00
assert.True(t, storj.ErrNoBucket.Has(err))
_, err = db.GetObjectStream(ctx, bucket, storj.Object{})
2018-11-30 13:50:52 +00:00
assert.True(t, storj.ErrNoPath.Has(err))
2018-11-15 15:31:33 +00:00
nonExistingBucket := storj.Bucket{
Name: "non-existing-bucket",
PathCipher: storj.EncNull,
}
2018-11-15 15:31:33 +00:00
// no error because we are not doing satellite connection with this method
_, err = db.GetObjectStream(ctx, nonExistingBucket, smallFile)
assert.NoError(t, err)
2018-11-15 15:31:33 +00:00
// no error because we are not doing satellite connection with this method
_, err = db.GetObjectStream(ctx, bucket, storj.Object{
Path: "non-existing-file",
})
assert.NoError(t, err)
assertStream(ctx, t, db, streams, bucket, emptyFile, []byte{})
assertStream(ctx, t, db, streams, bucket, smallFile, []byte("test"))
assertStream(ctx, t, db, streams, bucket, largeFile, data)
2019-02-08 20:35:59 +00:00
/* TODO: Disable stopping due to flakiness.
// Stop randomly half of the storage nodes and remove them from satellite's overlay
perm := mathrand.Perm(len(planet.StorageNodes))
for _, i := range perm[:(len(perm) / 2)] {
assert.NoError(t, planet.StopPeer(planet.StorageNodes[i]))
_, err := planet.Satellites[0].Overlay.Service.UpdateUptime(ctx, planet.StorageNodes[i].ID(), false)
assert.NoError(t, err)
}
// try downloading the large file again
assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KiB.Int64(), data)
2019-02-08 20:35:59 +00:00
*/
2018-11-15 15:31:33 +00:00
})
}
func upload(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, path storj.Path, data []byte) storj.Object {
obj, err := db.CreateObject(ctx, bucket, path, nil)
require.NoError(t, err)
2018-11-30 13:50:52 +00:00
str, err := obj.CreateStream(ctx)
require.NoError(t, err)
2018-11-30 13:50:52 +00:00
upload := stream.NewUpload(ctx, str, streams)
2018-11-30 13:50:52 +00:00
_, err = upload.Write(data)
require.NoError(t, err)
2018-11-30 13:50:52 +00:00
err = upload.Close()
require.NoError(t, err)
2018-11-30 13:50:52 +00:00
err = obj.Commit(ctx)
require.NoError(t, err)
return obj.Info()
2018-11-30 13:50:52 +00:00
}
func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, object storj.Object, content []byte) {
readOnly, err := db.GetObjectStream(ctx, bucket, object)
require.NoError(t, err)
assert.Equal(t, object.Path, readOnly.Info().Path)
assert.Equal(t, TestBucket, readOnly.Info().Bucket.Name)
assert.Equal(t, storj.EncAESGCM, readOnly.Info().Bucket.PathCipher)
2018-11-15 15:31:33 +00:00
2018-11-26 07:39:05 +00:00
segments, more, err := readOnly.Segments(ctx, 0, 0)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
assert.False(t, more)
if !assert.Equal(t, 1, len(segments)) {
2018-11-21 14:35:53 +00:00
return
2018-11-15 15:31:33 +00:00
}
2018-11-21 14:35:53 +00:00
2018-11-15 15:31:33 +00:00
assert.EqualValues(t, 0, segments[0].Index)
assert.EqualValues(t, len(content), segments[0].Size)
if segments[0].Size > 4*memory.KiB.Int64() {
2018-11-21 14:35:53 +00:00
assertRemoteSegment(t, segments[0])
} else {
assertInlineSegment(t, segments[0], content)
}
2018-11-26 07:39:05 +00:00
download := stream.NewDownload(ctx, readOnly, streams)
2018-11-26 07:39:05 +00:00
defer func() {
err = download.Close()
assert.NoError(t, err)
}()
data := make([]byte, len(content))
n, err := io.ReadFull(download, data)
require.NoError(t, err)
2018-11-26 07:39:05 +00:00
assert.Equal(t, len(content), n)
assert.Equal(t, content, data)
2018-11-21 14:35:53 +00:00
}
2018-11-15 15:31:33 +00:00
2018-11-21 14:35:53 +00:00
func assertInlineSegment(t *testing.T, segment storj.Segment, content []byte) {
assert.Equal(t, content, segment.Inline)
assert.True(t, segment.PieceID.IsZero())
2018-11-21 14:35:53 +00:00
assert.Equal(t, 0, len(segment.Pieces))
}
func assertRemoteSegment(t *testing.T, segment storj.Segment) {
assert.Nil(t, segment.Inline)
assert.NotNil(t, segment.PieceID)
// check that piece numbers and nodes are unique
nums := make(map[byte]struct{})
nodes := make(map[string]struct{})
for _, piece := range segment.Pieces {
if _, ok := nums[piece.Number]; ok {
t.Fatalf("piece number %d is not unique", piece.Number)
}
nums[piece.Number] = struct{}{}
2018-11-15 15:31:33 +00:00
id := piece.Location.String()
2018-11-21 14:35:53 +00:00
if _, ok := nodes[id]; ok {
t.Fatalf("node id %s is not unique", id)
}
nodes[id] = struct{}{}
}
2018-11-15 15:31:33 +00:00
}
func TestDeleteObject(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
encStore := newTestEncStore(TestEncKey)
db, streams, err := newMetainfoParts(planet, encStore)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
bucket, err := db.CreateBucket(ctx, TestBucket, nil)
if !assert.NoError(t, err) {
return
}
unencryptedPath := paths.NewUnencrypted(TestFile)
encryptedPath, err := encryption.EncryptPath(bucket.Name, unencryptedPath, storj.EncAESGCM, encStore)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
for i, path := range []string{unencryptedPath.String(), encryptedPath.String()} {
upload(ctx, t, db, streams, bucket, path, nil)
2018-11-15 15:31:33 +00:00
if i < 0 {
// Enable encryption bypass
encStore.EncryptionBypass = true
}
2018-11-15 15:31:33 +00:00
err = db.DeleteObject(ctx, storj.Bucket{}, "")
assert.True(t, storj.ErrNoBucket.Has(err))
err = db.DeleteObject(ctx, bucket, "")
assert.True(t, storj.ErrNoPath.Has(err))
2018-11-15 15:31:33 +00:00
{
unexistingBucket := storj.Bucket{
Name: bucket.Name + "-not-exist",
PathCipher: bucket.PathCipher,
}
err = db.DeleteObject(ctx, unexistingBucket, TestFile)
assert.True(t, storj.ErrObjectNotFound.Has(err))
}
err = db.DeleteObject(ctx, bucket, "non-existing-file")
assert.True(t, storj.ErrObjectNotFound.Has(err))
{
invalidPathCipherBucket := storj.Bucket{
Name: bucket.Name,
PathCipher: bucket.PathCipher + 1,
}
err = db.DeleteObject(ctx, invalidPathCipherBucket, TestFile)
assert.True(t, storj.ErrObjectNotFound.Has(err))
}
err = db.DeleteObject(ctx, bucket, path)
assert.NoError(t, err)
}
2018-11-15 15:31:33 +00:00
})
}
func TestListObjectsEmpty(t *testing.T) {
Create and use an encryption.Store (#2293) * add path implementation This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be more clear about what is contained in a storj.Path at all the layers. Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864 * add encryption store This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store. Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497 * add shim around streams store This commit changes no functionality, but just reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting well formed paths that it expects. Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5 * refactor streams to use encryption store This commit changes the streams store to use the path type as well as the encryption store to handle all of it's encryption and decryption. Some changes were made to how the default key is returned in the encryption store to have it include the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report if a consume was valid: that information is no longer necessary. The kvmetainfo tests were changed to appropriately pass the subtests *testing.T rather than having the closure it executes use the parent one. The test framework now correctly reports which test did the failing. 
There are still some latent issues with listing in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect that there are also issues with paths like "/" or "//foo", but that's for another time. Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472 * use an encryption store at the upper layers Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f * fix linting failures Change-Id: Ifb8378879ad308d4d047a0483850156371a41280 * fix linting in encryption test Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa * get rid of kvmetainfo rootKey Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5 * Fix linting failure for return with else Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5 * fix some bugs adding enc store to kvmetainfo Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba * respond to review feedback Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
2019-06-24 20:23:07 +01:00
runTest(t, func(t *testing.T, ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, streams streams.Store) {
testBucketInfo, err := db.CreateBucket(ctx, TestBucket, nil)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
_, err = db.ListObjects(ctx, storj.Bucket{}, storj.ListOptions{})
2018-11-15 15:31:33 +00:00
assert.True(t, storj.ErrNoBucket.Has(err))
_, err = db.ListObjects(ctx, testBucketInfo, storj.ListOptions{})
2018-11-15 15:31:33 +00:00
assert.EqualError(t, err, "kvmetainfo: invalid direction 0")
// TODO for now we are supporting only storj.After
2018-11-15 15:31:33 +00:00
for _, direction := range []storj.ListDirection{
// storj.Forward,
2018-11-15 15:31:33 +00:00
storj.After,
} {
list, err := db.ListObjects(ctx, testBucketInfo, storj.ListOptions{Direction: direction})
2018-11-15 15:31:33 +00:00
if assert.NoError(t, err) {
assert.False(t, list.More)
assert.Equal(t, 0, len(list.Items))
}
}
})
}
// TestListObjects_EncryptionBypass uploads a set of files, then lists the
// bucket with encryption bypass enabled and checks that each returned path
// base64-decodes to an encrypted path that decrypts back to exactly one of
// the uploaded paths, with no duplicates.
func TestListObjects_EncryptionBypass(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		encStore := newTestEncStore(TestEncKey)
		db, streams, err := newMetainfoParts(planet, encStore)
		require.NoError(t, err)

		bucket, err := db.CreateBucket(ctx, TestBucket, &storj.Bucket{PathCipher: storj.EncAESGCM})
		require.NoError(t, err)

		filePaths := []string{
			"a", "aa", "b", "bb", "c",
			"a/xa", "a/xaa", "a/xb", "a/xbb", "a/xc",
			"b/ya", "b/yaa", "b/yb", "b/ybb", "b/yc",
		}
		for _, path := range filePaths {
			upload(ctx, t, db, streams, bucket, path, nil)
		}
		sort.Strings(filePaths)

		// Enable encryption bypass
		encStore.EncryptionBypass = true

		opts := options("", "", 0)
		opts.Recursive = true
		listed, err := db.ListObjects(ctx, bucket, opts)
		require.NoError(t, err)
		require.Equal(t, len(filePaths), len(listed.Items))

		seen := make(map[string]struct{})
		for _, item := range listed.Items {
			// Each component of the returned path is base64-encoded
			// ciphertext; decode and rejoin to recover the encrypted path.
			iter := paths.NewUnencrypted(item.Path).Iterator()
			var joined string
			for !iter.Done() {
				raw, err := base64.URLEncoding.DecodeString(iter.Next())
				require.NoError(t, err)
				joined += string(raw) + "/"
			}
			joined = strings.TrimRight(joined, "/")

			encryptedPath := paths.NewEncrypted(joined)
			decryptedPath, err := encryption.DecryptPath(bucket.Name, encryptedPath, storj.EncAESGCM, encStore)
			require.NoError(t, err)

			// NB: require decrypted path is a member of `filePaths`.
			idx := sort.Search(len(filePaths), func(i int) bool {
				return !paths.NewUnencrypted(filePaths[i]).Less(decryptedPath)
			})
			require.NotEqual(t, len(filePaths), idx)

			// NB: ensure each path is only seen once.
			_, dup := seen[decryptedPath.String()]
			require.False(t, dup)
			seen[decryptedPath.String()] = struct{}{}
		}
	})
}
2018-11-15 15:31:33 +00:00
func TestListObjects(t *testing.T) {
Create and use an encryption.Store (#2293) * add path implementation This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be more clear about what is contained in a storj.Path at all the layers. Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864 * add encryption store This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store. Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497 * add shim around streams store This commit changes no functionality, but just reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting well formed paths that it expects. Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5 * refactor streams to use encryption store This commit changes the streams store to use the path type as well as the encryption store to handle all of it's encryption and decryption. Some changes were made to how the default key is returned in the encryption store to have it include the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report if a consume was valid: that information is no longer necessary. The kvmetainfo tests were changed to appropriately pass the subtests *testing.T rather than having the closure it executes use the parent one. The test framework now correctly reports which test did the failing. 
There are still some latent issues with listing in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect that there are also issues with paths like "/" or "//foo", but that's for another time. Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472 * use an encryption store at the upper layers Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f * fix linting failures Change-Id: Ifb8378879ad308d4d047a0483850156371a41280 * fix linting in encryption test Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa * get rid of kvmetainfo rootKey Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5 * Fix linting failure for return with else Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5 * fix some bugs adding enc store to kvmetainfo Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba * respond to review feedback Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
2019-06-24 20:23:07 +01:00
runTest(t, func(t *testing.T, ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, streams streams.Store) {
bucket, err := db.CreateBucket(ctx, TestBucket, &storj.Bucket{PathCipher: storj.EncNull})
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
filePaths := []string{
"a", "aa", "b", "bb", "c",
"a/xa", "a/xaa", "a/xb", "a/xbb", "a/xc",
"b/ya", "b/yaa", "b/yb", "b/ybb", "b/yc",
}
2018-11-30 13:50:52 +00:00
2018-11-15 15:31:33 +00:00
for _, path := range filePaths {
upload(ctx, t, db, streams, bucket, path, nil)
2018-11-15 15:31:33 +00:00
}
otherBucket, err := db.CreateBucket(ctx, "otherbucket", nil)
require.NoError(t, err)
2018-11-15 15:31:33 +00:00
upload(ctx, t, db, streams, otherBucket, "file-in-other-bucket", nil)
2018-11-15 15:31:33 +00:00
for i, tt := range []struct {
options storj.ListOptions
more bool
result []string
}{
{
options: options("", "", 0),
2018-11-15 15:31:33 +00:00
result: []string{"a", "a/", "aa", "b", "b/", "bb", "c"},
}, {
options: options("", "`", 0),
2018-11-15 15:31:33 +00:00
result: []string{"a", "a/", "aa", "b", "b/", "bb", "c"},
}, {
options: options("", "b", 0),
2018-11-15 15:31:33 +00:00
result: []string{"b/", "bb", "c"},
}, {
options: options("", "c", 0),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: options("", "ca", 0),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: options("", "", 1),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"a"},
}, {
options: options("", "`", 1),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"a"},
}, {
options: options("", "aa", 1),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"b"},
}, {
options: options("", "c", 1),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: options("", "ca", 1),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: options("", "", 2),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"a", "a/"},
}, {
options: options("", "`", 2),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"a", "a/"},
}, {
options: options("", "aa", 2),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"b", "b/"},
}, {
options: options("", "bb", 2),
2018-11-15 15:31:33 +00:00
result: []string{"c"},
}, {
options: options("", "c", 2),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: options("", "ca", 2),
2018-11-15 15:31:33 +00:00
result: []string{},
}, {
options: optionsRecursive("", "", 0),
2018-11-15 15:31:33 +00:00
result: []string{"a", "a/xa", "a/xaa", "a/xb", "a/xbb", "a/xc", "aa", "b", "b/ya", "b/yaa", "b/yb", "b/ybb", "b/yc", "bb", "c"},
}, {
options: options("a", "", 0),
2018-11-15 15:31:33 +00:00
result: []string{"xa", "xaa", "xb", "xbb", "xc"},
}, {
options: options("a/", "", 0),
2018-11-15 15:31:33 +00:00
result: []string{"xa", "xaa", "xb", "xbb", "xc"},
}, {
options: options("a/", "xb", 0),
2018-11-15 15:31:33 +00:00
result: []string{"xbb", "xc"},
}, {
options: optionsRecursive("", "a/xbb", 5),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"a/xc", "aa", "b", "b/ya", "b/yaa"},
}, {
options: options("a/", "xaa", 2),
2018-11-15 15:31:33 +00:00
more: true,
result: []string{"xb", "xbb"},
},
} {
errTag := fmt.Sprintf("%d. %+v", i, tt)
list, err := db.ListObjects(ctx, bucket, tt.options)
2018-11-15 15:31:33 +00:00
if assert.NoError(t, err, errTag) {
assert.Equal(t, tt.more, list.More, errTag)
for i, item := range list.Items {
assert.Equal(t, tt.result[i], item.Path, errTag)
assert.Equal(t, TestBucket, item.Bucket.Name, errTag)
assert.Equal(t, storj.EncNull, item.Bucket.PathCipher, errTag)
}
2018-11-15 15:31:33 +00:00
}
}
})
}
func options(prefix, cursor string, limit int) storj.ListOptions {
2018-11-15 15:31:33 +00:00
return storj.ListOptions{
Prefix: prefix,
Cursor: cursor,
Direction: storj.After,
2018-11-15 15:31:33 +00:00
Limit: limit,
}
}
func optionsRecursive(prefix, cursor string, limit int) storj.ListOptions {
2018-11-15 15:31:33 +00:00
return storj.ListOptions{
Prefix: prefix,
Cursor: cursor,
Direction: storj.After,
2018-11-15 15:31:33 +00:00
Limit: limit,
Recursive: true,
}
}