storj/pkg/miniogw/gateway_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package miniogw
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"strings"
"testing"
"time"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/hash"
"github.com/stretchr/testify/assert"
"github.com/vivint/infectious"
"go.uber.org/zap/zaptest"
"storj.io/storj/internal/memory"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
libuplink "storj.io/storj/lib/uplink"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/console"
"storj.io/storj/uplink/ecclient"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo/kvmetainfo"
"storj.io/storj/uplink/storage/segments"
"storj.io/storj/uplink/storage/streams"
)
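// Fixed bucket names, file names, and encryption key used as fixtures throughout these tests.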
const (
TestEncKey = "test-encryption-key"
TestBucket = "test-bucket"
TestFile = "test-file"
DestBucket = "dest-bucket"
DestFile = "dest-file"
)
var TestAPIKey = "test-api-key"
func TestMakeBucketWithLocation(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when creating bucket with empty name
err := layer.MakeBucketWithLocation(ctx, "", "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Create a bucket with the Minio API
err = layer.MakeBucketWithLocation(ctx, TestBucket, "")
assert.NoError(t, err)
// Check that the bucket is created using the Metainfo API
bucket, err := m.GetBucket(ctx, TestBucket)
assert.NoError(t, err)
assert.Equal(t, TestBucket, bucket.Name)
assert.True(t, time.Since(bucket.Created) < 1*time.Minute)
assert.Equal(t, storj.EncAESGCM, bucket.PathCipher)
// Check the error when trying to create an existing bucket
err = layer.MakeBucketWithLocation(ctx, TestBucket, "")
assert.Equal(t, minio.BucketAlreadyExists{Bucket: TestBucket}, err)
})
}
func TestGetBucketInfo(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when getting info about bucket with empty name
_, err := layer.GetBucketInfo(ctx, "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when getting info about non-existing bucket
_, err = layer.GetBucketInfo(ctx, TestBucket)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket using the Metainfo API
info, err := m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the bucket info using the Minio API
bucket, err := layer.GetBucketInfo(ctx, TestBucket)
if assert.NoError(t, err) {
assert.Equal(t, TestBucket, bucket.Name)
assert.Equal(t, info.Created, bucket.Created)
}
})
}
func TestDeleteBucket(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when deleting bucket with empty name
err := layer.DeleteBucket(ctx, "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when deleting non-existing bucket
err = layer.DeleteBucket(ctx, TestBucket)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create a bucket with a file using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
_, err = createFile(ctx, m, strms, TestBucket, TestFile, nil, nil)
assert.NoError(t, err)
// Check the error when deleting non-empty bucket
err = layer.DeleteBucket(ctx, TestBucket)
assert.Equal(t, minio.BucketNotEmpty{Bucket: TestBucket}, err)
// Delete the file using the Metainfo API, so the bucket becomes empty
err = m.DeleteObject(ctx, TestBucket, TestFile)
assert.NoError(t, err)
// Delete the bucket using the Minio API
err = layer.DeleteBucket(ctx, TestBucket)
assert.NoError(t, err)
// Check that the bucket is deleted using the Metainfo API
_, err = m.GetBucket(ctx, TestBucket)
assert.True(t, storj.ErrBucketNotFound.Has(err))
})
}
func TestListBuckets(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check that an empty list is returned if no buckets exist yet
bucketInfos, err := layer.ListBuckets(ctx)
assert.NoError(t, err)
assert.Empty(t, bucketInfos)
// Create all expected buckets using the Metainfo API
bucketNames := []string{"bucket-1", "bucket-2", "bucket-3"}
buckets := make([]storj.Bucket, len(bucketNames))
for i, bucketName := range bucketNames {
bucket, err := m.CreateBucket(ctx, bucketName, nil)
buckets[i] = bucket
assert.NoError(t, err)
}
// Check that the expected buckets can be listed using the Minio API
bucketInfos, err = layer.ListBuckets(ctx)
if assert.NoError(t, err) {
assert.Equal(t, len(bucketNames), len(bucketInfos))
for i, bucketInfo := range bucketInfos {
assert.Equal(t, bucketNames[i], bucketInfo.Name)
assert.Equal(t, buckets[i].Created, bucketInfo.Created)
}
}
})
}
func TestPutObject(t *testing.T) {
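// Wrap the test payload in a hash.Reader carrying the precomputed MD5 and SHA256 hex digests of "test".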
data, err := hash.NewReader(bytes.NewReader([]byte("test")),
int64(len("test")),
"098f6bcd4621d373cade4e832627b4f6",
"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08")
if err != nil {
t.Fatal(err)
}
metadata := map[string]string{
"content-type": "media/foo",
"key1": "value1",
"key2": "value2",
}
serMetaInfo := pb.SerializableMeta{
ContentType: metadata["content-type"],
UserDefined: map[string]string{
"key1": metadata["key1"],
"key2": metadata["key2"],
},
}
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when putting an object to a bucket with empty name
_, err := layer.PutObject(ctx, "", "", nil, nil)
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when putting an object to a non-existing bucket
_, err = layer.PutObject(ctx, TestBucket, TestFile, nil, nil)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the error when putting an object with empty name
_, err = layer.PutObject(ctx, TestBucket, "", nil, nil)
assert.Equal(t, minio.ObjectNameInvalid{Bucket: TestBucket}, err)
// Put the object using the Minio API
info, err := layer.PutObject(ctx, TestBucket, TestFile, data, metadata)
if assert.NoError(t, err) {
assert.Equal(t, TestFile, info.Name)
assert.Equal(t, TestBucket, info.Bucket)
assert.False(t, info.IsDir)
assert.True(t, time.Since(info.ModTime) < 1*time.Minute)
assert.Equal(t, data.Size(), info.Size)
// assert.Equal(t, data.SHA256HexString(), info.ETag) TODO: when we start calculating checksums
assert.Equal(t, serMetaInfo.ContentType, info.ContentType)
assert.Equal(t, serMetaInfo.UserDefined, info.UserDefined)
}
// Check that the object is uploaded using the Metainfo API
obj, err := m.GetObject(ctx, TestBucket, TestFile)
if assert.NoError(t, err) {
assert.Equal(t, TestFile, obj.Path)
assert.Equal(t, TestBucket, obj.Bucket.Name)
assert.False(t, obj.IsPrefix)
assert.Equal(t, info.ModTime, obj.Modified)
assert.Equal(t, info.Size, obj.Size)
assert.Equal(t, info.ETag, hex.EncodeToString(obj.Checksum))
assert.Equal(t, info.ContentType, obj.ContentType)
assert.Equal(t, info.UserDefined, obj.Metadata)
}
})
}
func TestGetObjectInfo(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when getting an object from a bucket with empty name
_, err := layer.GetObjectInfo(ctx, "", "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when getting an object from non-existing bucket
_, err = layer.GetObjectInfo(ctx, TestBucket, TestFile)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the error when getting an object with empty name
_, err = layer.GetObjectInfo(ctx, TestBucket, "")
assert.Equal(t, minio.ObjectNameInvalid{Bucket: TestBucket}, err)
// Check the error when getting a non-existing object
_, err = layer.GetObjectInfo(ctx, TestBucket, TestFile)
assert.Equal(t, minio.ObjectNotFound{Bucket: TestBucket, Object: TestFile}, err)
// Create the object using the Metainfo API
createInfo := storj.CreateObject{
ContentType: "text/plain",
Metadata: map[string]string{"key1": "value1", "key2": "value2"},
}
obj, err := createFile(ctx, m, strms, TestBucket, TestFile, &createInfo, []byte("test"))
assert.NoError(t, err)
// Get the object info using the Minio API
info, err := layer.GetObjectInfo(ctx, TestBucket, TestFile)
if assert.NoError(t, err) {
assert.Equal(t, TestFile, info.Name)
assert.Equal(t, TestBucket, info.Bucket)
assert.False(t, info.IsDir)
assert.Equal(t, obj.Modified, info.ModTime)
assert.Equal(t, obj.Size, info.Size)
assert.Equal(t, hex.EncodeToString(obj.Checksum), info.ETag)
assert.Equal(t, createInfo.ContentType, info.ContentType)
assert.Equal(t, createInfo.Metadata, info.UserDefined)
}
})
}
func TestGetObject(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when getting an object from a bucket with empty name
err := layer.GetObject(ctx, "", "", 0, 0, nil, "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when getting an object from non-existing bucket
err = layer.GetObject(ctx, TestBucket, TestFile, 0, 0, nil, "")
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the error when getting an object with empty name
err = layer.GetObject(ctx, TestBucket, "", 0, 0, nil, "")
assert.Equal(t, minio.ObjectNameInvalid{Bucket: TestBucket}, err)
// Check the error when getting a non-existing object
err = layer.GetObject(ctx, TestBucket, TestFile, 0, 0, nil, "")
assert.Equal(t, minio.ObjectNotFound{Bucket: TestBucket, Object: TestFile}, err)
// Create the object using the Metainfo API
createInfo := storj.CreateObject{
ContentType: "text/plain",
Metadata: map[string]string{"key1": "value1", "key2": "value2"},
}
_, err = createFile(ctx, m, strms, TestBucket, TestFile, &createInfo, []byte("abcdef"))
assert.NoError(t, err)
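// Exercise ranged reads with various offset/length combinations, including invalid ranges.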
for i, tt := range []struct {
offset, length int64
substr string
err error
}{
{offset: 0, length: 0, substr: ""},
{offset: 3, length: 0, substr: ""},
{offset: 0, length: -1, substr: "abcdef"},
{offset: 0, length: 6, substr: "abcdef"},
{offset: 0, length: 5, substr: "abcde"},
{offset: 0, length: 4, substr: "abcd"},
{offset: 1, length: 4, substr: "bcde"},
{offset: 2, length: 4, substr: "cdef"},
{offset: 0, length: 7, substr: "", err: minio.InvalidRange{OffsetBegin: 0, OffsetEnd: 7, ResourceSize: 6}},
{offset: -1, length: 7, substr: "", err: minio.InvalidRange{OffsetBegin: -1, OffsetEnd: 6, ResourceSize: 6}},
{offset: 0, length: -2, substr: "", err: minio.InvalidRange{OffsetBegin: 0, OffsetEnd: -2, ResourceSize: 6}},
} {
errTag := fmt.Sprintf("%d. %+v", i, tt)
var buf bytes.Buffer
// Read the requested object range using the Minio API
err = layer.GetObject(ctx, TestBucket, TestFile, tt.offset, tt.length, &buf, "")
if tt.err != nil {
assert.Equal(t, tt.err, err, errTag)
} else if assert.NoError(t, err) {
assert.Equal(t, tt.substr, buf.String(), errTag)
}
}
})
}
func TestCopyObject(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when copying an object from a bucket with empty name
_, err := layer.CopyObject(ctx, "", TestFile, DestBucket, DestFile, minio.ObjectInfo{})
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when copying an object from non-existing bucket
_, err = layer.CopyObject(ctx, TestBucket, TestFile, DestBucket, DestFile, minio.ObjectInfo{})
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the source bucket using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the error when copying an object with empty name
_, err = layer.CopyObject(ctx, TestBucket, "", DestBucket, DestFile, minio.ObjectInfo{})
assert.Equal(t, minio.ObjectNameInvalid{Bucket: TestBucket}, err)
// Create the source object using the Metainfo API
createInfo := storj.CreateObject{
ContentType: "text/plain",
Metadata: map[string]string{"key1": "value1", "key2": "value2"},
}
obj, err := createFile(ctx, m, strms, TestBucket, TestFile, &createInfo, []byte("test"))
assert.NoError(t, err)
// Get the source object info using the Minio API
srcInfo, err := layer.GetObjectInfo(ctx, TestBucket, TestFile)
assert.NoError(t, err)
// Check the error when copying an object to a bucket with empty name
_, err = layer.CopyObject(ctx, TestBucket, TestFile, "", DestFile, srcInfo)
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when copying an object to a non-existing bucket
_, err = layer.CopyObject(ctx, TestBucket, TestFile, DestBucket, DestFile, srcInfo)
assert.Equal(t, minio.BucketNotFound{Bucket: DestBucket}, err)
// Create the destination bucket using the Metainfo API
_, err = m.CreateBucket(ctx, DestBucket, nil)
assert.NoError(t, err)
// Copy the object using the Minio API
info, err := layer.CopyObject(ctx, TestBucket, TestFile, DestBucket, DestFile, srcInfo)
if assert.NoError(t, err) {
assert.Equal(t, DestFile, info.Name)
assert.Equal(t, DestBucket, info.Bucket)
assert.False(t, info.IsDir)
assert.True(t, info.ModTime.Sub(obj.Modified) < 1*time.Minute)
assert.Equal(t, obj.Size, info.Size)
assert.Equal(t, hex.EncodeToString(obj.Checksum), info.ETag)
assert.Equal(t, createInfo.ContentType, info.ContentType)
assert.Equal(t, createInfo.Metadata, info.UserDefined)
}
// Check that the destination object is uploaded using the Metainfo API
obj, err = m.GetObject(ctx, DestBucket, DestFile)
if assert.NoError(t, err) {
assert.Equal(t, DestFile, obj.Path)
assert.Equal(t, DestBucket, obj.Bucket.Name)
assert.False(t, obj.IsPrefix)
assert.Equal(t, info.ModTime, obj.Modified)
assert.Equal(t, info.Size, obj.Size)
assert.Equal(t, info.ETag, hex.EncodeToString(obj.Checksum))
assert.Equal(t, info.ContentType, obj.ContentType)
assert.Equal(t, info.UserDefined, obj.Metadata)
}
})
}
func TestDeleteObject(t *testing.T) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when deleting an object from a bucket with empty name
err := layer.DeleteObject(ctx, "", "")
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when deleting an object from non-existing bucket
err = layer.DeleteObject(ctx, TestBucket, TestFile)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, nil)
assert.NoError(t, err)
// Check the error when deleting an object with empty name
err = layer.DeleteObject(ctx, TestBucket, "")
assert.Equal(t, minio.ObjectNameInvalid{Bucket: TestBucket}, err)
// Check the error when deleting a non-existing object
err = layer.DeleteObject(ctx, TestBucket, TestFile)
assert.Equal(t, minio.ObjectNotFound{Bucket: TestBucket, Object: TestFile}, err)
// Create the object using the Metainfo API
_, err = createFile(ctx, m, strms, TestBucket, TestFile, nil, nil)
assert.NoError(t, err)
// Delete the object using the Minio API
err = layer.DeleteObject(ctx, TestBucket, TestFile)
assert.NoError(t, err)
// Check that the object is deleted using the Metainfo API
_, err = m.GetObject(ctx, TestBucket, TestFile)
assert.True(t, storj.ErrObjectNotFound.Has(err))
})
}
func TestListObjects(t *testing.T) {
testListObjects(t, func(ctx context.Context, layer minio.ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int) ([]string, []minio.ObjectInfo, bool, error) {
list, err := layer.ListObjects(ctx, TestBucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return nil, nil, false, err
}
return list.Prefixes, list.Objects, list.IsTruncated, nil
})
}
func TestListObjectsV2(t *testing.T) {
testListObjects(t, func(ctx context.Context, layer minio.ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int) ([]string, []minio.ObjectInfo, bool, error) {
list, err := layer.ListObjectsV2(ctx, TestBucket, prefix, marker, delimiter, maxKeys, false, "")
if err != nil {
return nil, nil, false, err
}
return list.Prefixes, list.Objects, list.IsTruncated, nil
})
}
func testListObjects(t *testing.T, listObjects func(context.Context, minio.ObjectLayer, string, string, string, string, int) ([]string, []minio.ObjectInfo, bool, error)) {
runTest(t, func(ctx context.Context, layer minio.ObjectLayer, m storj.Metainfo, strms streams.Store) {
// Check the error when listing objects with unsupported delimiter
_, err := layer.ListObjects(ctx, TestBucket, "", "", "#", 0)
assert.Equal(t, minio.UnsupportedDelimiter{Delimiter: "#"}, err)
// Check the error when listing objects in a bucket with empty name
_, err = layer.ListObjects(ctx, "", "", "", "/", 0)
assert.Equal(t, minio.BucketNameInvalid{}, err)
// Check the error when listing objects in a non-existing bucket
_, err = layer.ListObjects(ctx, TestBucket, "", "", "", 0)
assert.Equal(t, minio.BucketNotFound{Bucket: TestBucket}, err)
// Create the bucket and files using the Metainfo API
_, err = m.CreateBucket(ctx, TestBucket, &storj.Bucket{PathCipher: storj.EncNull})
assert.NoError(t, err)
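// Create a mix of top-level objects and objects under the "a/" and "b/" prefixes.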
filePaths := []string{
"a", "aa", "b", "bb", "c",
"a/xa", "a/xaa", "a/xb", "a/xbb", "a/xc",
"b/ya", "b/yaa", "b/yb", "b/ybb", "b/yc",
}
files := make(map[string]storj.Object, len(filePaths))
createInfo := storj.CreateObject{
ContentType: "text/plain",
Metadata: map[string]string{"key1": "value1", "key2": "value2"},
}
for _, filePath := range filePaths {
file, err := createFile(ctx, m, strms, TestBucket, filePath, &createInfo, []byte("test"))
files[filePath] = file
assert.NoError(t, err)
}
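// Table of list requests exercising prefix, marker, delimiter, and maxKeys pagination.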
for i, tt := range []struct {
prefix string
marker string
delimiter string
maxKeys int
more bool
prefixes []string
objects []string
}{
{
delimiter: "/",
prefixes: []string{"a/", "b/"},
objects: []string{"a", "aa", "b", "bb", "c"},
}, {
marker: "`",
delimiter: "/",
prefixes: []string{"a/", "b/"},
objects: []string{"a", "aa", "b", "bb", "c"},
}, {
marker: "b",
delimiter: "/",
prefixes: []string{"b/"},
objects: []string{"bb", "c"},
}, {
marker: "c",
delimiter: "/",
}, {
marker: "ca",
delimiter: "/",
}, {
delimiter: "/",
maxKeys: 1,
more: true,
objects: []string{"a"},
}, {
marker: "`",
delimiter: "/",
maxKeys: 1,
more: true,
objects: []string{"a"},
}, {
marker: "aa",
delimiter: "/",
maxKeys: 1,
more: true,
objects: []string{"b"},
}, {
marker: "c",
delimiter: "/",
maxKeys: 1,
}, {
marker: "ca",
delimiter: "/",
maxKeys: 1,
}, {
delimiter: "/",
maxKeys: 2,
more: true,
prefixes: []string{"a/"},
objects: []string{"a"},
}, {
marker: "`",
delimiter: "/",
maxKeys: 2,
more: true,
prefixes: []string{"a/"},
objects: []string{"a"},
}, {
marker: "aa",
delimiter: "/",
maxKeys: 2,
more: true,
prefixes: []string{"b/"},
objects: []string{"b"},
}, {
marker: "bb",
delimiter: "/",
maxKeys: 2,
objects: []string{"c"},
}, {
marker: "c",
delimiter: "/",
maxKeys: 2,
}, {
marker: "ca",
delimiter: "/",
maxKeys: 2,
}, {
objects: []string{"a", "a/xa", "a/xaa", "a/xb", "a/xbb", "a/xc", "aa", "b", "b/ya", "b/yaa", "b/yb", "b/ybb", "b/yc", "bb", "c"},
}, {
prefix: "a",
delimiter: "/",
objects: []string{"xa", "xaa", "xb", "xbb", "xc"},
}, {
prefix: "a/",
delimiter: "/",
objects: []string{"xa", "xaa", "xb", "xbb", "xc"},
}, {
prefix: "a/",
marker: "xb",
delimiter: "/",
objects: []string{"xbb", "xc"},
}, {
marker: "a/xbb",
maxKeys: 5,
more: true,
objects: []string{"a/xc", "aa", "b", "b/ya", "b/yaa"},
}, {
prefix: "a/",
marker: "xaa",
delimiter: "/",
maxKeys: 2,
more: true,
objects: []string{"xb", "xbb"},
},
} {
errTag := fmt.Sprintf("%d. %+v", i, tt)
// Check that the expected objects can be listed using the Minio API
prefixes, objects, isTruncated, err := listObjects(ctx, layer, TestBucket, tt.prefix, tt.marker, tt.delimiter, tt.maxKeys)
if assert.NoError(t, err, errTag) {
assert.Equal(t, tt.more, isTruncated, errTag)
assert.Equal(t, tt.prefixes, prefixes, errTag)
assert.Equal(t, len(tt.objects), len(objects), errTag)
for i, objectInfo := range objects {
path := objectInfo.Name
if tt.prefix != "" {
path = storj.JoinPaths(strings.TrimSuffix(tt.prefix, "/"), path)
}
obj := files[path]
assert.Equal(t, tt.objects[i], objectInfo.Name, errTag)
assert.Equal(t, TestBucket, objectInfo.Bucket, errTag)
assert.False(t, objectInfo.IsDir, errTag)
assert.Equal(t, obj.Modified, objectInfo.ModTime, errTag)
assert.Equal(t, obj.Size, objectInfo.Size, errTag)
assert.Equal(t, hex.EncodeToString(obj.Checksum), objectInfo.ETag, errTag)
assert.Equal(t, obj.ContentType, objectInfo.ContentType, errTag)
assert.Equal(t, obj.Metadata, objectInfo.UserDefined, errTag)
}
}
}
})
}
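// runTest starts an in-process test planet (1 satellite, 4 storage nodes, 1 uplink),
// initializes the gateway layer against it, and passes the layer to the given test function.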
func runTest(t *testing.T, test func(context.Context, minio.ObjectLayer, storj.Metainfo, streams.Store)) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 4, 1)
if !assert.NoError(t, err) {
return
}
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
layer, m, strms, err := initEnv(ctx, t, planet)
if !assert.NoError(t, err) {
return
}
test(ctx, layer, m, strms)
}
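// initEnv registers a test project and API key on the satellite, dials the metainfo service,
// and wires up the erasure coding, segment, and stream stores needed to build the gateway layer.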
func initEnv(ctx context.Context, t *testing.T, planet *testplanet.Planet) (minio.ObjectLayer, storj.Metainfo, streams.Store, error) {
// TODO(kaloyan): We should have a better way for configuring the Satellite's API Key
// add project to satisfy constraint
project, err := planet.Satellites[0].DB.Console().Projects().Insert(ctx, &console.Project{
Name: "testProject",
})
if err != nil {
return nil, nil, nil, err
}
apiKey, err := macaroon.NewAPIKey([]byte("testSecret"))
if err != nil {
return nil, nil, nil, err
}
apiKeyInfo := console.APIKeyInfo{
ProjectID: project.ID,
Name: "testKey",
Secret: []byte("testSecret"),
}
// add api key to db
_, err = planet.Satellites[0].DB.Console().APIKeys().Create(ctx, apiKey.Head(), apiKeyInfo)
if err != nil {
return nil, nil, nil, err
}
m, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey.Serialize())
if err != nil {
return nil, nil, nil, err
}
// TODO(leak): close m metainfo.Client somehow
ec := ecclient.NewClient(planet.Uplinks[0].Log.Named("ecclient"), planet.Uplinks[0].Transport, 0)
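// Redundancy for the test: 2-of-4 Reed-Solomon with 1 KiB erasure shares,
// repair threshold 3 and optimal threshold 4.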
fc, err := infectious.NewFEC(2, 4)
if err != nil {
return nil, nil, nil, err
}
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KiB.Int()), 3, 4)
if err != nil {
return nil, nil, nil, err
}
segments := segments.NewSegmentStore(m, ec, rs, 4*memory.KiB.Int(), 8*memory.MiB.Int64())
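// Derive the encryption access and store from a fixed test key.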
var encKey storj.Key
copy(encKey[:], TestEncKey)
access := libuplink.NewEncryptionAccessWithDefaultKey(encKey)
encStore := access.Store()
blockSize := rs.StripeSize()
inlineThreshold := 4 * memory.KiB.Int()
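// Build the stream store (AES-GCM data encryption) on top of the segment store.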
strms, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), encStore, blockSize, storj.EncAESGCM, inlineThreshold)
if err != nil {
return nil, nil, nil, err
}
p, err := kvmetainfo.SetupProject(m)
if err != nil {
return nil, nil, nil, err
}
kvm := kvmetainfo.New(p, m, strms, segments, encStore)
cfg := libuplink.Config{}
cfg.Volatile.Log = zaptest.NewLogger(t)
cfg.Volatile.TLS.SkipPeerCAWhitelist = true
uplink, err := libuplink.NewUplink(ctx, &cfg)
if err != nil {
return nil, nil, nil, err
}
parsedAPIKey, err := libuplink.ParseAPIKey(apiKey.Serialize())
if err != nil {
return nil, nil, nil, err
}
proj, err := uplink.OpenProject(ctx, planet.Satellites[0].Addr(), parsedAPIKey)
if err != nil {
return nil, nil, nil, err
}
stripeSize := rs.StripeSize()
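// Construct the gateway with the same AES-GCM encryption and Reed-Solomon
// redundancy parameters used by the stores above.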
gateway := NewStorjGateway(
proj,
access,
storj.EncAESGCM,
storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
BlockSize: int32(stripeSize),
},
storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
RequiredShares: int16(rs.RequiredCount()),
RepairShares: int16(rs.RepairThreshold()),
OptimalShares: int16(rs.OptimalThreshold()),
TotalShares: int16(rs.TotalCount()),
ShareSize: int32(rs.ErasureShareSize()),
},
8*memory.MiB,
)
layer, err := gateway.NewGatewayLayer(auth.Credentials{})
return layer, kvm, strms, err
}
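// createFile uploads data to the given bucket and path via the Metainfo and streams APIs
// and returns the committed object's info.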
func createFile(ctx context.Context, m storj.Metainfo, strms streams.Store, bucket string, path storj.Path, createInfo *storj.CreateObject, data []byte) (storj.Object, error) {
mutableObject, err := m.CreateObject(ctx, bucket, path, createInfo)
if err != nil {
return storj.Object{}, err
}
err = upload(ctx, strms, mutableObject, bytes.NewReader(data))
if err != nil {
return storj.Object{}, err
}
err = mutableObject.Commit(ctx)
if err != nil {
return storj.Object{}, err
}
return mutableObject.Info(), nil
}