storj/lib/uplink/bucket_attrs_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package uplink_test

import (
	"bytes"
	"io/ioutil"
	"testing"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/lib/uplink"
	"storj.io/storj/private/testplanet"
)

type testConfig struct {
	uplinkCfg uplink.Config
}
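
// testPlanetWithLibUplink runs a testplanet instance (one satellite, five
// storage nodes, one uplink), opens a lib/uplink Project against the
// satellite using the supplied config, and hands that project to testFunc.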
func testPlanetWithLibUplink(t *testing.T, cfg testConfig,
	testFunc func(*testing.T, *testcontext.Context, *testplanet.Planet, *uplink.Project)) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// we only use testUplink for the free API key, until such time
		// as testplanet makes it easy to get another way :D
		testUplink := planet.Uplinks[0]
		satellite := planet.Satellites[0]

		apiKey, err := uplink.ParseAPIKey(testUplink.APIKey[satellite.ID()].Serialize())
		if err != nil {
			t.Fatalf("could not parse API key from testplanet: %v", err)
		}

		up, err := uplink.NewUplink(ctx, &cfg.uplinkCfg)
		if err != nil {
			t.Fatalf("could not create new Uplink object: %v", err)
		}
		defer ctx.Check(up.Close)

		proj, err := up.OpenProject(ctx, satellite.Addr(), apiKey)
		if err != nil {
			t.Fatalf("could not open project from uplink under testplanet: %v", err)
		}
		defer ctx.Check(proj.Close)

		testFunc(t, ctx, planet, proj)
	})
}

// check that partner bucket attributes are stored and retrieved correctly.
func TestBucket_PartnerAttribution(t *testing.T) {
	var (
		access     = uplink.NewEncryptionAccessWithDefaultKey(storj.Key{0, 1, 2, 3, 4})
		bucketName = "mightynein"
	)

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		apikey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[satellite.ID()].Serialize())
		require.NoError(t, err)

		partnerID := testrand.UUID()
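
		// An uplink configured without a partner ID should create the bucket
		// with an empty PartnerID; creating the same bucket again must fail.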
t.Run("without partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucketInfo, err := project.CreateBucket(ctx, bucketName, nil)
require.NoError(t, err)
assert.True(t, bucketInfo.PartnerID.IsZero())
_, err = project.CreateBucket(ctx, bucketName, nil)
require.Error(t, err)
})
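
		// Opening the existing bucket with an uplink that carries a partner ID
		// should attach that partner ID to the bucket's attribution.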
t.Run("open with partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.PartnerID = partnerID.String()
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
assert.Equal(t, bucketInfo.PartnerID.String(), config.Volatile.PartnerID)
})
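
		// Once attributed, the stored partner ID should not be replaced when
		// an uplink opens the bucket with a different partner ID.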
t.Run("open with different partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.PartnerID = testrand.UUID().String()
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
assert.NotEqual(t, bucketInfo.PartnerID.String(), config.Volatile.PartnerID)
})
})
}

// check that user agent bucket attribution is stored and retrieved correctly.
func TestBucket_UserAgent(t *testing.T) {
	var (
		access     = uplink.NewEncryptionAccessWithDefaultKey(storj.Key{0, 1, 2, 3, 4})
		bucketName = "mightynein"
	)

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		apikey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[satellite.ID()].Serialize())
		require.NoError(t, err)
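
		// With no user agent configured, the created bucket carries no
		// attribution, and creating the same bucket a second time must fail.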
t.Run("without user agent", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucketInfo, err := project.CreateBucket(ctx, bucketName, nil)
require.NoError(t, err)
assert.True(t, bucketInfo.PartnerID.IsZero())
_, err = project.CreateBucket(ctx, bucketName, nil)
require.Error(t, err)
})
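
		// Opening the bucket with the "Zenko" user agent is expected to
		// attribute the bucket to the fixed partner UUID asserted below.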
t.Run("open with user agent", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.UserAgent = "Zenko"
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
partnerID, err := uuid.Parse("8cd605fa-ad00-45b6-823e-550eddc611d6")
require.NoError(t, err)
assert.Equal(t, *partnerID, bucketInfo.PartnerID)
})
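
		// A later open with a different user agent ("Temporal") must not
		// change the attribution already stored for the bucket.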
t.Run("open with different user agent", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.Log = zaptest.NewLogger(t)
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.UserAgent = "Temporal"
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
partnerID, err := uuid.Parse("8cd605fa-ad00-45b6-823e-550eddc611d6")
require.NoError(t, err)
assert.Equal(t, *partnerID, bucketInfo.PartnerID)
})
})
}

// check that bucket attributes are stored and retrieved correctly.
func TestBucketAttrs(t *testing.T) {
	var (
		access          = uplink.NewEncryptionAccessWithDefaultKey(storj.Key{0, 1, 2, 3, 4})
		bucketName      = "mightynein"
		shareSize       = memory.KiB.Int32()
		requiredShares  = 2
		stripeSize      = shareSize * int32(requiredShares)
		stripesPerBlock = 2
		inBucketConfig  = uplink.BucketConfig{
			PathCipher: storj.EncSecretBox,
			EncryptionParameters: storj.EncryptionParameters{
				CipherSuite: storj.EncAESGCM,
				BlockSize:   int32(stripesPerBlock) * stripeSize,
			},
			Volatile: struct {
				RedundancyScheme storj.RedundancyScheme
				SegmentsSize     memory.Size
			}{
				RedundancyScheme: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      shareSize,
					RequiredShares: int16(requiredShares),
					RepairShares:   3,
					OptimalShares:  4,
					TotalShares:    5,
				},
				SegmentsSize: 688894,
			},
		}
	)
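
	// BlockSize is stripesPerBlock * stripeSize, and stripeSize is
	// shareSize * requiredShares; the whole inBucketConfig is expected to
	// come back unchanged from OpenBucket below.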

	cfg := testConfig{}
	cfg.uplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = true

	testPlanetWithLibUplink(t, cfg,
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, proj *uplink.Project) {
			before := time.Now()
			bucket, err := proj.CreateBucket(ctx, bucketName, &inBucketConfig)
			require.NoError(t, err)

			assert.Equal(t, bucketName, bucket.Name)
			assert.Falsef(t, bucket.Created.Before(before), "impossible creation time %v", bucket.Created)

			got, err := proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			defer ctx.Check(got.Close)

			assert.Equal(t, bucketName, got.Name)
			assert.Equal(t, inBucketConfig.PathCipher, got.PathCipher)
			assert.Equal(t, inBucketConfig.EncryptionParameters, got.EncryptionParameters)
			assert.Equal(t, inBucketConfig.Volatile.RedundancyScheme, got.Volatile.RedundancyScheme)
			assert.Equal(t, inBucketConfig.Volatile.SegmentsSize, got.Volatile.SegmentsSize)
			assert.Equal(t, inBucketConfig, got.BucketConfig)

			err = proj.DeleteBucket(ctx, bucketName)
			require.NoError(t, err)
		})
}

// check that when uploading objects without any specific RS or encryption
// config, the bucket attributes apply. also when uploading objects _with_ more
// specific config, the specific config applies and not the bucket attrs.
func TestBucketAttrsApply(t *testing.T) {
	var (
		access          = uplink.NewEncryptionAccessWithDefaultKey(storj.Key{0, 1, 2, 3, 4})
		bucketName      = "dodecahedron"
		objectPath1     = "vax/vex/vox"
		objectContents  = "Willingham,Ray,Jaffe,Johnson,Riegel,O'Brien,Bailey,Mercer"
		shareSize       = 3 * memory.KiB.Int32()
		requiredShares  = 3
		stripeSize      = shareSize * int32(requiredShares)
		stripesPerBlock = 2
		inBucketConfig  = uplink.BucketConfig{
			PathCipher: storj.EncSecretBox,
			EncryptionParameters: storj.EncryptionParameters{
				CipherSuite: storj.EncSecretBox,
				BlockSize:   int32(stripesPerBlock) * stripeSize,
			},
			Volatile: struct {
				RedundancyScheme storj.RedundancyScheme
				SegmentsSize     memory.Size
			}{
				RedundancyScheme: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      shareSize,
					RequiredShares: int16(requiredShares),
					RepairShares:   4,
					OptimalShares:  5,
					TotalShares:    5,
				},
				SegmentsSize: 1536,
			},
		}
		testConfig testConfig
	)

	// so our test object will not be inlined (otherwise it will lose its RS params)
	testConfig.uplinkCfg.Volatile.MaxInlineSize = 1
	testConfig.uplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = true

	testPlanetWithLibUplink(t, testConfig,
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, proj *uplink.Project) {
			_, err := proj.CreateBucket(ctx, bucketName, &inBucketConfig)
			require.NoError(t, err)

			bucket, err := proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			defer ctx.Check(bucket.Close)

			{
				buf := bytes.NewBufferString(objectContents)
				err := bucket.UploadObject(ctx, objectPath1, buf, nil)
				require.NoError(t, err)
			}
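
			// The object was uploaded with nil upload options, so its metadata
			// should reflect the bucket's encryption parameters, redundancy
			// scheme, and segment size.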
			readBack, err := bucket.OpenObject(ctx, objectPath1)
			require.NoError(t, err)
			defer ctx.Check(readBack.Close)

			assert.Equal(t, inBucketConfig.EncryptionParameters, readBack.Meta.Volatile.EncryptionParameters)
			assert.Equal(t, inBucketConfig.Volatile.RedundancyScheme, readBack.Meta.Volatile.RedundancyScheme)
			assert.Equal(t, inBucketConfig.Volatile.SegmentsSize.Int64(), readBack.Meta.Volatile.SegmentsSize)

			strm, err := readBack.DownloadRange(ctx, 0, -1)
			require.NoError(t, err)
			defer ctx.Check(strm.Close)

			contents, err := ioutil.ReadAll(strm)
			require.NoError(t, err)
			assert.Equal(t, string(contents), objectContents)
		})
}