// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package uplink

import (
	"bytes"
	"io/ioutil"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/storj"
)

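// testConfig carries the uplink Config used to construct the Uplink under test.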
type testConfig struct {
	uplinkCfg Config
}

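// testPlanetWithLibUplink runs testFunc inside a testplanet instance with a
// libuplink Project already opened against the first satellite.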
func testPlanetWithLibUplink(t *testing.T, cfg testConfig,
	testFunc func(*testing.T, *testcontext.Context, *testplanet.Planet, *Project)) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// we only use testUplink for the free API key, until such time
		// as testplanet makes it easy to get another way :D
		testUplink := planet.Uplinks[0]
		satellite := planet.Satellites[0]
		cfg.uplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = true

		apiKey, err := ParseAPIKey(testUplink.APIKey[satellite.ID()])
		if err != nil {
			t.Fatalf("could not parse API key from testplanet: %v", err)
		}
		uplink, err := NewUplink(ctx, &cfg.uplinkCfg)
		if err != nil {
			t.Fatalf("could not create new Uplink object: %v", err)
		}
		defer ctx.Check(uplink.Close)

		proj, err := uplink.OpenProject(ctx, satellite.Addr(), apiKey)
		if err != nil {
			t.Fatalf("could not open project from libuplink under testplanet: %v", err)
		}
		defer ctx.Check(proj.Close)

		testFunc(t, ctx, planet, proj)
	})
}

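// simpleEncryptionAccess builds an EncryptionCtx with a default key derived
// from encKey; it panics on error rather than returning it.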
func simpleEncryptionAccess(encKey string) (access *EncryptionCtx) {
	key, err := storj.NewKey([]byte(encKey))
	if err != nil {
		panic(err)
	}
	return NewEncryptionCtxWithDefaultKey(*key)
}

// check that partner bucket attributes are stored and retrieved correctly.
func TestPartnerBucketAttrs(t *testing.T) {
	var (
		access     = simpleEncryptionAccess("voxmachina")
		bucketName = "mightynein"
	)

	testPlanetWithLibUplink(t, testConfig{},
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, proj *Project) {
			_, err := proj.CreateBucket(ctx, bucketName, nil)
			require.NoError(t, err)

			partnerID := testrand.UUID().String()

			consoleProjects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
			assert.NoError(t, err)

			consoleProject := consoleProjects[0]

			// no attribution entry should exist for the bucket yet
			db := planet.Satellites[0].DB.Attribution()
			_, err = db.Get(ctx, consoleProject.ID, []byte(bucketName))
			require.Error(t, err)

			// partner ID set
			proj.uplinkCfg.Volatile.PartnerID = partnerID
			got, err := proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			// close this handle before reopening the bucket below
			require.NoError(t, got.Close())

			info, err := db.Get(ctx, consoleProject.ID, []byte(bucketName))
			require.NoError(t, err)
			assert.Equal(t, info.PartnerID.String(), partnerID)

			// partner ID NOT set
			proj.uplinkCfg.Volatile.PartnerID = ""
			got, err = proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			defer ctx.Check(got.Close)
		})
}

// check that bucket attributes are stored and retrieved correctly.
func TestBucketAttrs(t *testing.T) {
	var (
		access          = simpleEncryptionAccess("voxmachina")
		bucketName      = "mightynein"
		shareSize       = memory.KiB.Int32()
		requiredShares  = 2
		stripeSize      = shareSize * int32(requiredShares)
		stripesPerBlock = 2
		inBucketConfig  = BucketConfig{
			PathCipher: storj.EncSecretBox,
			EncryptionParameters: storj.EncryptionParameters{
				CipherSuite: storj.EncAESGCM,
				BlockSize:   int32(stripesPerBlock) * stripeSize,
			},
			Volatile: struct {
				RedundancyScheme storj.RedundancyScheme
				SegmentsSize     memory.Size
			}{
				RedundancyScheme: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      shareSize,
					RequiredShares: int16(requiredShares),
					RepairShares:   3,
					OptimalShares:  4,
					TotalShares:    5,
				},
				SegmentsSize: 688894,
			},
		}
	)

	testPlanetWithLibUplink(t, testConfig{},
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, proj *Project) {
			before := time.Now()
			bucket, err := proj.CreateBucket(ctx, bucketName, &inBucketConfig)
			require.NoError(t, err)

			assert.Equal(t, bucketName, bucket.Name)
			assert.Falsef(t, bucket.Created.Before(before), "impossible creation time %v", bucket.Created)

			got, err := proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			defer ctx.Check(got.Close)

			assert.Equal(t, bucketName, got.Name)
			assert.Equal(t, inBucketConfig.PathCipher, got.PathCipher)
			assert.Equal(t, inBucketConfig.EncryptionParameters, got.EncryptionParameters)
			assert.Equal(t, inBucketConfig.Volatile.RedundancyScheme, got.Volatile.RedundancyScheme)
			assert.Equal(t, inBucketConfig.Volatile.SegmentsSize, got.Volatile.SegmentsSize)

			err = proj.DeleteBucket(ctx, bucketName)
			require.NoError(t, err)
		})
}

// check that when uploading objects without any specific RS or encryption
// config, the bucket attributes apply. also when uploading objects _with_ more
// specific config, the specific config applies and not the bucket attrs.
func TestBucketAttrsApply(t *testing.T) {
	var (
		access          = simpleEncryptionAccess("howdoyouwanttodothis")
		bucketName      = "dodecahedron"
		objectPath1     = "vax/vex/vox"
		objectContents  = "Willingham,Ray,Jaffe,Johnson,Riegel,O'Brien,Bailey,Mercer"
		shareSize       = 3 * memory.KiB.Int32()
		requiredShares  = 3
		stripeSize      = shareSize * int32(requiredShares)
		stripesPerBlock = 2
		inBucketConfig  = BucketConfig{
			PathCipher: storj.EncSecretBox,
			EncryptionParameters: storj.EncryptionParameters{
				CipherSuite: storj.EncSecretBox,
				BlockSize:   int32(stripesPerBlock) * stripeSize,
			},
			Volatile: struct {
				RedundancyScheme storj.RedundancyScheme
				SegmentsSize     memory.Size
			}{
				RedundancyScheme: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      shareSize,
					RequiredShares: int16(requiredShares),
					RepairShares:   4,
					OptimalShares:  5,
					TotalShares:    5,
				},
				SegmentsSize: 1536,
			},
		}
		testConfig testConfig
	)
	// so our test object will not be inlined (otherwise it will lose its RS params)
	testConfig.uplinkCfg.Volatile.MaxInlineSize = 1

	testPlanetWithLibUplink(t, testConfig,
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, proj *Project) {
			_, err := proj.CreateBucket(ctx, bucketName, &inBucketConfig)
			require.NoError(t, err)

			bucket, err := proj.OpenBucket(ctx, bucketName, access)
			require.NoError(t, err)
			defer ctx.Check(bucket.Close)

			{
				buf := bytes.NewBufferString(objectContents)
				err := bucket.UploadObject(ctx, objectPath1, buf, nil)
				require.NoError(t, err)
			}

			readBack, err := bucket.OpenObject(ctx, objectPath1)
			require.NoError(t, err)
			defer ctx.Check(readBack.Close)

			assert.Equal(t, inBucketConfig.EncryptionParameters, readBack.Meta.Volatile.EncryptionParameters)
			assert.Equal(t, inBucketConfig.Volatile.RedundancyScheme, readBack.Meta.Volatile.RedundancyScheme)
			assert.Equal(t, inBucketConfig.Volatile.SegmentsSize.Int64(), readBack.Meta.Volatile.SegmentsSize)

			strm, err := readBack.DownloadRange(ctx, 0, -1)
			require.NoError(t, err)
			defer ctx.Check(strm.Close)

			contents, err := ioutil.ReadAll(strm)
			require.NoError(t, err)
			assert.Equal(t, string(contents), objectContents)
		})
}