private/testplanet: remove old libuplink from testplanet
Change-Id: Ib1553f84d0b3ae12a5b00382f0f53357b6a273e2
parent 73214c6d1c
commit 84892631c8
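In short: the old lib/uplink based test helpers (GetConfig/GetAccess, NewLibuplink, GetNewProject, UploadWithClientConfig) are removed, and testplanet now provisions a storj.io/uplink access grant per satellite up front. A minimal sketch of the resulting test-side pattern, assuming only the testplanet and storj.io/uplink APIs that appear in the diff below:

// Hedged sketch, not part of the commit: how a test obtains and uses an access grant now.
access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

serialized, err := access.Serialize() // e.g. to feed the uplink CLI "import" command
require.NoError(t, err)

project, err := uplink.OpenProject(ctx, access)
require.NoError(t, err)
defer ctx.Check(project.Close)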
@@ -36,10 +36,15 @@ func TestSetGetMeta(t *testing.T) {

// Configure uplink.
{
access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

accessString, err := access.Serialize()
require.NoError(t, err)

output, err := exec.Command(uplinkExe,
"--config-dir", ctx.Dir("uplink"),
"import",
planet.Uplinks[0].GetConfig(planet.Satellites[0]).Access,
accessString,
).CombinedOutput()
t.Log(string(output))
require.NoError(t, err)
go.mod
@@ -40,9 +40,9 @@ require (
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/tools v0.0.0-20200428211428-0c9eba77bc32 // indirect
storj.io/common v0.0.0-20200519144636-6a729faf9037
storj.io/common v0.0.0-20200519171747-3ff8acf78c46
storj.io/drpc v0.0.12
storj.io/monkit-jaeger v0.0.0-20200424180155-d5f5530ea079
storj.io/private v0.0.0-20200504130741-565a173f6d33
storj.io/uplink v1.0.6-0.20200519150128-ca493a2906a0
storj.io/uplink v1.0.6-0.20200525084935-af43d0ef8bb8
)
go.sum
@@ -649,6 +649,7 @@ storj.io/common v0.0.0-20200517125204-ceb772d8c054/go.mod h1:hqUDJlDHU1kZuZmfLoh
storj.io/common v0.0.0-20200519144636-6a729faf9037 h1:CbUn4bph75bE4icyP8gyw+TcrFxWmu2Xzh0Y8OcP+x0=
storj.io/common v0.0.0-20200519144636-6a729faf9037/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ=
storj.io/common v0.0.0-20200519171747-3ff8acf78c46 h1:Yx73D928PKtyQYPXHuQ5WFES4t+0nufxbhwyf8VodMw=
storj.io/common v0.0.0-20200519171747-3ff8acf78c46/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ=
storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
@@ -663,3 +664,5 @@ storj.io/uplink v1.0.5 h1:RH6LUZQPOJZ01JpM0YmghDu5xyex5gyn32NSCzsPSr4=
storj.io/uplink v1.0.5/go.mod h1:GkChEUgHFuUR2WNpqFw3NeqglXz6/zp6n5Rxt0IVfHw=
storj.io/uplink v1.0.6-0.20200519150128-ca493a2906a0 h1:0YfZBEmrl9AuIkIMfN2414vQB0qy5t9heFxsvKZxvEA=
storj.io/uplink v1.0.6-0.20200519150128-ca493a2906a0/go.mod h1:E4yRIKl1Py+DK/BKp6hDM2uyEeJpP0tF981kmY7ke8Y=
storj.io/uplink v1.0.6-0.20200525084935-af43d0ef8bb8 h1:AwcaTEDrZ+2qmnBvEwsIsDuo/KFa11pWGncp1+cOFaY=
storj.io/uplink v1.0.6-0.20200525084935-af43d0ef8bb8/go.mod h1:F+AeZR4l9F9A7K1K+GXJJarwaKOQK0RWmbyy/maMFEI=
@@ -36,8 +36,12 @@ func TestAllowedPathPrefixListing(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(up.Close)

uplinkConfig := testUplink.GetConfig(testSatellite)
access, err := uplinkConfig.GetAccess()
newAccess := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

serializedNewAccess, err := newAccess.Serialize()
require.NoError(t, err)

access, err := uplink.ParseScope(serializedNewAccess)
require.NoError(t, err)

encryptionAccess := access.EncryptionAccess
@@ -99,8 +103,12 @@ func TestUploadNotAllowedPath(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(up.Close)

uplinkConfig := testUplink.GetConfig(testSatellite)
access, err := uplinkConfig.GetAccess()
newAccess := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

serializedNewAccess, err := newAccess.Serialize()
require.NoError(t, err)

access, err := uplink.ParseScope(serializedNewAccess)
require.NoError(t, err)

encryptionAccess := access.EncryptionAccess
@@ -31,7 +31,12 @@ func TestProjectListBuckets(t *testing.T) {
cfg.Volatile.Log = zaptest.NewLogger(t)
cfg.Volatile.TLS.SkipPeerCAWhitelist = true

access, err := planet.Uplinks[0].GetConfig(planet.Satellites[0]).GetAccess()
newAccess := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

serializedNewAccess, err := newAccess.Serialize()
require.NoError(t, err)

access, err := uplink.ParseScope(serializedNewAccess)
require.NoError(t, err)

ul, err := uplink.NewUplink(ctx, &cfg)
@@ -128,7 +133,9 @@ func TestProjectOpenNewBucket(t *testing.T) {
require.NoError(t, err)

// download uploaded file with old libuplink
oldUplink, err := planet.Uplinks[0].NewLibuplink(ctx)
config := uplink.Config{}
config.Volatile.TLS.SkipPeerCAWhitelist = true
oldUplink, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)

scope, err := uplink.ParseScope(serializedAccess)
@@ -9,6 +9,7 @@ import (
"go.uber.org/zap"

"storj.io/common/identity/testidentity"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite"
@@ -50,6 +51,15 @@ var ShortenOnlineWindow = Reconfigure{
},
}

// Combine combines satellite reconfigure functions.
var Combine = func(elements ...func(log *zap.Logger, index int, config *satellite.Config)) func(log *zap.Logger, index int, config *satellite.Config) {
return func(log *zap.Logger, index int, config *satellite.Config) {
for _, f := range elements {
f(log, index, config)
}
}
}

// ReconfigureRS returns function to change satellite redundancy scheme values
var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalThreshold int) func(log *zap.Logger, index int, config *satellite.Config) {
return func(log *zap.Logger, index int, config *satellite.Config) {
@@ -59,3 +69,10 @@ var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalT
config.Metainfo.RS.TotalThreshold = totalThreshold
}
}

// MaxSegmentSize returns function to change satellite max segment size value.
var MaxSegmentSize = func(maxSegmentSize memory.Size) func(log *zap.Logger, index int, config *satellite.Config) {
return func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.MaxSegmentSize = maxSegmentSize
}
}
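These new Reconfigure helpers replace the per-uplink segment-size and RS overrides that UploadWithClientConfig used to carry; tests now reconfigure the satellite instead. A typical use, mirroring the test changes further down in this diff:

testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// test body uses the reconfigured satellite
})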
@@ -4,6 +4,7 @@
package testplanet

import (
"context"
"testing"

"go.uber.org/zap/zaptest"
@@ -11,6 +12,7 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/uplink"
)

// Run runs testplanet in multiple configurations.
@@ -54,7 +56,22 @@ func Run(t *testing.T, config Config, test func(t *testing.T, ctx *testcontext.C

planet.Start(ctx)

provisionUplinks(ctx, t, planet)

test(t, ctx, planet)
})
}
}

func provisionUplinks(ctx context.Context, t *testing.T, planet *Planet) {
for _, planetUplink := range planet.Uplinks {
for _, satellite := range planet.Satellites {
apiKey := planetUplink.APIKey[satellite.ID()]
access, err := uplink.RequestAccessWithPassphrase(ctx, satellite.URL(), apiKey.Serialize(), "")
if err != nil {
t.Fatalf("%+v", err)
}
planetUplink.Access[satellite.ID()] = access
}
}
}
@@ -797,3 +797,11 @@ func (planet *Planet) newGarbageCollection(count int, identity *identity.FullIde
planet.databases = append(planet.databases, revocationDB)
return satellite.NewGarbageCollection(log, identity, db, pointerDB, revocationDB, versionInfo, &config, nil)
}

// atLeastOne returns 1 if value < 1, or value otherwise.
func atLeastOne(value int) int {
if value < 1 {
return 1
}
return value
}
@@ -12,7 +12,6 @@ import (
"strconv"
"time"

"github.com/spf13/pflag"
"github.com/zeebo/errs"
"go.uber.org/zap"

@@ -22,11 +21,10 @@ import (
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/private/cfgstruct"
libuplink "storj.io/storj/lib/uplink"
"storj.io/uplink"
"storj.io/uplink/private/metainfo"
"storj.io/uplink/private/piecestore"
"storj.io/uplink/private/testuplink"
)

// Uplink is a general purpose
@@ -36,6 +34,7 @@ type Uplink struct {
Dialer rpc.Dialer

APIKey map[storj.NodeID]*macaroon.APIKey
Access map[storj.NodeID]*uplink.Access

// Projects is indexed by the satellite number.
Projects []*Project
@@ -95,15 +94,16 @@ func (planet *Planet) newUplink(name string) (*Uplink, error) {
return nil, err
}

uplink := &Uplink{
planetUplink := &Uplink{
Log: planet.log.Named(name),
Identity: identity,
APIKey: map[storj.NodeID]*macaroon.APIKey{},
Access: map[storj.NodeID]*uplink.Access{},
}

uplink.Log.Debug("id=" + identity.ID.String())
planetUplink.Log.Debug("id=" + identity.ID.String())

uplink.Dialer = rpc.NewDefaultDialer(tlsOptions)
planetUplink.Dialer = rpc.NewDefaultDialer(tlsOptions)

for j, satellite := range planet.Satellites {
console := satellite.API.Console
@@ -133,10 +133,10 @@ func (planet *Planet) newUplink(name string) (*Uplink, error) {
return nil, err
}

uplink.APIKey[satellite.ID()] = apiKey
planetUplink.APIKey[satellite.ID()] = apiKey

uplink.Projects = append(uplink.Projects, &Project{
client: uplink,
planetUplink.Projects = append(planetUplink.Projects, &Project{
client: planetUplink,

ID: project.ID,
Owner: ProjectOwner{
@@ -151,9 +151,9 @@ func (planet *Planet) newUplink(name string) (*Uplink, error) {
})
}

planet.uplinks = append(planet.uplinks, uplink)
planet.Uplinks = append(planet.Uplinks, planetUplink)

return uplink, nil
return planetUplink, nil
}

// ID returns uplink id
@@ -182,7 +182,12 @@ func (client *Uplink) Upload(ctx context.Context, satellite *Satellite, bucket s

// UploadWithExpiration data to specific satellite and expiration time
func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, data []byte, expiration time.Time) error {
project, err := client.GetNewProject(ctx, satellite)
_, found := testuplink.GetMaxSegmentSize(ctx)
if !found {
ctx = testuplink.WithMaxSegmentSize(ctx, satellite.Config.Metainfo.MaxSegmentSize)
}

project, err := client.GetProject(ctx, satellite)
if err != nil {
return err
}
@@ -210,29 +215,9 @@ func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satel
return upload.Commit()
}

// UploadWithClientConfig uploads data to specific satellite with custom client configuration
func (client *Uplink) UploadWithClientConfig(ctx context.Context, satellite *Satellite, clientConfig UplinkConfig, bucketName string, path storj.Path, data []byte) (err error) {
project, bucket, err := client.GetProjectAndBucket(ctx, satellite, bucketName, clientConfig)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, bucket.Close(), project.Close()) }()

opts := &libuplink.UploadOptions{}
opts.Volatile.RedundancyScheme = clientConfig.GetRedundancyScheme()
opts.Volatile.EncryptionParameters = clientConfig.GetEncryptionParameters()

reader := bytes.NewReader(data)
if err := bucket.UploadObject(ctx, path, reader, opts); err != nil {
return err
}

return nil
}

// Download data from specific satellite
func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) ([]byte, error) {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return nil, err
}
@@ -253,7 +238,7 @@ func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucket

// DownloadStream returns stream for downloading data
func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) (_ io.ReadCloser, cleanup func() error, err error) {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return nil, nil, err
}
@@ -271,7 +256,7 @@ func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite,

// DownloadStreamRange returns stream for downloading data
func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, start, limit int64) (_ io.ReadCloser, cleanup func() error, err error) {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return nil, nil, err
}
@@ -292,7 +277,7 @@ func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satell

// DeleteObject deletes an object at the path in a bucket
func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) error {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return err
}
@@ -307,7 +292,7 @@ func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bu

// CreateBucket creates a new bucket
func (client *Uplink) CreateBucket(ctx context.Context, satellite *Satellite, bucketName string) error {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return err
}
@@ -322,7 +307,7 @@ func (client *Uplink) CreateBucket(ctx context.Context, satellite *Satellite, bu

// DeleteBucket deletes a bucket.
func (client *Uplink) DeleteBucket(ctx context.Context, satellite *Satellite, bucketName string) error {
project, err := client.GetNewProject(ctx, satellite)
project, err := client.GetProject(ctx, satellite)
if err != nil {
return err
}
@@ -335,115 +320,9 @@ func (client *Uplink) DeleteBucket(ctx context.Context, satellite *Satellite, bu
return nil
}

// GetConfig returns a default config for a given satellite.
func (client *Uplink) GetConfig(satellite *Satellite) UplinkConfig {
config := getDefaultConfig()

// client.APIKey[satellite.ID()] is a *macaroon.APIKey, but we want a
// *libuplink.APIKey, so, serialize and parse for now
apiKey, err := libuplink.ParseAPIKey(client.APIKey[satellite.ID()].Serialize())
if err != nil {
panic(err)
}

encAccess := libuplink.NewEncryptionAccess()
encAccess.SetDefaultKey(storj.Key{})
encAccess.SetDefaultPathCipher(storj.EncAESGCM)

accessData, err := (&libuplink.Scope{
SatelliteAddr: satellite.URL(),
APIKey: apiKey,
EncryptionAccess: encAccess,
}).Serialize()
if err != nil {
panic(err)
}

config.Access = accessData

// Support some legacy stuff
config.Legacy.Client.APIKey = apiKey.Serialize()
config.Legacy.Client.SatelliteAddr = satellite.Addr()

config.Client.DialTimeout = 10 * time.Second

config.RS.MinThreshold = satellite.Config.Metainfo.RS.MinThreshold
config.RS.RepairThreshold = satellite.Config.Metainfo.RS.RepairThreshold
config.RS.SuccessThreshold = satellite.Config.Metainfo.RS.SuccessThreshold
config.RS.MaxThreshold = satellite.Config.Metainfo.RS.MaxTotalThreshold

config.TLS.UsePeerCAWhitelist = false
config.TLS.Extensions.Revocation = false
config.TLS.Extensions.WhitelistSignedLeaf = false

return config
}

func getDefaultConfig() UplinkConfig {
config := UplinkConfig{}
cfgstruct.Bind(&pflag.FlagSet{}, &config, cfgstruct.UseDevDefaults())
return config
}

// atLeastOne returns 1 if value < 1, or value otherwise.
func atLeastOne(value int) int {
if value < 1 {
return 1
}
return value
}
// NewLibuplink creates a libuplink.Uplink object with the testplanet Uplink config
func (client *Uplink) NewLibuplink(ctx context.Context) (*libuplink.Uplink, error) {
config := getDefaultConfig()
libuplinkCfg := &libuplink.Config{}
libuplinkCfg.Volatile.Log = client.Log
libuplinkCfg.Volatile.MaxInlineSize = config.Client.MaxInlineSize
libuplinkCfg.Volatile.MaxMemory = config.RS.MaxBufferMem
libuplinkCfg.Volatile.PeerIDVersion = config.TLS.PeerIDVersions
libuplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = !config.TLS.UsePeerCAWhitelist
libuplinkCfg.Volatile.TLS.PeerCAWhitelistPath = config.TLS.PeerCAWhitelistPath
libuplinkCfg.Volatile.DialTimeout = config.Client.DialTimeout

return libuplink.NewUplink(ctx, libuplinkCfg)
}

// GetProject returns a libuplink.Project which allows interactions with a specific project
func (client *Uplink) GetProject(ctx context.Context, satellite *Satellite) (*libuplink.Project, error) {
testLibuplink, err := client.NewLibuplink(ctx)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, testLibuplink.Close()) }()

access, err := client.GetConfig(satellite).GetAccess()
if err != nil {
return nil, err
}

project, err := testLibuplink.OpenProject(ctx, access.SatelliteAddr, access.APIKey)
if err != nil {
return nil, err
}
return project, nil
}

// GetNewProject returns a uplink.Project which allows interactions with a specific project
func (client *Uplink) GetNewProject(ctx context.Context, satellite *Satellite) (*uplink.Project, error) {
oldAccess, err := client.GetConfig(satellite).GetAccess()
if err != nil {
return nil, err
}

serializedOldAccess, err := oldAccess.Serialize()
if err != nil {
return nil, err
}

access, err := uplink.ParseAccess(serializedOldAccess)
if err != nil {
return nil, err
}
// GetProject returns a uplink.Project which allows interactions with a specific project.
func (client *Uplink) GetProject(ctx context.Context, satellite *Satellite) (*uplink.Project, error) {
access := client.Access[satellite.ID()]

project, err := uplink.OpenProject(ctx, access)
if err != nil {
@@ -451,54 +330,3 @@ func (client *Uplink) GetNewProject(ctx context.Context, satellite *Satellite) (
}
return project, nil
}

// GetProjectAndBucket returns a libuplink.Project and Bucket which allows interactions with a specific project and its buckets
func (client *Uplink) GetProjectAndBucket(ctx context.Context, satellite *Satellite, bucketName string, clientCfg UplinkConfig) (_ *libuplink.Project, _ *libuplink.Bucket, err error) {
project, err := client.GetProject(ctx, satellite)
if err != nil {
return nil, nil, err
}
defer func() {
if err != nil {
err = errs.Combine(err, project.Close())
}
}()

access, err := client.GetConfig(satellite).GetAccess()
if err != nil {
return nil, nil, err
}

// Check if the bucket exists, if not then create it
_, _, err = project.GetBucketInfo(ctx, bucketName)
if err != nil {
if storj.ErrBucketNotFound.Has(err) {
err := createBucket(ctx, clientCfg, *project, bucketName)
if err != nil {
return nil, nil, err
}
} else {
return nil, nil, err
}
}

bucket, err := project.OpenBucket(ctx, bucketName, access.EncryptionAccess)
if err != nil {
return nil, nil, err
}
return project, bucket, nil
}

func createBucket(ctx context.Context, config UplinkConfig, project libuplink.Project, bucketName string) error {
bucketCfg := &libuplink.BucketConfig{}
bucketCfg.PathCipher = config.GetPathCipherSuite()
bucketCfg.EncryptionParameters = config.GetEncryptionParameters()
bucketCfg.Volatile.RedundancyScheme = config.GetRedundancyScheme()
bucketCfg.Volatile.SegmentsSize = config.GetSegmentSize()

_, err := project.CreateBucket(ctx, bucketName, bucketCfg)
if err != nil {
return err
}
return nil
}
@@ -1,223 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package testplanet

import (
"io/ioutil"
"strings"
"time"

"github.com/zeebo/errs"

"storj.io/common/memory"
"storj.io/common/peertls/tlsopts"
"storj.io/common/storj"
libuplink "storj.io/storj/lib/uplink"
"storj.io/uplink"
)

// RSConfig is a configuration struct that keeps details about default
// redundancy strategy information
type RSConfig struct {
MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4MiB" hidden:"true"`
ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B" hidden:"true"`
MinThreshold int `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4" hidden:"true"`
RepairThreshold int `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6" hidden:"true"`
SuccessThreshold int `help:"the desired total pieces for a segment. o." releaseDefault:"80" devDefault:"8" hidden:"true"`
MaxThreshold int `help:"the largest amount of pieces to encode to. n." releaseDefault:"110" devDefault:"10" hidden:"true"`
}

// EncryptionConfig is a configuration struct that keeps details about
// encrypting segments
type EncryptionConfig struct {
DataType int `help:"Type of encryption to use for content and metadata (2=AES-GCM, 3=SecretBox)" default:"2"`
PathType int `help:"Type of encryption to use for paths (1=Unencrypted, 2=AES-GCM, 3=SecretBox)" default:"2"`
}

// ClientConfig is a configuration struct for the uplink that controls how
// to talk to the rest of the network.
type ClientConfig struct {
MaxInlineSize memory.Size `help:"max inline segment size in bytes" default:"4KiB"`
SegmentSize memory.Size `help:"the size of a segment in bytes" default:"64MiB"`
DialTimeout time.Duration `help:"timeout for dials" default:"0h2m00s"`
}

// UplinkConfig uplink configuration
type UplinkConfig struct {
AccessConfig
Client ClientConfig
RS RSConfig
Enc EncryptionConfig
TLS tlsopts.Config
}

// AccessConfig holds information about which accesses exist and are selected.
type AccessConfig struct {
Accesses map[string]string `internal:"true"`
Access string `help:"the serialized access, or name of the access to use" default:"" basic-help:"true"`

// used for backward compatibility
Scopes map[string]string `internal:"true"` // deprecated
Scope string `internal:"true"` // deprecated

Legacy // Holds on to legacy configuration values
}

// Legacy holds deprecated configuration values
type Legacy struct {
Client struct {
APIKey string `default:"" help:"the api key to use for the satellite (deprecated)" noprefix:"true" deprecated:"true"`
SatelliteAddr string `releaseDefault:"127.0.0.1:7777" devDefault:"127.0.0.1:10000" help:"the address to use for the satellite (deprecated)" noprefix:"true"`
}
Enc struct {
EncryptionKey string `help:"the root key for encrypting the data which will be stored in KeyFilePath (deprecated)" setup:"true" deprecated:"true"`
KeyFilepath string `help:"the path to the file which contains the root key for encrypting the data (deprecated)" deprecated:"true"`
EncAccessFilepath string `help:"the path to a file containing a serialized encryption access (deprecated)" deprecated:"true"`
}
}

// normalize looks for usage of deprecated config values and sets the respective
// non-deprecated config values accordingly and returns them in a copy of the config.
func (a AccessConfig) normalize() (_ AccessConfig) {
// fallback to scope if access not found
if a.Access == "" {
a.Access = a.Scope
}

if a.Accesses == nil {
a.Accesses = make(map[string]string)
}

// fallback to scopes if accesses not found
if len(a.Accesses) == 0 {
for name, access := range a.Scopes {
a.Accesses[name] = access
}
}

return a
}
// GetAccess returns the appropriate access for the config.
func (a AccessConfig) GetAccess() (_ *libuplink.Scope, err error) {
a = a.normalize()

access, err := a.GetNamedAccess(a.Access)
if err != nil {
return nil, err
}
if access != nil {
return access, nil
}

// Otherwise, try to load the access name as a serialized access.
if access, err := libuplink.ParseScope(a.Access); err == nil {
return access, nil
}

// fall back to trying to load the legacy values.
apiKey, err := libuplink.ParseAPIKey(a.Legacy.Client.APIKey)
if err != nil {
return nil, err
}

satelliteAddr := a.Legacy.Client.SatelliteAddr
if satelliteAddr == "" {
return nil, errs.New("must specify a satellite address")
}

var encAccess *libuplink.EncryptionAccess
if a.Legacy.Enc.EncAccessFilepath != "" {
data, err := ioutil.ReadFile(a.Legacy.Enc.EncAccessFilepath)
if err != nil {
return nil, errs.Wrap(err)
}
encAccess, err = libuplink.ParseEncryptionAccess(strings.TrimSpace(string(data)))
if err != nil {
return nil, err
}
} else {
data := []byte(a.Legacy.Enc.EncryptionKey)
if a.Legacy.Enc.KeyFilepath != "" {
data, err = ioutil.ReadFile(a.Legacy.Enc.KeyFilepath)
if err != nil {
return nil, errs.Wrap(err)
}
}
key, err := storj.NewKey(data)
if err != nil {
return nil, errs.Wrap(err)
}
encAccess = libuplink.NewEncryptionAccessWithDefaultKey(*key)
encAccess.SetDefaultPathCipher(storj.EncAESGCM)
}

return &libuplink.Scope{
APIKey: apiKey,
SatelliteAddr: satelliteAddr,
EncryptionAccess: encAccess,
}, nil
}

// GetNewAccess returns the appropriate access for the config.
func (a AccessConfig) GetNewAccess() (_ *uplink.Access, err error) {
oldAccess, err := a.GetAccess()
if err != nil {
return nil, err
}

serializedOldAccess, err := oldAccess.Serialize()
if err != nil {
return nil, err
}

access, err := uplink.ParseAccess(serializedOldAccess)
if err != nil {
return nil, err
}
return access, nil
}

// GetNamedAccess returns named access if exists.
func (a AccessConfig) GetNamedAccess(name string) (_ *libuplink.Scope, err error) {
// if an access exists for that name, try to load it.
if data, ok := a.Accesses[name]; ok {
return libuplink.ParseScope(data)
}
return nil, nil
}

// GetRedundancyScheme returns the configured redundancy scheme for new uploads
func (c UplinkConfig) GetRedundancyScheme() storj.RedundancyScheme {
return storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: c.RS.ErasureShareSize.Int32(),
RequiredShares: int16(c.RS.MinThreshold),
RepairShares: int16(c.RS.RepairThreshold),
OptimalShares: int16(c.RS.SuccessThreshold),
TotalShares: int16(c.RS.MaxThreshold),
}
}

// GetPathCipherSuite returns the cipher suite used for path encryption for bucket objects
func (c UplinkConfig) GetPathCipherSuite() storj.CipherSuite {
return storj.CipherSuite(c.Enc.PathType)
}

// GetEncryptionParameters returns the configured encryption scheme for new uploads
// Blocksize should align with the stripe size therefore multiples of stripes
// should fit in every encryption block. Instead of lettings users configure this
// multiple value, we hardcode stripesPerBlock as 2 for simplicity.
func (c UplinkConfig) GetEncryptionParameters() storj.EncryptionParameters {
const stripesPerBlock = 2
return storj.EncryptionParameters{
CipherSuite: storj.CipherSuite(c.Enc.DataType),
BlockSize: c.GetRedundancyScheme().StripeSize() * stripesPerBlock,
}
}

// GetSegmentSize returns the segment size set in uplink config
func (c UplinkConfig) GetSegmentSize() memory.Size {
return c.Client.SegmentSize
}
@@ -242,12 +242,13 @@ func TestDownloadFromUnresponsiveNode(t *testing.T) {
func TestDeleteWithOfflineStoragenode(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.MaxSegmentSize(1 * memory.MiB),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
expectedData := testrand.Bytes(5 * memory.MiB)

config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
config.Client.SegmentSize = 1 * memory.MiB
err := planet.Uplinks[0].UploadWithClientConfig(ctx, planet.Satellites[0], config, "test-bucket", "test-file", expectedData)
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test-bucket", "test-file", expectedData)
require.NoError(t, err)

for _, node := range planet.StorageNodes {
@@ -389,6 +389,9 @@ func TestBilling_ZombieSegments(t *testing.T) {
t.Skip("Zombie segments do get billed. Wait for resolution of SM-592")
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.MaxSegmentSize(5 * memory.KiB),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
const (
bucketName = "a-bucket"
@@ -403,10 +406,7 @@ func TestBilling_ZombieSegments(t *testing.T) {
uplnk := planet.Uplinks[0]
{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 5 * memory.KiB,
}}, bucketName, objectKey, data)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectKey, data)
require.NoError(t, err)
}
@@ -122,9 +122,6 @@ func TestCalculateNodeAtRestData(t *testing.T) {
// Setup: create 50KiB of data for the uplink to upload
expectedData := testrand.Bytes(50 * memory.KiB)

// Setup: get the expected size of the data that will be stored in pointer
uplinkConfig := uplink.GetConfig(planet.Satellites[0])

// TODO uplink currently hardcode block size so we need to use the same value in test
encryptionParameters := storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
@@ -143,9 +140,9 @@ func TestCalculateNodeAtRestData(t *testing.T) {
require.NoError(t, err)

// Confirm the correct number of shares were stored
uplinkRS := uplinkConfig.GetRedundancyScheme()
if !correctRedundencyScheme(len(obs.Node), uplinkRS) {
t.Fatalf("expected between: %d and %d, actual: %d", uplinkRS.RepairShares, uplinkRS.TotalShares, len(obs.Node))
rs := satelliteRS(planet.Satellites[0])
if !correctRedundencyScheme(len(obs.Node), rs) {
t.Fatalf("expected between: %d and %d, actual: %d", rs.RepairShares, rs.TotalShares, len(obs.Node))
}

// Confirm the correct number of bytes were stored on each node
@@ -175,7 +172,7 @@ func TestCalculateBucketAtRestData(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellitePeer := planet.Satellites[0]
redundancyScheme := planet.Uplinks[0].GetConfig(satellitePeer).GetRedundancyScheme()
redundancyScheme := satelliteRS(satellitePeer)
expectedBucketTallies := make(map[string]*accounting.BucketTally)
for _, tt := range testCases {
tt := tt // avoid scopelint error
@@ -211,7 +208,7 @@ func TestTallyIgnoresExpiredPointers(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellitePeer := planet.Satellites[0]
redundancyScheme := planet.Uplinks[0].GetConfig(satellitePeer).GetRedundancyScheme()
redundancyScheme := satelliteRS(satellitePeer)

project := "9656af6e-2d9c-42fa-91f2-bfd516a722d7"
bucket := "bucket"
@@ -399,3 +396,12 @@ func correctRedundencyScheme(shareCount int, uplinkRS storj.RedundancyScheme) bo
// TotalShares is the number of shares to encode
return int(uplinkRS.RepairShares) <= shareCount && shareCount <= int(uplinkRS.TotalShares)
}

func satelliteRS(satellite *testplanet.Satellite) storj.RedundancyScheme {
return storj.RedundancyScheme{
RequiredShares: int16(satellite.Config.Metainfo.RS.MinThreshold),
RepairShares: int16(satellite.Config.Metainfo.RS.RepairThreshold),
OptimalShares: int16(satellite.Config.Metainfo.RS.SuccessThreshold),
TotalShares: int16(satellite.Config.Metainfo.RS.TotalThreshold),
}
}
@@ -11,9 +11,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap"

"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/paths"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
@@ -22,6 +20,7 @@ import (
"storj.io/storj/satellite"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/overlay"
"storj.io/uplink/private/storage/meta"
)

// TestDisqualificationTooManyFailedAudits does the following:
@@ -128,14 +127,11 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {

bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))

encParameters := uplinkPeer.GetConfig(satellitePeer).GetEncryptionParameters()
cipherSuite := encParameters.CipherSuite
store := encryption.NewStore()
store.SetDefaultKey(new(storj.Key))
encryptedPath, err := encryption.EncryptPath("testbucket", paths.NewUnencrypted("test/path"), cipherSuite, store)
items, _, err := satellitePeer.Metainfo.Service.List(ctx, "", "", true, 10, meta.All)
require.NoError(t, err)
lastSegPath := storj.JoinPaths(projects[0].ID.String(), "l", "testbucket", encryptedPath.Raw())
pointer, err := satellitePeer.Metainfo.Service.Get(ctx, lastSegPath)
require.Equal(t, 1, len(items))

pointer, err := satellitePeer.Metainfo.Service.Get(ctx, items[0].Path)
require.NoError(t, err)

disqualifiedNode := pointer.GetRemote().GetRemotePieces()[0].NodeId
@@ -4,9 +4,11 @@
package gc_test

import (
"errors"
"testing"
"time"

"github.com/btcsuite/btcutil/base58"
"github.com/stretchr/testify/require"
"go.uber.org/zap"

@@ -126,11 +128,15 @@ func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Sa
require.NoError(t, err)
require.Len(t, projects, 1)

encParameters := upl.GetConfig(satellite).GetEncryptionParameters()
cipherSuite := encParameters.CipherSuite
store := encryption.NewStore()
store.SetDefaultKey(new(storj.Key))
encryptedPath, err := encryption.EncryptPath(bucket, paths.NewUnencrypted(path), cipherSuite, store)
access := upl.Access[satellite.ID()]

serializedAccess, err := access.Serialize()
require.NoError(t, err)

store, err := encryptionAccess(serializedAccess)
require.NoError(t, err)

encryptedPath, err := encryption.EncryptPathWithStoreCipher(bucket, paths.NewUnencrypted(path), store)
require.NoError(t, err)

lastSegPath = storj.JoinPaths(projects[0].ID.String(), "l", bucket, encryptedPath.Raw())
@@ -139,3 +145,26 @@ func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Sa

return lastSegPath, pointer
}

func encryptionAccess(access string) (*encryption.Store, error) {
data, version, err := base58.CheckDecode(access)
if err != nil || version != 0 {
return nil, errors.New("invalid access grant format")
}

p := new(pb.Scope)
if err := pb.Unmarshal(data, p); err != nil {
return nil, err
}

key, err := storj.NewKey(p.EncryptionAccess.DefaultKey)
if err != nil {
return nil, err
}

store := encryption.NewStore()
store.SetDefaultKey(key)
store.SetDefaultPathCipher(storj.EncAESGCM)

return store, nil
}
@@ -19,6 +19,7 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/storage"
"storj.io/uplink/private/testuplink"
)

func TestEndpoint_DeleteObjectPieces(t *testing.T) {
@@ -45,7 +46,10 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
@@ -59,13 +63,7 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
percentExp = 0.75
)

err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
},
bucketName, objectName, tc.objData,
)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)

// calculate the SNs total used space after data upload
@@ -131,7 +129,10 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
numToShutdown := 2
@@ -141,11 +142,7 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
satelliteSys = planet.Satellites[0]
)

err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
}, bucketName, objectName, tc.objData)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)

// Shutdown the first numToShutdown storage nodes before we delete the pieces
@@ -210,7 +207,10 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
@@ -218,11 +218,7 @@ func TestEndpoint_DeleteObjectPieces(t *testing.T) {
satelliteSys = planet.Satellites[0]
)

err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
}, bucketName, objectName, tc.objData)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)

// Shutdown all the storage nodes before we delete the pieces
@@ -500,13 +496,8 @@ func uploadFirstObjectWithoutSomeSegmentsPointers(
t.Fatal("noSegments list must have at least one segment")
}

err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: segmentSize,
},
},
bucketName, objectName, objectData,
)
uploadCtx := testuplink.WithMaxSegmentSize(ctx, segmentSize)
err := uplnk.Upload(uploadCtx, satelliteSys, bucketName, objectName, objectData)
require.NoError(t, err)

projectID, encryptedPath = getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
@@ -32,6 +32,7 @@ import (
"storj.io/uplink"
"storj.io/uplink/private/metainfo"
"storj.io/uplink/private/storage/meta"
"storj.io/uplink/private/testuplink"
)

func TestInvalidAPIKey(t *testing.T) {
@@ -170,13 +171,19 @@ func TestServiceList(t *testing.T) {
assert.NoError(t, err)
}

config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
project, err := planet.Uplinks[0].GetProject(ctx, planet.Satellites[0])
require.NoError(t, err)
defer ctx.Check(bucket.Close)
defer ctx.Check(project.Close)
list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
require.NoError(t, err)

objects := project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
Recursive: true,
})

listItems := make([]*uplink.Object, 0)
for objects.Next() {
listItems = append(listItems, objects.Item())
}
require.NoError(t, objects.Err())

expected := []storj.Object{
{Path: "müsic"},
@@ -188,17 +195,24 @@ func TestServiceList(t *testing.T) {
{Path: "ビデオ/movie.mkv"},
}

require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
require.Equal(t, len(expected), len(listItems))
sort.Slice(listItems, func(i, k int) bool {
return listItems[i].Key < listItems[k].Key
})
for i, item := range expected {
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
require.Equal(t, item.Path, listItems[i].Key)
require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
}

list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
require.NoError(t, err)
objects = project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
Recursive: false,
})

listItems = make([]*uplink.Object, 0)
for objects.Next() {
listItems = append(listItems, objects.Item())
}
require.NoError(t, objects.Err())

expected = []storj.Object{
{Path: "müsic"},
@@ -207,14 +221,14 @@ func TestServiceList(t *testing.T) {
{Path: "ビデオ/", IsPrefix: true},
}

require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
require.Equal(t, len(expected), len(listItems))
sort.Slice(listItems, func(i, k int) bool {
return listItems[i].Key < listItems[k].Key
})
for i, item := range expected {
t.Log(item.Path, list.Items[i].Path)
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
t.Log(item.Path, listItems[i].Key)
require.Equal(t, item.Path, listItems[i].Key)
require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
}
})
}
@@ -518,9 +532,6 @@ func TestBeginCommitListSegment(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]

config := uplink.GetConfig(planet.Satellites[0])
metainfoService := planet.Satellites[0].Metainfo.Service

projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
@@ -530,7 +541,6 @@ func TestBeginCommitListSegment(t *testing.T) {
bucket := storj.Bucket{
Name: "initial-bucket",
ProjectID: projectID,
PathCipher: config.GetEncryptionParameters().CipherSuite,
}
_, err = metainfoService.CreateBucket(ctx, bucket)
require.NoError(t, err)
@@ -638,14 +648,15 @@ func TestBeginCommitListSegment(t *testing.T) {
func TestListSegments(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.MaxSegmentSize(memory.KiB),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]

data := testrand.Bytes(15 * memory.KiB)
config := uplink.GetConfig(planet.Satellites[0])
config.Client.SegmentSize = memory.KiB
err := uplink.UploadWithClientConfig(ctx, planet.Satellites[0], config, "testbucket", "test-path", data)
err := uplink.Upload(ctx, planet.Satellites[0], "testbucket", "test-path", data)
require.NoError(t, err)

// 15KiB + encryption should be uploaded into 16 segments with SegmentSize == 1KiB
@@ -702,9 +713,7 @@ func TestInlineSegment(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]

config := uplink.GetConfig(planet.Satellites[0])
metainfoService := planet.Satellites[0].Metainfo.Service

projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
@@ -725,7 +734,6 @@ func TestInlineSegment(t *testing.T) {
bucket := storj.Bucket{
Name: "inline-segments-bucket",
ProjectID: projectID,
PathCipher: config.GetEncryptionParameters().CipherSuite,
}
_, err = metainfoService.CreateBucket(ctx, bucket)
require.NoError(t, err)
@@ -1193,12 +1201,17 @@ func TestRateLimit(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RateLimiter.Rate = float64(rateLimit)
config.Metainfo.RateLimiter.CacheExpiration = 500 * time.Millisecond
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]

// TODO find a way to reset limiter before test is executed, currently
// testplanet is doing one additional request to get access
time.Sleep(1 * time.Second)

var group errs2.Group
for i := 0; i <= rateLimit; i++ {
group.Go(func() error {
@@ -1241,12 +1254,17 @@ func TestRateLimit_ProjectRateLimitOverride(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RateLimiter.Rate = 2
config.Metainfo.RateLimiter.CacheExpiration = 500 * time.Millisecond
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]

// TODO find a way to reset limiter before test is executed, currently
// testplanet is doing one additional request to get access
time.Sleep(1 * time.Second)

projects, err := satellite.DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
require.Len(t, projects, 1)
@@ -1281,6 +1299,10 @@ func TestRateLimit_ProjectRateLimitOverrideCachedExpired(t *testing.T) {
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]

// TODO find a way to reset limiter before test is executed, currently
// testplanet is doing one additional request to get access
time.Sleep(2 * time.Second)

projects, err := satellite.DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
require.Len(t, projects, 1)
@@ -1327,7 +1349,6 @@ func TestOverwriteZombieSegments(t *testing.T) {
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
config := uplink.GetConfig(planet.Satellites[0])
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
@@ -1347,10 +1368,10 @@ func TestOverwriteZombieSegments(t *testing.T) {
tc := tc
t.Run(tc.label, func(t *testing.T) {
data := testrand.Bytes(tc.objectSize)
config.Client.SegmentSize = tc.segmentSize
bucket := "testbucket" + strconv.Itoa(i)
objectKey := "test-path" + strconv.Itoa(i)
err := uplink.UploadWithClientConfig(ctx, planet.Satellites[0], config, bucket, objectKey, data)
uploadCtx := testuplink.WithMaxSegmentSize(ctx, tc.segmentSize)
err := uplink.Upload(uploadCtx, planet.Satellites[0], bucket, objectKey, data)
require.NoError(t, err)

items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
@@ -1376,7 +1397,7 @@ func TestOverwriteZombieSegments(t *testing.T) {
require.NoError(t, err)
}

err = uplink.UploadWithClientConfig(ctx, planet.Satellites[0], config, bucket, objectKey, data)
err = uplink.Upload(uploadCtx, planet.Satellites[0], bucket, objectKey, data)
require.NoError(t, err)
})
@@ -81,7 +81,10 @@ func TestService_DeletePieces_AllNodesUp(t *testing.T) {
// Use RSConfig for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(15*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
uplnk := planet.Uplinks[0]
@@ -91,13 +94,7 @@ func TestService_DeletePieces_AllNodesUp(t *testing.T) {

{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
},
"a-bucket", "object-filename", data,
)
err := uplnk.Upload(ctx, satelliteSys, "a-bucket", "object-filename", data)
require.NoError(t, err)
}

@@ -156,7 +153,10 @@ func TestService_DeletePieces_SomeNodesDown(t *testing.T) {
// Use RSConfig for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(15*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
uplnk := planet.Uplinks[0]
@@ -165,13 +165,7 @@ func TestService_DeletePieces_SomeNodesDown(t *testing.T) {

{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
},
"a-bucket", "object-filename", data,
)
err := uplnk.Upload(ctx, satelliteSys, "a-bucket", "object-filename", data)
require.NoError(t, err)
}

@@ -220,7 +214,10 @@ func TestService_DeletePieces_AllNodesDown(t *testing.T) {
// Use RSConfig for ensuring that we don't have long-tail cancellations
// and the upload doesn't leave garbage in the SNs
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(15*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
uplnk := planet.Uplinks[0]
@@ -228,13 +225,7 @@ func TestService_DeletePieces_AllNodesDown(t *testing.T) {

{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
},
"a-bucket", "object-filename", data,
)
err := uplnk.Upload(ctx, satelliteSys, "a-bucket", "object-filename", data)
require.NoError(t, err)
}

@@ -308,6 +299,7 @@ func TestService_DeletePieces_Timeout(t *testing.T) {
config.Metainfo.RS.RepairThreshold = 2
config.Metainfo.RS.SuccessThreshold = 4
config.Metainfo.RS.TotalThreshold = 4
config.Metainfo.MaxSegmentSize = 15 * memory.KiB
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
@@ -316,13 +308,7 @@ func TestService_DeletePieces_Timeout(t *testing.T) {

{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.UploadWithClientConfig(ctx, satelliteSys, testplanet.UplinkConfig{
Client: testplanet.ClientConfig{
SegmentSize: 10 * memory.KiB,
},
},
"a-bucket", "object-filename", data,
)
err := uplnk.Upload(ctx, satelliteSys, "a-bucket", "object-filename", data)
require.NoError(t, err)
}