makes sure all uplink cli configs get passed to libuplink, add stripeSize (#2103)

* makes sure all uplink cli configs get passed to libuplink, add stripeSize

* update comment

* update defaults for uplink config blocksize

* changes per CR, update uplink config defaults

* pass shareSize from uplink config

* move block size validation to kvmeta pkg

* fix tests

* shareSize default 1k, rm config option blocksize

* rm printing err to stdout
This commit is contained in:
Jess G 2019-06-06 11:55:10 -07:00 committed by GitHub
parent f69f7b2d58
commit bf3d168cf6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 129 additions and 73 deletions

View File

@ -55,12 +55,14 @@ func makeBucket(cmd *cobra.Command, args []string) error {
}()
bucketCfg := &uplink.BucketConfig{}
//TODO (alex): make segment size customizable
bucketCfg.PathCipher = cfg.GetPathCipherSuite()
bucketCfg.EncryptionParameters = cfg.GetEncryptionScheme().ToEncryptionParameters()
bucketCfg.Volatile = struct {
RedundancyScheme storj.RedundancyScheme
SegmentsSize memory.Size
}{
RedundancyScheme: cfg.GetRedundancyScheme(),
SegmentsSize: cfg.GetSegmentSize(),
}
_, err = project.CreateBucket(ctx, dst.Bucket(), bucketCfg)

View File

@ -57,48 +57,44 @@ func addCmd(cmd *cobra.Command, root *cobra.Command) *cobra.Command {
}
// NewUplink returns a pointer to a new Client with a Config and Uplink pointer on it and an error.
func (c *UplinkFlags) NewUplink(ctx context.Context, config *libuplink.Config) (*libuplink.Uplink, error) {
return libuplink.NewUplink(ctx, config)
func (cliCfg *UplinkFlags) NewUplink(ctx context.Context) (*libuplink.Uplink, error) {
	// Translate the uplink CLI flag values into a libuplink configuration so
	// that every relevant command-line setting is honored by libuplink.
	cfg := &libuplink.Config{}
	cfg.Volatile.TLS = struct {
		SkipPeerCAWhitelist bool
		PeerCAWhitelistPath string
	}{
		SkipPeerCAWhitelist: !cliCfg.TLS.UsePeerCAWhitelist,
		PeerCAWhitelistPath: cliCfg.TLS.PeerCAWhitelistPath,
	}
	cfg.Volatile.MaxInlineSize = cliCfg.Client.MaxInlineSize
	cfg.Volatile.MaxMemory = cliCfg.RS.MaxBufferMem
	cfg.Volatile.PeerIDVersion = cliCfg.TLS.PeerIDVersions
	return libuplink.NewUplink(ctx, cfg)
}
// GetProject returns a *libuplink.Project for interacting with a specific project
func (c *UplinkFlags) GetProject(ctx context.Context) (*libuplink.Project, error) {
apiKey, err := libuplink.ParseAPIKey(c.Client.APIKey)
func (cliCfg *UplinkFlags) GetProject(ctx context.Context) (*libuplink.Project, error) {
apiKey, err := libuplink.ParseAPIKey(cliCfg.Client.APIKey)
if err != nil {
return nil, err
}
satelliteAddr := c.Client.SatelliteAddr
cfg := &libuplink.Config{}
cfg.Volatile.TLS = struct {
SkipPeerCAWhitelist bool
PeerCAWhitelistPath string
}{
SkipPeerCAWhitelist: !c.TLS.UsePeerCAWhitelist,
PeerCAWhitelistPath: c.TLS.PeerCAWhitelistPath,
}
cfg.Volatile.MaxInlineSize = c.Client.MaxInlineSize
cfg.Volatile.MaxMemory = c.RS.MaxBufferMem
uplk, err := c.NewUplink(ctx, cfg)
encKey, err := uplink.UseOrLoadEncryptionKey(cliCfg.Enc.EncryptionKey, cliCfg.Enc.KeyFilepath)
if err != nil {
return nil, err
}
opts := &libuplink.ProjectOptions{}
opts.Volatile.EncryptionKey = encKey
encKey, err := uplink.UseOrLoadEncryptionKey(c.Enc.EncryptionKey, c.Enc.KeyFilepath)
uplk, err := cliCfg.NewUplink(ctx)
if err != nil {
return nil, err
}
opts.Volatile.EncryptionKey = encKey
project, err := uplk.OpenProject(ctx, satelliteAddr, apiKey, opts)
project, err := uplk.OpenProject(ctx, cliCfg.Client.SatelliteAddr, apiKey, opts)
if err != nil {
if err := uplk.Close(); err != nil {
fmt.Printf("error closing uplink: %+v\n", err)
@ -109,10 +105,10 @@ func (c *UplinkFlags) GetProject(ctx context.Context) (*libuplink.Project, error
}
// GetProjectAndBucket returns a *libuplink.Bucket for interacting with a specific project's bucket
func (c *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName string, access libuplink.EncryptionAccess) (project *libuplink.Project, bucket *libuplink.Bucket, err error) {
project, err = c.GetProject(ctx)
func (cliCfg *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName string, access libuplink.EncryptionAccess) (project *libuplink.Project, bucket *libuplink.Bucket, err error) {
project, err = cliCfg.GetProject(ctx)
if err != nil {
return nil, nil, err
return project, bucket, err
}
defer func() {
@ -125,10 +121,10 @@ func (c *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName string
bucket, err = project.OpenBucket(ctx, bucketName, &access)
if err != nil {
return nil, nil, err
return project, bucket, err
}
return project, bucket, nil
return project, bucket, err
}
func closeProjectAndBucket(project *libuplink.Project, bucket *libuplink.Bucket) {

View File

@ -226,8 +226,8 @@ func (uplink *Uplink) UploadWithConfig(ctx context.Context, satellite *satellite
return err
}
encScheme := config.GetEncryptionScheme()
redScheme := config.GetRedundancyScheme()
encScheme := config.GetEncryptionScheme()
// create bucket if not exists
_, err = metainfo.GetBucket(ctx, bucket)

View File

@ -64,11 +64,14 @@ func TestBucketAttrs(t *testing.T) {
var (
access = simpleEncryptionAccess("voxmachina")
bucketName = "mightynein"
shareSize = memory.KiB.Int32()
requiredShares = 2
stripeSize = shareSize * int32(requiredShares)
inBucketConfig = BucketConfig{
PathCipher: storj.EncSecretBox,
EncryptionParameters: storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
BlockSize: 512,
BlockSize: stripeSize,
},
Volatile: struct {
RedundancyScheme storj.RedundancyScheme
@ -76,8 +79,8 @@ func TestBucketAttrs(t *testing.T) {
}{
RedundancyScheme: storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: memory.KiB.Int32(),
RequiredShares: 2,
ShareSize: shareSize,
RequiredShares: int16(requiredShares),
RepairShares: 3,
OptimalShares: 4,
TotalShares: 5,
@ -120,11 +123,14 @@ func TestBucketAttrsApply(t *testing.T) {
bucketName = "dodecahedron"
objectPath1 = "vax/vex/vox"
objectContents = "Willingham,Ray,Jaffe,Johnson,Riegel,O'Brien,Bailey,Mercer"
shareSize = 3 * memory.KiB.Int32()
requiredShares = 3
stripeSize = shareSize * int32(requiredShares)
inBucketConfig = BucketConfig{
PathCipher: storj.EncSecretBox,
EncryptionParameters: storj.EncryptionParameters{
CipherSuite: storj.EncSecretBox,
BlockSize: 768,
BlockSize: stripeSize,
},
Volatile: struct {
RedundancyScheme storj.RedundancyScheme
@ -132,8 +138,8 @@ func TestBucketAttrsApply(t *testing.T) {
}{
RedundancyScheme: storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: (3 * memory.KiB).Int32(),
RequiredShares: 3,
ShareSize: shareSize,
RequiredShares: int16(requiredShares),
RepairShares: 4,
OptimalShares: 5,
TotalShares: 5,

View File

@ -93,23 +93,21 @@ func (cfg *BucketConfig) setDefaults() {
}
// CreateBucket creates a new bucket if authorized.
func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConfig) (b storj.Bucket, err error) {
func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConfig) (bucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
if cfg == nil {
cfg = &BucketConfig{}
}
cfg = cfg.clone()
cfg.setDefaults()
if cfg.Volatile.RedundancyScheme.ShareSize*int32(cfg.Volatile.RedundancyScheme.RequiredShares)%cfg.EncryptionParameters.BlockSize != 0 {
return b, Error.New("EncryptionParameters.BlockSize must be a multiple of RS ShareSize * RS RequiredShares")
}
b = storj.Bucket{
bucket = storj.Bucket{
PathCipher: cfg.PathCipher.ToCipher(),
EncryptionParameters: cfg.EncryptionParameters,
RedundancyScheme: cfg.Volatile.RedundancyScheme,
SegmentsSize: cfg.Volatile.SegmentsSize.Int64(),
}
return p.project.CreateBucket(ctx, name, &b)
return p.project.CreateBucket(ctx, name, &bucket)
}
// DeleteBucket deletes a bucket if authorized. If the bucket contains any

View File

@ -5,6 +5,7 @@ package kvmetainfo
import (
"context"
"fmt"
"storj.io/storj/pkg/storage/buckets"
"storj.io/storj/pkg/storj"
@ -48,6 +49,10 @@ func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *st
info.SegmentsSize = db.segmentsSize
}
if err := validateBlockSize(info.RedundancyScheme, info.EncryptionParameters.BlockSize); err != nil {
return bucketInfo, err
}
meta, err := db.buckets.Put(ctx, bucketName, buckets.Meta{
PathEncryptionType: info.PathCipher,
SegmentsSize: info.SegmentsSize,
@ -61,6 +66,22 @@ func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *st
return bucketFromMeta(bucketName, meta), nil
}
// validateBlockSize confirms the encryption block size aligns with stripe size.
// Stripes contain encrypted data therefore we want the stripe boundaries to match
// with the encryption block size boundaries. We also want stripes to be small for
// audits, but encryption can be a bit larger. All told, block size should be an integer
// multiple of stripe size.
func validateBlockSize(redundancyScheme storj.RedundancyScheme, blockSize int32) error {
	stripeSize := redundancyScheme.StripeSize()
	// Guard against an unset redundancy scheme (zero ShareSize or
	// RequiredShares): the modulo below would otherwise panic with an
	// integer division by zero instead of returning a validation error.
	if stripeSize == 0 {
		return fmt.Errorf("invalid redundancy scheme: stripe size is zero (RS ShareSize (%d) * RS RequiredShares (%d))",
			redundancyScheme.ShareSize, redundancyScheme.RequiredShares,
		)
	}
	if blockSize%stripeSize != 0 {
		return fmt.Errorf("encryption BlockSize (%d) must be a multiple of RS ShareSize (%d) * RS RequiredShares (%d)",
			blockSize, redundancyScheme.ShareSize, redundancyScheme.RequiredShares,
		)
	}
	return nil
}
// DeleteBucket deletes bucket
func (db *Project) DeleteBucket(ctx context.Context, bucketName string) (err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -367,14 +367,15 @@ func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, buckets.Store,
key := new(storj.Key)
copy(key[:], TestEncKey)
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, 1*memory.KiB.Int(), storj.AESGCM)
blockSize := rs.StripeSize()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, blockSize, storj.AESGCM)
if err != nil {
return nil, nil, nil, err
}
buckets := buckets.NewStore(streams)
return kvmetainfo.New(metainfo, buckets, streams, segments, key, 1*memory.KiB.Int32(), rs, 64*memory.MiB.Int64()), buckets, streams, nil
return kvmetainfo.New(metainfo, buckets, streams, segments, key, int32(blockSize), rs, 64*memory.MiB.Int64()), buckets, streams, nil
}
func forAllCiphers(test func(cipher storj.Cipher)) {

View File

@ -40,9 +40,10 @@ var DefaultRS = storj.RedundancyScheme{
}
// DefaultES default values for EncryptionScheme
// BlockSize should default to the size of a stripe
var DefaultES = storj.EncryptionScheme{
Cipher: storj.AESGCM,
BlockSize: 1 * memory.KiB.Int32(),
BlockSize: DefaultRS.StripeSize(),
}
// GetObject returns information about an object
@ -105,14 +106,20 @@ func (db *DB) CreateObject(ctx context.Context, bucket string, path storj.Path,
// TODO: autodetect content type from the path extension
// if info.ContentType == "" {}
if info.RedundancyScheme.IsZero() {
info.RedundancyScheme = DefaultRS
}
if info.EncryptionScheme.IsZero() {
info.EncryptionScheme = storj.EncryptionScheme{
Cipher: DefaultES.Cipher,
BlockSize: info.RedundancyScheme.ShareSize,
BlockSize: DefaultES.BlockSize,
}
}
if info.RedundancyScheme.IsZero() {
info.RedundancyScheme = DefaultRS
// If the provided EncryptionScheme.BlockSize isn't a multiple of the
// DefaultRS stripeSize, then overwrite the EncryptionScheme with the DefaultES values
if err := validateBlockSize(DefaultRS, info.EncryptionScheme.BlockSize); err != nil {
info.EncryptionScheme.BlockSize = DefaultES.BlockSize
}
}

View File

@ -36,7 +36,7 @@ func TestCreateObject(t *testing.T) {
customES := storj.EncryptionScheme{
Cipher: storj.Unencrypted,
BlockSize: 1 * memory.KiB.Int32(),
BlockSize: customRS.StripeSize(),
}
runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
@ -52,18 +52,21 @@ func TestCreateObject(t *testing.T) {
create: nil,
expectedRS: kvmetainfo.DefaultRS,
expectedES: kvmetainfo.DefaultES,
}, {
},
{
create: &storj.CreateObject{RedundancyScheme: customRS, EncryptionScheme: customES},
expectedRS: customRS,
expectedES: customES,
}, {
},
{
create: &storj.CreateObject{RedundancyScheme: customRS},
expectedRS: customRS,
expectedES: storj.EncryptionScheme{Cipher: kvmetainfo.DefaultES.Cipher, BlockSize: customRS.ShareSize},
}, {
expectedES: storj.EncryptionScheme{Cipher: kvmetainfo.DefaultES.Cipher, BlockSize: kvmetainfo.DefaultES.BlockSize},
},
{
create: &storj.CreateObject{EncryptionScheme: customES},
expectedRS: kvmetainfo.DefaultRS,
expectedES: customES,
expectedES: storj.EncryptionScheme{Cipher: customES.Cipher, BlockSize: kvmetainfo.DefaultES.BlockSize},
},
} {
errTag := fmt.Sprintf("%d. %+v", i, tt)

View File

@ -705,14 +705,15 @@ func initEnv(ctx context.Context, planet *testplanet.Planet) (minio.ObjectLayer,
encKey := new(storj.Key)
copy(encKey[:], TestEncKey)
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), encKey, 1*memory.KiB.Int(), storj.AESGCM)
blockSize := rs.StripeSize()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), encKey, blockSize, storj.AESGCM)
if err != nil {
return nil, nil, nil, err
}
buckets := buckets.NewStore(streams)
kvmetainfo := kvmetainfo.New(metainfo, buckets, streams, segments, encKey, 1*memory.KiB.Int32(), rs, 64*memory.MiB.Int64())
kvmetainfo := kvmetainfo.New(metainfo, buckets, streams, segments, encKey, int32(blockSize), rs, 64*memory.MiB.Int64())
cfg := libuplink.Config{}
cfg.Volatile.TLS = struct {
@ -739,13 +740,15 @@ func initEnv(ctx context.Context, planet *testplanet.Planet) (minio.ObjectLayer,
return nil, nil, nil, err
}
stripeSize := rs.StripeSize()
gateway := NewStorjGateway(
proj,
encKey,
storj.EncAESGCM,
storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
BlockSize: 1 * memory.KiB.Int32(),
BlockSize: int32(stripeSize),
},
storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,

View File

@ -8,11 +8,11 @@ type RedundancyScheme struct {
// Algorithm determines the algorithm to be used for redundancy.
Algorithm RedundancyAlgorithm
// ShareSize is the size to use for new redundancy shares.
// ShareSize is the size in bytes for each erasure shares.
ShareSize int32
// RequiredShares is the minimum number of shares required to recover a
// segment.
// stripe, reed-solomon k.
RequiredShares int16
// RepairShares is the minimum number of safe shares that can remain
// before a repair is triggered.
@ -30,6 +30,14 @@ func (scheme RedundancyScheme) IsZero() bool {
return scheme == (RedundancyScheme{})
}
// StripeSize is the number of bytes for a stripe.
// Stripes are erasure encoded and split into n shares, where we need k to
// reconstruct the stripe. Therefore a stripe size is the erasure share size
// times the required shares, k.
func (scheme RedundancyScheme) StripeSize() int32 {
	// k shares of ShareSize bytes each make up one stripe.
	requiredShares := int32(scheme.RequiredShares)
	return requiredShares * scheme.ShareSize
}
// RedundancyAlgorithm is the algorithm used for redundancy
type RedundancyAlgorithm byte

View File

@ -32,7 +32,7 @@ import (
// redundancy strategy information
type RSConfig struct {
MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4MiB"`
ErasureShareSize memory.Size `help:"the size of each new erasure sure in bytes" default:"1KiB"`
ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"1KiB"`
MinThreshold int `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
RepairThreshold int `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6"`
SuccessThreshold int `help:"the desired total pieces for a segment. o." releaseDefault:"80" devDefault:"8"`
@ -42,11 +42,10 @@ type RSConfig struct {
// EncryptionConfig is a configuration struct that keeps details about
// encrypting segments
type EncryptionConfig struct {
EncryptionKey string `help:"the root key for encrypting the data; when set, it overrides the key stored in the file indicated by the key-filepath flag"`
KeyFilepath string `help:"the path to the file which contains the root key for encrypting the data"`
BlockSize memory.Size `help:"size (in bytes) of encrypted blocks" default:"1KiB"`
DataType int `help:"Type of encryption to use for content and metadata (1=AES-GCM, 2=SecretBox)" default:"1"`
PathType int `help:"Type of encryption to use for paths (0=Unencrypted, 1=AES-GCM, 2=SecretBox)" default:"1"`
EncryptionKey string `help:"the root key for encrypting the data; when set, it overrides the key stored in the file indicated by the key-filepath flag"`
KeyFilepath string `help:"the path to the file which contains the root key for encrypting the data"`
DataType int `help:"Type of encryption to use for content and metadata (1=AES-GCM, 2=SecretBox)" default:"1"`
PathType int `help:"Type of encryption to use for paths (0=Unencrypted, 1=AES-GCM, 2=SecretBox)" default:"1"`
}
// ClientConfig is a configuration struct for the uplink that controls how
@ -116,7 +115,8 @@ func (c Config) GetMetainfo(ctx context.Context, identity *identity.FullIdentity
}
segments := segments.NewSegmentStore(metainfo, ec, rs, c.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize)
if c.RS.ErasureShareSize.Int()*c.RS.MinThreshold%c.Enc.BlockSize.Int() != 0 {
blockSize := c.GetEncryptionScheme().BlockSize
if int(blockSize)%c.RS.ErasureShareSize.Int()*c.RS.MinThreshold != 0 {
err = Error.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold")
return nil, nil, err
}
@ -126,20 +126,21 @@ func (c Config) GetMetainfo(ctx context.Context, identity *identity.FullIdentity
return nil, nil, Error.Wrap(err)
}
streams, err := streams.NewStreamStore(segments, c.Client.SegmentSize.Int64(), key, c.Enc.BlockSize.Int(), storj.Cipher(c.Enc.DataType))
streams, err := streams.NewStreamStore(segments, c.Client.SegmentSize.Int64(), key, int(blockSize), storj.Cipher(c.Enc.DataType))
if err != nil {
return nil, nil, Error.New("failed to create stream store: %v", err)
}
buckets := buckets.NewStore(streams)
return kvmetainfo.New(metainfo, buckets, streams, segments, key, c.Enc.BlockSize.Int32(), rs, c.Client.SegmentSize.Int64()), streams, nil
return kvmetainfo.New(metainfo, buckets, streams, segments, key, blockSize, rs, c.Client.SegmentSize.Int64()), streams, nil
}
// GetRedundancyScheme returns the configured redundancy scheme for new uploads
func (c Config) GetRedundancyScheme() storj.RedundancyScheme {
return storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: c.RS.ErasureShareSize.Int32(),
RequiredShares: int16(c.RS.MinThreshold),
RepairShares: int16(c.RS.RepairThreshold),
OptimalShares: int16(c.RS.SuccessThreshold),
@ -147,14 +148,24 @@ func (c Config) GetRedundancyScheme() storj.RedundancyScheme {
}
}
// GetPathCipherSuite returns the cipher suite used for path encryption for bucket objects
func (c Config) GetPathCipherSuite() storj.CipherSuite {
return storj.Cipher(c.Enc.PathType).ToCipherSuite()
}
// GetEncryptionScheme returns the configured encryption scheme for new uploads
func (c Config) GetEncryptionScheme() storj.EncryptionScheme {
return storj.EncryptionScheme{
Cipher: storj.Cipher(c.Enc.DataType),
BlockSize: int32(c.Enc.BlockSize),
BlockSize: c.GetRedundancyScheme().StripeSize(),
}
}
// GetSegmentSize returns the segment size set in uplink config
func (c Config) GetSegmentSize() memory.Size {
return c.Client.SegmentSize
}
// LoadEncryptionKey loads the encryption key stored in the file pointed by
// filepath.
//