storj/lib/uplink/project.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package uplink

import (
	"context"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/vivint/infectious"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/metainfo/kvmetainfo"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storage/segments"
	"storj.io/storj/pkg/storage/streams"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/uplink/metainfo"
)

// Project represents a specific project access session.
type Project struct {
	uplinkCfg     *Config
	tc            transport.Client
	metainfo      *metainfo.Client
	project       *kvmetainfo.Project
	maxInlineSize memory.Size
}

// BucketConfig holds information about a bucket's configuration. This is
// filled in by the caller for use with CreateBucket(), or filled in by the
// library as Bucket.Config when a bucket is returned from OpenBucket().
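//
// Callers usually set only the fields they care about; anything left unset is
// filled in with library defaults when the bucket is created. An illustrative
// sketch (the specific values are arbitrary, not recommendations):
//
//	cfg := &uplink.BucketConfig{PathCipher: storj.EncAESGCM}
//	cfg.EncryptionParameters.CipherSuite = storj.EncSecretBox
//	cfg.Volatile.SegmentsSize = 32 * memory.MiB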
type BucketConfig struct {
	// PathCipher indicates which cipher suite is to be used for path
	// encryption within the new Bucket. If not set, AES-GCM encryption
	// will be used.
	PathCipher storj.CipherSuite

	// EncryptionParameters specifies the default encryption parameters to
	// be used for data encryption of new Objects in this bucket.
	EncryptionParameters storj.EncryptionParameters

	// Volatile groups config values that are likely to change semantics
	// or go away entirely between releases. Be careful when using them!
	Volatile struct {
		// RedundancyScheme defines the default Reed-Solomon and/or
		// Forward Error Correction encoding parameters to be used by
		// objects in this Bucket.
		RedundancyScheme storj.RedundancyScheme

		// SegmentsSize is the default segment size to use for new
		// objects in this Bucket.
		SegmentsSize memory.Size
	}
}

func (cfg *BucketConfig) clone() *BucketConfig {
	clone := *cfg
	return &clone
}

// TODO: is this the best way to do this?
func (cfg *BucketConfig) setDefaults() {
	if cfg.PathCipher == storj.EncUnspecified {
		cfg.PathCipher = defaultCipher
	}
	if cfg.EncryptionParameters.CipherSuite == storj.EncUnspecified {
		cfg.EncryptionParameters.CipherSuite = defaultCipher
	}
	// Default Reed-Solomon parameters: 29 shares are required to reconstruct
	// a segment, repair is triggered once fewer than 35 remain, 80 is the
	// optimal number to store, and at most 95 are uploaded.
	if cfg.Volatile.RedundancyScheme.RequiredShares == 0 {
		cfg.Volatile.RedundancyScheme.RequiredShares = 29
	}
	if cfg.Volatile.RedundancyScheme.RepairShares == 0 {
		cfg.Volatile.RedundancyScheme.RepairShares = 35
	}
	if cfg.Volatile.RedundancyScheme.OptimalShares == 0 {
		cfg.Volatile.RedundancyScheme.OptimalShares = 80
	}
	if cfg.Volatile.RedundancyScheme.TotalShares == 0 {
		cfg.Volatile.RedundancyScheme.TotalShares = 95
	}
	if cfg.Volatile.RedundancyScheme.ShareSize == 0 {
		cfg.Volatile.RedundancyScheme.ShareSize = (1 * memory.KiB).Int32()
	}
	if cfg.EncryptionParameters.BlockSize == 0 {
		// The encryption block size defaults to one full stripe: one share's
		// worth of data from each of the required shares.
		cfg.EncryptionParameters.BlockSize = cfg.Volatile.RedundancyScheme.ShareSize * int32(cfg.Volatile.RedundancyScheme.RequiredShares)
	}
	if cfg.Volatile.SegmentsSize.Int() == 0 {
		cfg.Volatile.SegmentsSize = 64 * memory.MiB
	}
}

// CreateBucket creates a new bucket if authorized.
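//
// Passing a nil config accepts the library defaults. An illustrative sketch
// (the bucket name is a placeholder):
//
//	bucket, err := project.CreateBucket(ctx, "example-bucket", nil)
//	if err != nil {
//		// handle error
//	}
//	_ = bucket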
func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConfig) (bucket storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	if cfg == nil {
		cfg = &BucketConfig{}
	}
	cfg = cfg.clone()
	cfg.setDefaults()

	bucket = storj.Bucket{
		PathCipher:           cfg.PathCipher.ToCipher(),
		EncryptionParameters: cfg.EncryptionParameters,
		RedundancyScheme:     cfg.Volatile.RedundancyScheme,
		SegmentsSize:         cfg.Volatile.SegmentsSize.Int64(),
	}
	return p.project.CreateBucket(ctx, name, &bucket)
}

// DeleteBucket deletes a bucket if authorized. If the bucket contains any
// Objects at the time of deletion, they may be lost permanently.
func (p *Project) DeleteBucket(ctx context.Context, bucket string) (err error) {
	defer mon.Task()(&ctx)(&err)
	return p.project.DeleteBucket(ctx, bucket)
}

// BucketListOptions controls options to the ListBuckets() call.
type BucketListOptions = storj.BucketListOptions

// ListBuckets will list authorized buckets.
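//
// Passing nil options lists buckets in the forward direction from the start.
// An illustrative sketch:
//
//	list, err := project.ListBuckets(ctx, nil)
//	if err != nil {
//		// handle error
//	}
//	for _, item := range list.Items {
//		_ = item.Name // use the bucket metadata
//	}
//
// For projects with many buckets, list.More together with the options' cursor
// can be used to page through results.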
func (p *Project) ListBuckets(ctx context.Context, opts *BucketListOptions) (bl storj.BucketList, err error) {
	defer mon.Task()(&ctx)(&err)

	if opts == nil {
		opts = &BucketListOptions{Direction: storj.Forward}
	}
	return p.project.ListBuckets(ctx, *opts)
}

// GetBucketInfo returns info about the requested bucket if authorized.
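//
// An illustrative sketch (the bucket name is a placeholder):
//
//	_, cfg, err := project.GetBucketInfo(ctx, "example-bucket")
//	if err != nil {
//		// handle error
//	}
//	_ = cfg.Volatile.RedundancyScheme // inspect the bucket's configuration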
func (p *Project) GetBucketInfo(ctx context.Context, bucket string) (b storj.Bucket, bi *BucketConfig, err error) {
	defer mon.Task()(&ctx)(&err)

	b, err = p.project.GetBucket(ctx, bucket)
	if err != nil {
		return b, nil, err
	}
	cfg := &BucketConfig{
		PathCipher:           b.PathCipher.ToCipherSuite(),
		EncryptionParameters: b.EncryptionParameters,
	}
	cfg.Volatile.RedundancyScheme = b.RedundancyScheme
	cfg.Volatile.SegmentsSize = memory.Size(b.SegmentsSize)
	return b, cfg, nil
}

// TODO: move the bucket related OpenBucket to bucket.go

// OpenBucket returns a Bucket handle with the given EncryptionAccess
// information.
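//
// An illustrative sketch (how the caller obtains the raw key bytes is up to
// the application; keyBytes here is a placeholder):
//
//	access := &uplink.EncryptionAccess{}
//	copy(access.Key[:], keyBytes)
//	bucket, err := project.OpenBucket(ctx, "example-bucket", access)
//	if err != nil {
//		// handle error
//	}
//	defer func() { _ = bucket.Close() }()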
func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *EncryptionAccess) (b *Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	err = p.CheckBucketAttribution(ctx, bucketName)
	if err != nil {
		return nil, err
	}

	bucketInfo, cfg, err := p.GetBucketInfo(ctx, bucketName)
	if err != nil {
		return nil, err
	}

	if access == nil || access.Key == (storj.Key{}) {
		return nil, Error.New("No encryption key chosen")
	}

	encryptionScheme := cfg.EncryptionParameters.ToEncryptionScheme()

	// Set up the erasure coding client and the Reed-Solomon redundancy
	// strategy used to split segments into shares across storage nodes.
	ec := ecclient.NewClient(p.tc, p.uplinkCfg.Volatile.MaxMemory.Int())
	fc, err := infectious.NewFEC(int(cfg.Volatile.RedundancyScheme.RequiredShares), int(cfg.Volatile.RedundancyScheme.TotalShares))
	if err != nil {
		return nil, err
	}
	rs, err := eestream.NewRedundancyStrategy(
		eestream.NewRSScheme(fc, int(cfg.Volatile.RedundancyScheme.ShareSize)),
		int(cfg.Volatile.RedundancyScheme.RepairShares),
		int(cfg.Volatile.RedundancyScheme.OptimalShares))
	if err != nil {
		return nil, err
	}

	// Encryption adds overhead, so compute the maximum size of a fully
	// encrypted segment from the plaintext segment size.
	maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(cfg.Volatile.SegmentsSize.Int64(),
		cfg.EncryptionParameters.ToEncryptionScheme())
	if err != nil {
		return nil, err
	}
	// The segment store keeps small segments inline in metadata and stores
	// larger ones remotely via erasure coding.
	segmentStore := segments.NewSegmentStore(p.metainfo, ec, rs, p.maxInlineSize.Int(), maxEncryptedSegmentSize)

	// TODO(jeff): this is where we would load scope information in.
	encStore := encryption.NewStore()
	encStore.SetDefaultKey(&access.Key)

	streamStore, err := streams.NewStreamStore(segmentStore, cfg.Volatile.SegmentsSize.Int64(), encStore, int(encryptionScheme.BlockSize), encryptionScheme.Cipher, p.maxInlineSize.Int())
	if err != nil {
		return nil, err
	}

	return &Bucket{
		BucketConfig: *cfg,
		Name:         bucketInfo.Name,
		Created:      bucketInfo.Created,
		bucket:       bucketInfo,
		metainfo:     kvmetainfo.New(p.project, p.metainfo, streamStore, segmentStore, encStore),
		streams:      streamStore,
	}, nil
}

// CheckBucketAttribution checks the bucket attribution.
func (p *Project) CheckBucketAttribution(ctx context.Context, bucketName string) error {
	if p.uplinkCfg.Volatile.PartnerID == "" {
		return nil
	}

	partnerID, err := uuid.Parse(p.uplinkCfg.Volatile.PartnerID)
	if err != nil {
		return Error.Wrap(err)
	}

	return p.metainfo.SetAttribution(ctx, bucketName, *partnerID)
}