value attribution merge fix and more test cases (#2588)

* value attribution merge fix and more test cases
This commit is contained in:
aligeti 2019-07-19 11:17:34 -04:00 committed by GitHub
parent 5f096a3eab
commit 29b576961f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 867 additions and 362 deletions

2
go.mod
View File

@ -115,7 +115,7 @@ require (
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
golang.org/x/text v0.3.2 // indirect

View File

@ -15,6 +15,7 @@ import (
"storj.io/storj/internal/memory"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
"storj.io/storj/internal/testrand"
"storj.io/storj/lib/uplink"
"storj.io/storj/pkg/storj"
)
@ -53,6 +54,89 @@ func testPlanetWithLibUplink(t *testing.T, cfg testConfig,
})
}
// check that partner bucket attributes are stored and retrieved correctly.
//
// NOTE(review): the three subtests share the bucket created in the first
// subtest, so they are order-dependent; t.Run executes them sequentially
// here, but they are not independently runnable.
func TestPartnerBucketAttrs(t *testing.T) {
var (
// test-only fixed encryption key; never use a constant key outside tests.
access = uplink.NewEncryptionAccessWithDefaultKey(storj.Key{0, 1, 2, 3, 4})
bucketName = "mightynein"
)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
apikey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[satellite.ID()])
require.NoError(t, err)
// random partner id used by the "open with partner id" subtest below.
partnerID := testrand.UUID()
// Creating a bucket with no partner id configured must leave PartnerID
// zero, and re-creating the same bucket must fail.
t.Run("without partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.TLS.SkipPeerCAWhitelist = true
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucketInfo, err := project.CreateBucket(ctx, bucketName, nil)
require.NoError(t, err)
assert.True(t, bucketInfo.PartnerID.IsZero())
// duplicate create of an already-attributed-to-nobody bucket must error.
_, err = project.CreateBucket(ctx, bucketName, nil)
require.Error(t, err)
})
// Opening the existing (unattributed) bucket with a partner id configured
// must write that partner id into the bucket's attribution.
t.Run("open with partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.PartnerID = partnerID.String()
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
assert.Equal(t, bucketInfo.PartnerID.String(), config.Volatile.PartnerID)
})
// Once attribution is set, opening with a different partner id must NOT
// overwrite the stored attribution.
t.Run("open with different partner id", func(t *testing.T) {
config := uplink.Config{}
config.Volatile.TLS.SkipPeerCAWhitelist = true
config.Volatile.PartnerID = testrand.UUID().String()
up, err := uplink.NewUplink(ctx, &config)
require.NoError(t, err)
defer ctx.Check(up.Close)
project, err := up.OpenProject(ctx, satellite.Addr(), apikey)
require.NoError(t, err)
defer ctx.Check(project.Close)
bucket, err := project.OpenBucket(ctx, bucketName, access)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
bucketInfo, _, err := project.GetBucketInfo(ctx, bucketName)
require.NoError(t, err)
assert.NotEqual(t, bucketInfo.PartnerID.String(), config.Volatile.PartnerID)
})
})
}
// check that bucket attributes are stored and retrieved correctly.
func TestBucketAttrs(t *testing.T) {
var (

View File

@ -157,6 +157,26 @@ func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *Enc
return nil, err
}
// partnerID set and bucket's attribution is not set
if p.uplinkCfg.Volatile.PartnerID != "" && bucketInfo.PartnerID.IsZero() {
// make an entry into the attribution table
err = p.checkBucketAttribution(ctx, bucketName)
if err != nil {
return nil, err
}
partnerID, err := uuid.Parse(p.uplinkCfg.Volatile.PartnerID)
if err != nil {
return nil, Error.Wrap(err)
}
// update the bucket metainfo table with corresponding partner info
bucketInfo.PartnerID = *partnerID
bucketInfo, err = p.updateBucket(ctx, bucketInfo)
if err != nil {
return nil, err
}
}
encryptionParameters := cfg.EncryptionParameters
ec := ecclient.NewClient(p.uplinkCfg.Volatile.Log.Named("ecclient"), p.tc, p.uplinkCfg.Volatile.MaxMemory.Int())
@ -233,3 +253,18 @@ func (p *Project) checkBucketAttribution(ctx context.Context, bucketName string)
return p.metainfo.SetAttribution(ctx, bucketName, *partnerID)
}
// updateBucket updates an existing bucket's attribution info.
//
// It copies the caller-relevant fields of bucketInfo into a fresh value and
// re-issues CreateBucket, which the satellite treats as an update when the
// bucket already exists.
func (p *Project) updateBucket(ctx context.Context, bucketInfo storj.Bucket) (bucket storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	updated := storj.Bucket{
		Name:                        bucketInfo.Name,
		PartnerID:                   bucketInfo.PartnerID,
		PathCipher:                  bucketInfo.PathCipher,
		DefaultEncryptionParameters: bucketInfo.DefaultEncryptionParameters,
		DefaultRedundancyScheme:     bucketInfo.DefaultRedundancyScheme,
		DefaultSegmentsSize:         bucketInfo.DefaultSegmentsSize,
	}
	return p.project.CreateBucket(ctx, bucketInfo.Name, &updated)
}

File diff suppressed because it is too large Load Diff

View File

@ -48,6 +48,7 @@ message Bucket {
int64 default_segment_size = 4;
pointerdb.RedundancyScheme default_redundancy_scheme = 5;
encryption.EncryptionParameters default_encryption_parameters = 6;
bytes partner_id = 7;
}
message BucketListItem {
@ -63,6 +64,7 @@ message BucketCreateRequest {
int64 default_segment_size = 3;
pointerdb.RedundancyScheme default_redundancy_scheme = 4;
encryption.EncryptionParameters default_encryption_parameters = 5;
bytes partner_id = 6;
}
message BucketCreateResponse {

View File

@ -1638,6 +1638,11 @@
"id": 6,
"name": "default_encryption_parameters",
"type": "encryption.EncryptionParameters"
},
{
"id": 7,
"name": "partner_id",
"type": "bytes"
}
]
},
@ -1693,6 +1698,11 @@
"id": 5,
"name": "default_encryption_parameters",
"type": "encryption.EncryptionParameters"
},
{
"id": 6,
"name": "partner_id",
"type": "bytes"
}
]
},

View File

@ -18,6 +18,8 @@ type BucketsDB interface {
CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)
// Get returns an existing bucket
GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error)
// UpdateBucket updates an existing bucket
UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)
// Delete deletes a bucket
DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
// List returns all buckets for a project

View File

@ -674,8 +674,13 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
return nil, status.Errorf(codes.Internal, err.Error())
}
convBucket, err := convertBucketToProto(ctx, bucket)
if err != nil {
return resp, err
}
return &pb.BucketGetResponse{
Bucket: convertBucketToProto(ctx, bucket),
Bucket: convBucket,
}, nil
}
@ -702,19 +707,56 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
bucket, err := convertProtoToBucket(req, keyInfo.ProjectID)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
// checks if bucket exists before updates it or makes a new entry
bucket, err := endpoint.metainfo.GetBucket(ctx, req.GetName(), keyInfo.ProjectID)
if err == nil {
var partnerID uuid.UUID
err = partnerID.UnmarshalJSON(req.GetPartnerId())
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
// partnerID not set
if partnerID.IsZero() {
return resp, status.Errorf(codes.AlreadyExists, "Bucket already exists")
}
//update the bucket
bucket.PartnerID = partnerID
bucket, err = endpoint.metainfo.UpdateBucket(ctx, bucket)
pbBucket, err := convertBucketToProto(ctx, bucket)
if err != nil {
return resp, status.Errorf(codes.Internal, err.Error())
}
return &pb.BucketCreateResponse{
Bucket: pbBucket,
}, nil
}
bucket, err = endpoint.metainfo.CreateBucket(ctx, bucket)
if err != nil {
return nil, Error.Wrap(err)
}
// create the bucket
if storj.ErrBucketNotFound.Has(err) {
bucket, err := convertProtoToBucket(req, keyInfo.ProjectID)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
return &pb.BucketCreateResponse{
Bucket: convertBucketToProto(ctx, bucket),
}, nil
bucket, err = endpoint.metainfo.CreateBucket(ctx, bucket)
if err != nil {
return nil, Error.Wrap(err)
}
convBucket, err := convertBucketToProto(ctx, bucket)
if err != nil {
return resp, err
}
return &pb.BucketCreateResponse{
Bucket: convBucket,
}, nil
}
return nil, Error.Wrap(err)
}
// DeleteBucket deletes a bucket
@ -805,7 +847,7 @@ func (endpoint *Endpoint) SetBucketAttribution(context.Context, *pb.BucketSetAtt
return resp, status.Error(codes.Unimplemented, "not implemented")
}
func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (storj.Bucket, error) {
func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (bucket storj.Bucket, err error) {
bucketID, err := uuid.New()
if err != nil {
return storj.Bucket{}, err
@ -813,10 +855,21 @@ func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (sto
defaultRS := req.GetDefaultRedundancyScheme()
defaultEP := req.GetDefaultEncryptionParameters()
var partnerID uuid.UUID
err = partnerID.UnmarshalJSON(req.GetPartnerId())
// bucket's partnerID should never be set
// it is always read back from buckets DB
if err != nil && !partnerID.IsZero() {
return bucket, errs.New("Invalid uuid")
}
return storj.Bucket{
ID: *bucketID,
Name: string(req.GetName()),
ProjectID: projectID,
PartnerID: partnerID,
PathCipher: storj.CipherSuite(req.GetPathCipher()),
DefaultSegmentsSize: req.GetDefaultSegmentSize(),
DefaultRedundancyScheme: storj.RedundancyScheme{
@ -834,11 +887,16 @@ func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (sto
}, nil
}
func convertBucketToProto(ctx context.Context, bucket storj.Bucket) (pbBucket *pb.Bucket) {
func convertBucketToProto(ctx context.Context, bucket storj.Bucket) (pbBucket *pb.Bucket, err error) {
rs := bucket.DefaultRedundancyScheme
partnerID, err := bucket.PartnerID.MarshalJSON()
if err != nil {
return pbBucket, status.Errorf(codes.Internal, "UUID marshal error")
}
return &pb.Bucket{
Name: []byte(bucket.Name),
PathCipher: pb.CipherSuite(int(bucket.PathCipher)),
PartnerId: partnerID,
CreatedAt: bucket.Created,
DefaultSegmentSize: bucket.DefaultSegmentsSize,
DefaultRedundancyScheme: &pb.RedundancyScheme{
@ -853,7 +911,7 @@ func convertBucketToProto(ctx context.Context, bucket storj.Bucket) (pbBucket *p
CipherSuite: pb.CipherSuite(int(bucket.DefaultEncryptionParameters.CipherSuite)),
BlockSize: int64(bucket.DefaultEncryptionParameters.BlockSize),
},
}
}, nil
}
// BeginObject begins object

View File

@ -182,6 +182,12 @@ func (s *Service) GetBucket(ctx context.Context, bucketName []byte, projectID uu
return s.bucketsDB.GetBucket(ctx, bucketName, projectID)
}
// UpdateBucket updates the bucket in the buckets db and returns the stored result.
func (s *Service) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
// named err result feeds the monkit task defer; do not unname it.
defer mon.Task()(&ctx)(&err)
return s.bucketsDB.UpdateBucket(ctx, bucket)
}
// DeleteBucket deletes a bucket from the buckets db
func (s *Service) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/stretchr/testify/require"
"storj.io/storj/internal/testcontext"
@ -87,5 +88,21 @@ func TestUsers(t *testing.T) {
DefaultSegmentsSize: int64(100),
})
require.NoError(t, err)
// update a bucket with partnerID
bucket, err := db.Buckets().UpdateBucket(ctx, storj.Bucket{
ID: testrand.UUID(),
Name: "testbucket",
ProjectID: proj.ID,
PartnerID: proj.ID,
Created: time.Now(),
PathCipher: storj.EncAESGCM,
DefaultSegmentsSize: int64(100),
})
require.NoError(t, err)
bucket, err = db.Buckets().GetBucket(ctx, []byte("testbucket"), proj.ID)
require.NoError(t, err)
flag := uuid.Equal(bucket.PartnerID, proj.ID)
require.True(t, flag)
})
}

View File

@ -28,6 +28,14 @@ func (db *DB) Buckets() metainfo.BucketsDB {
// CreateBucket creates a new bucket
func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
partnerID := dbx.BucketMetainfo_Create_Fields{}
if !bucket.PartnerID.IsZero() {
partnerID = dbx.BucketMetainfo_Create_Fields{
PartnerId: dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:]),
}
}
row, err := db.db.Create_BucketMetainfo(ctx,
dbx.BucketMetainfo_Id(bucket.ID[:]),
dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
@ -42,9 +50,7 @@ func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ s
dbx.BucketMetainfo_DefaultRedundancyRepairShares(int(bucket.DefaultRedundancyScheme.RepairShares)),
dbx.BucketMetainfo_DefaultRedundancyOptimalShares(int(bucket.DefaultRedundancyScheme.OptimalShares)),
dbx.BucketMetainfo_DefaultRedundancyTotalShares(int(bucket.DefaultRedundancyScheme.TotalShares)),
dbx.BucketMetainfo_Create_Fields{
PartnerId: dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:]),
},
partnerID,
)
if err != nil {
return storj.Bucket{}, storj.ErrBucket.Wrap(err)
@ -73,6 +79,24 @@ func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID
return convertDBXtoBucket(dbxBucket)
}
// UpdateBucket updates a bucket.
//
// Currently only the partner id is updatable, and only to a non-zero value:
// attribution may be set but never cleared through this method.
func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	if bucket.PartnerID.IsZero() {
		return storj.Bucket{}, Error.New("partnerId is zero")
	}

	var updateFields dbx.BucketMetainfo_Update_Fields
	updateFields.PartnerId = dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:])

	dbxBucket, err := db.db.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]), dbx.BucketMetainfo_Name([]byte(bucket.Name)), updateFields)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	// The generated Update_* returns (nil, nil) when no row matched
	// (sql.ErrNoRows); without this guard convertDBXtoBucket would
	// dereference a nil row.
	if dbxBucket == nil {
		return storj.Bucket{}, storj.ErrBucketNotFound.New("%s", bucket.Name)
	}
	return convertDBXtoBucket(dbxBucket)
}
// DeleteBucket deletes a bucket
func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
defer mon.Task()(&ctx)(&err)
@ -166,13 +190,14 @@ func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listO
func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket storj.Bucket, err error) {
id, err := bytesToUUID(dbxBucket.Id)
if err != nil {
return bucket, err
return bucket, storj.ErrBucket.Wrap(err)
}
project, err := bytesToUUID(dbxBucket.ProjectId)
if err != nil {
return bucket, err
return bucket, storj.ErrBucket.Wrap(err)
}
return storj.Bucket{
bucket = storj.Bucket{
ID: id,
Name: string(dbxBucket.Name),
ProjectID: project,
@ -191,5 +216,15 @@ func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket storj.Bucket, err
CipherSuite: storj.CipherSuite(dbxBucket.DefaultEncryptionCipherSuite),
BlockSize: int32(dbxBucket.DefaultEncryptionBlockSize),
},
}, nil
}
if dbxBucket.PartnerId != nil {
partnerID, err := bytesToUUID(dbxBucket.PartnerId)
if err != nil {
return bucket, storj.ErrBucket.Wrap(err)
}
bucket.PartnerID = partnerID
}
return bucket, nil
}

View File

@ -767,7 +767,7 @@ model bucket_metainfo (
field id blob
field project_id project.id restrict
field name blob
field partner_id blob (nullable)
field partner_id blob (nullable, updatable)
field path_cipher int
@ -787,6 +787,10 @@ model bucket_metainfo (
)
create bucket_metainfo ()
update bucket_metainfo (
where bucket_metainfo.project_id = ?
where bucket_metainfo.name = ?
)
read one (
select bucket_metainfo

View File

@ -4298,6 +4298,7 @@ type BucketMetainfo_Create_Fields struct {
}
type BucketMetainfo_Update_Fields struct {
PartnerId BucketMetainfo_PartnerId_Field
DefaultSegmentSize BucketMetainfo_DefaultSegmentSize_Field
DefaultEncryptionCipherSuite BucketMetainfo_DefaultEncryptionCipherSuite_Field
DefaultEncryptionBlockSize BucketMetainfo_DefaultEncryptionBlockSize_Field
@ -8424,6 +8425,92 @@ func (obj *postgresImpl) Update_Offer_By_Id(ctx context.Context,
return offer, nil
}
// Update_BucketMetainfo_By_ProjectId_And_Name updates the matching
// bucket_metainfos row and returns the updated row via RETURNING.
//
// NOTE(review): dbx-generated code — regenerate from the .dbx schema
// instead of editing by hand.
//
// Generated dbx contract: returns (nil, nil) when no row matches
// (sql.ErrNoRows); callers must nil-check the returned row.
func (obj *postgresImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
bucket_metainfo_name BucketMetainfo_Name_Field,
update BucketMetainfo_Update_Fields) (
bucket_metainfo *BucketMetainfo, err error) {
// __sets is a hole in the statement template filled with the SET clause below.
var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")}}
__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
var __args []interface{}
// Append one "column = ?" fragment (and its value) per field flagged as set.
if update.PartnerId._set {
__values = append(__values, update.PartnerId.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
}
if update.DefaultSegmentSize._set {
__values = append(__values, update.DefaultSegmentSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
}
if update.DefaultEncryptionCipherSuite._set {
__values = append(__values, update.DefaultEncryptionCipherSuite.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
}
if update.DefaultEncryptionBlockSize._set {
__values = append(__values, update.DefaultEncryptionBlockSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
}
if update.DefaultRedundancyAlgorithm._set {
__values = append(__values, update.DefaultRedundancyAlgorithm.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
}
if update.DefaultRedundancyShareSize._set {
__values = append(__values, update.DefaultRedundancyShareSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
}
if update.DefaultRedundancyRequiredShares._set {
__values = append(__values, update.DefaultRedundancyRequiredShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
}
if update.DefaultRedundancyRepairShares._set {
__values = append(__values, update.DefaultRedundancyRepairShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
}
if update.DefaultRedundancyOptimalShares._set {
__values = append(__values, update.DefaultRedundancyOptimalShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
}
if update.DefaultRedundancyTotalShares._set {
__values = append(__values, update.DefaultRedundancyTotalShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
}
// No field was set: refuse to issue an empty UPDATE.
if len(__sets_sql.SQLs) == 0 {
return nil, emptyUpdate()
}
// WHERE-clause arguments come after the SET values in placeholder order.
__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
__values = append(__values, __args...)
__sets.SQL = __sets_sql
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
bucket_metainfo = &BucketMetainfo{}
err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
// no matching row: (nil, nil), not an error — see contract above.
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, obj.makeErr(err)
}
return bucket_metainfo, nil
}
func (obj *postgresImpl) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
value_attribution_project_id ValueAttribution_ProjectId_Field,
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
@ -12277,6 +12364,102 @@ func (obj *sqlite3Impl) Update_Offer_By_Id(ctx context.Context,
return offer, nil
}
// Update_BucketMetainfo_By_ProjectId_And_Name updates the matching
// bucket_metainfos row, then re-reads it (sqlite has no RETURNING here).
//
// NOTE(review): dbx-generated code — regenerate from the .dbx schema
// instead of editing by hand. The Exec result (rows affected) is not
// inspected; the follow-up SELECT is what detects a missing row.
//
// Generated dbx contract: returns (nil, nil) when no row matches
// (sql.ErrNoRows); callers must nil-check the returned row.
func (obj *sqlite3Impl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
bucket_metainfo_name BucketMetainfo_Name_Field,
update BucketMetainfo_Update_Fields) (
bucket_metainfo *BucketMetainfo, err error) {
// __sets is a hole in the statement template filled with the SET clause below.
var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")}}
__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
var __args []interface{}
// Append one "column = ?" fragment (and its value) per field flagged as set.
if update.PartnerId._set {
__values = append(__values, update.PartnerId.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
}
if update.DefaultSegmentSize._set {
__values = append(__values, update.DefaultSegmentSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
}
if update.DefaultEncryptionCipherSuite._set {
__values = append(__values, update.DefaultEncryptionCipherSuite.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
}
if update.DefaultEncryptionBlockSize._set {
__values = append(__values, update.DefaultEncryptionBlockSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
}
if update.DefaultRedundancyAlgorithm._set {
__values = append(__values, update.DefaultRedundancyAlgorithm.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
}
if update.DefaultRedundancyShareSize._set {
__values = append(__values, update.DefaultRedundancyShareSize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
}
if update.DefaultRedundancyRequiredShares._set {
__values = append(__values, update.DefaultRedundancyRequiredShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
}
if update.DefaultRedundancyRepairShares._set {
__values = append(__values, update.DefaultRedundancyRepairShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
}
if update.DefaultRedundancyOptimalShares._set {
__values = append(__values, update.DefaultRedundancyOptimalShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
}
if update.DefaultRedundancyTotalShares._set {
__values = append(__values, update.DefaultRedundancyTotalShares.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
}
// No field was set: refuse to issue an empty UPDATE.
if len(__sets_sql.SQLs) == 0 {
return nil, emptyUpdate()
}
// WHERE-clause arguments come after the SET values in placeholder order.
__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
__values = append(__values, __args...)
__sets.SQL = __sets_sql
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
bucket_metainfo = &BucketMetainfo{}
_, err = obj.driver.Exec(__stmt, __values...)
if err != nil {
return nil, obj.makeErr(err)
}
// Re-read the row that was (possibly) updated, keyed by the same WHERE args.
var __embed_stmt_get = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
// no matching row: (nil, nil), not an error — see contract above.
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, obj.makeErr(err)
}
return bucket_metainfo, nil
}
func (obj *sqlite3Impl) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
value_attribution_project_id ValueAttribution_ProjectId_Field,
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
@ -14587,6 +14770,18 @@ func (rx *Rx) Update_ApiKey_By_Id(ctx context.Context,
return tx.Update_ApiKey_By_Id(ctx, api_key_id, update)
}
// Update_BucketMetainfo_By_ProjectId_And_Name acquires (or reuses) the
// transaction for this Rx and delegates the update to it.
func (rx *Rx) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field,
	update BucketMetainfo_Update_Fields) (
	bucket_metainfo *BucketMetainfo, err error) {
	tx, err := rx.getTx(ctx)
	if err != nil {
		return nil, err
	}
	return tx.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name, update)
}
func (rx *Rx) Update_CertRecord_By_Id(ctx context.Context,
certRecord_id CertRecord_Id_Field,
update CertRecord_Update_Fields) (
@ -15216,6 +15411,12 @@ type Methods interface {
update ApiKey_Update_Fields) (
api_key *ApiKey, err error)
Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
bucket_metainfo_name BucketMetainfo_Name_Field,
update BucketMetainfo_Update_Fields) (
bucket_metainfo *BucketMetainfo, err error)
Update_CertRecord_By_Id(ctx context.Context,
certRecord_id CertRecord_Id_Field,
update CertRecord_Update_Fields) (

View File

@ -117,6 +117,13 @@ func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID, li
return m.db.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
}
// UpdateBucket updates an existing bucket
// while holding the wrapper's mutex, serializing access to the
// underlying BucketsDB implementation for the duration of the call.
func (m *lockedBuckets) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
m.Lock()
defer m.Unlock()
return m.db.UpdateBucket(ctx, bucket)
}
// CertDB returns database for storing uplink's public key & ID
func (m *locked) CertDB() certdb.DB {
m.Lock()

View File

@ -75,14 +75,19 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project)
return nil, err
}
partnerID := dbx.Project_Create_Fields{}
if !project.PartnerID.IsZero() {
partnerID = dbx.Project_Create_Fields{
PartnerId: dbx.Project_PartnerId(project.PartnerID[:]),
}
}
createdProject, err := projects.db.Create_Project(ctx,
dbx.Project_Id(projectID[:]),
dbx.Project_Name(project.Name),
dbx.Project_Description(project.Description),
dbx.Project_UsageLimit(0),
dbx.Project_Create_Fields{
PartnerId: dbx.Project_PartnerId(project.PartnerID[:]),
},
partnerID,
)
if err != nil {

View File

@ -236,19 +236,26 @@ func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoR
}
// CreateBucket creates a new bucket
func (client *Client) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
func (client *Client) CreateBucket(ctx context.Context, bucket storj.Bucket) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
req := convertBucketToProtoRequest(bucket)
req, err := convertBucketToProtoRequest(bucket)
if err != nil {
return respBucket, Error.Wrap(err)
}
resp, err := client.client.CreateBucket(ctx, &req)
if err != nil {
return storj.Bucket{}, Error.Wrap(err)
}
return convertProtoToBucket(resp.Bucket), nil
respBucket, err = convertProtoToBucket(resp.Bucket)
if err != nil {
return respBucket, Error.Wrap(err)
}
return respBucket, nil
}
// GetBucket returns a bucket
func (client *Client) GetBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) {
func (client *Client) GetBucket(ctx context.Context, bucketName string) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
resp, err := client.client.GetBucket(ctx, &pb.BucketGetRequest{Name: []byte(bucketName)})
if err != nil {
@ -257,7 +264,12 @@ func (client *Client) GetBucket(ctx context.Context, bucketName string) (_ storj
}
return storj.Bucket{}, Error.Wrap(err)
}
return convertProtoToBucket(resp.Bucket), nil
respBucket, err = convertProtoToBucket(resp.Bucket)
if err != nil {
return respBucket, Error.Wrap(err)
}
return respBucket, nil
}
// DeleteBucket deletes a bucket
@ -298,11 +310,16 @@ func (client *Client) ListBuckets(ctx context.Context, listOpts storj.BucketList
return resultBucketList, nil
}
func convertBucketToProtoRequest(bucket storj.Bucket) pb.BucketCreateRequest {
func convertBucketToProtoRequest(bucket storj.Bucket) (bucketReq pb.BucketCreateRequest, err error) {
rs := bucket.DefaultRedundancyScheme
partnerID, err := bucket.PartnerID.MarshalJSON()
if err != nil {
return bucketReq, Error.Wrap(err)
}
return pb.BucketCreateRequest{
Name: []byte(bucket.Name),
PathCipher: pb.CipherSuite(bucket.PathCipher),
PartnerId: partnerID,
DefaultSegmentSize: bucket.DefaultSegmentsSize,
DefaultRedundancyScheme: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_SchemeType(rs.Algorithm),
@ -316,14 +333,20 @@ func convertBucketToProtoRequest(bucket storj.Bucket) pb.BucketCreateRequest {
CipherSuite: pb.CipherSuite(bucket.DefaultEncryptionParameters.CipherSuite),
BlockSize: int64(bucket.DefaultEncryptionParameters.BlockSize),
},
}
}, nil
}
func convertProtoToBucket(pbBucket *pb.Bucket) storj.Bucket {
func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error) {
defaultRS := pbBucket.GetDefaultRedundancyScheme()
defaultEP := pbBucket.GetDefaultEncryptionParameters()
var partnerID uuid.UUID
err = partnerID.UnmarshalJSON(pbBucket.GetPartnerId())
if err != nil && !partnerID.IsZero() {
return bucket, errs.New("Invalid uuid")
}
return storj.Bucket{
Name: string(pbBucket.GetName()),
PartnerID: partnerID,
PathCipher: storj.CipherSuite(pbBucket.GetPathCipher()),
Created: pbBucket.GetCreatedAt(),
DefaultSegmentsSize: pbBucket.GetDefaultSegmentSize(),
@ -339,7 +362,7 @@ func convertProtoToBucket(pbBucket *pb.Bucket) storj.Bucket {
CipherSuite: storj.CipherSuite(defaultEP.CipherSuite),
BlockSize: int32(defaultEP.BlockSize),
},
}
}, nil
}
// BeginObject begins object creation