// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"errors"

	"github.com/zeebo/errs"

	"storj.io/common/macaroon"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/satellitedb/dbx"
)
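
// bucketsDB is the satellitedb implementation of the buckets database,
// backed by the dbx-generated bucket_metainfos table.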
type bucketsDB struct {
	db *satelliteDB
}

// CreateBucket creates a new bucket.
func (db *bucketsDB) CreateBucket(ctx context.Context, bucket buckets.Bucket) (_ buckets.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	optionalFields := dbx.BucketMetainfo_Create_Fields{}
	if !bucket.PartnerID.IsZero() || bucket.UserAgent != nil {
		optionalFields = dbx.BucketMetainfo_Create_Fields{
			PartnerId: dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:]),
			UserAgent: dbx.BucketMetainfo_UserAgent(bucket.UserAgent),
		}
	}
	optionalFields.Placement = dbx.BucketMetainfo_Placement(int(bucket.Placement))

	row, err := db.db.Create_BucketMetainfo(ctx,
		dbx.BucketMetainfo_Id(bucket.ID[:]),
		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
		dbx.BucketMetainfo_Name([]byte(bucket.Name)),
		dbx.BucketMetainfo_PathCipher(int(bucket.PathCipher)),
		dbx.BucketMetainfo_DefaultSegmentSize(int(bucket.DefaultSegmentsSize)),
		dbx.BucketMetainfo_DefaultEncryptionCipherSuite(int(bucket.DefaultEncryptionParameters.CipherSuite)),
		dbx.BucketMetainfo_DefaultEncryptionBlockSize(int(bucket.DefaultEncryptionParameters.BlockSize)),
		dbx.BucketMetainfo_DefaultRedundancyAlgorithm(int(bucket.DefaultRedundancyScheme.Algorithm)),
		dbx.BucketMetainfo_DefaultRedundancyShareSize(int(bucket.DefaultRedundancyScheme.ShareSize)),
		dbx.BucketMetainfo_DefaultRedundancyRequiredShares(int(bucket.DefaultRedundancyScheme.RequiredShares)),
		dbx.BucketMetainfo_DefaultRedundancyRepairShares(int(bucket.DefaultRedundancyScheme.RepairShares)),
		dbx.BucketMetainfo_DefaultRedundancyOptimalShares(int(bucket.DefaultRedundancyScheme.OptimalShares)),
		dbx.BucketMetainfo_DefaultRedundancyTotalShares(int(bucket.DefaultRedundancyScheme.TotalShares)),
		optionalFields,
	)
	if err != nil {
		return buckets.Bucket{}, buckets.ErrBucket.Wrap(err)
	}

	bucket, err = convertDBXtoBucket(row)
	if err != nil {
		return buckets.Bucket{}, buckets.ErrBucket.Wrap(err)
	}
	return bucket, nil
}
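
// exampleCreateBucket is a minimal, hypothetical sketch (not part of the
// production flow) of how a caller might drive CreateBucket; the bucket name
// and path cipher below are illustrative assumptions only.
func exampleCreateBucket(ctx context.Context, db *bucketsDB, projectID uuid.UUID) (buckets.Bucket, error) {
	id, err := uuid.New()
	if err != nil {
		return buckets.Bucket{}, err
	}
	return db.CreateBucket(ctx, buckets.Bucket{
		ID:         id,
		Name:       "example-bucket",
		ProjectID:  projectID,
		PathCipher: storj.EncAESGCM,
	})
}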

// GetBucket returns a bucket.
func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ buckets.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxBucket, err := db.db.Get_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return buckets.Bucket{}, buckets.ErrBucketNotFound.New("%s", bucketName)
		}
		return buckets.Bucket{}, buckets.ErrBucket.Wrap(err)
	}
	return convertDBXtoBucket(dbxBucket)
}
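
// exampleBucketExists is a hypothetical sketch (not part of the production
// flow) showing how callers can distinguish a missing bucket from other
// database errors using the buckets error classes.
func exampleBucketExists(ctx context.Context, db *bucketsDB, bucketName []byte, projectID uuid.UUID) (bool, error) {
	_, err := db.GetBucket(ctx, bucketName, projectID)
	if buckets.ErrBucketNotFound.Has(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}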

// GetBucketPlacement returns the placement constraint identifier for a bucket.
func (db *bucketsDB) GetBucketPlacement(ctx context.Context, bucketName []byte, projectID uuid.UUID) (placement storj.PlacementConstraint, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxPlacement, err := db.db.Get_BucketMetainfo_Placement_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return storj.EveryCountry, buckets.ErrBucketNotFound.New("%s", bucketName)
		}
		return storj.EveryCountry, buckets.ErrBucket.Wrap(err)
	}
	// A NULL placement column means the bucket has no placement constraint.
	placement = storj.EveryCountry
	if dbxPlacement.Placement != nil {
		placement = storj.PlacementConstraint(*dbxPlacement.Placement)
	}

	return placement, nil
}

// GetMinimalBucket returns an existing bucket with a minimal number of fields.
func (db *bucketsDB) GetMinimalBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ buckets.MinimalBucket, err error) {
	defer mon.Task()(&ctx)(&err)
	// Only created_at is fetched from the database; the name is echoed back
	// from the argument.
	row, err := db.db.Get_BucketMetainfo_CreatedAt_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return buckets.MinimalBucket{}, buckets.ErrBucketNotFound.New("%s", bucketName)
		}
		return buckets.MinimalBucket{}, buckets.ErrBucket.Wrap(err)
	}
	return buckets.MinimalBucket{
		Name:      bucketName,
		CreatedAt: row.CreatedAt,
	}, nil
}

// HasBucket reports whether a bucket exists.
func (db *bucketsDB) HasBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (exists bool, err error) {
	defer mon.Task()(&ctx)(&err)

	exists, err = db.db.Has_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	return exists, buckets.ErrBucket.Wrap(err)
}

// GetBucketID returns an existing bucket id.
func (db *bucketsDB) GetBucketID(ctx context.Context, bucket metabase.BucketLocation) (_ uuid.UUID, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxID, err := db.db.Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
		dbx.BucketMetainfo_Name([]byte(bucket.BucketName)),
	)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return uuid.UUID{}, buckets.ErrBucketNotFound.New("%s", bucket.BucketName)
		}
		return uuid.UUID{}, buckets.ErrBucket.Wrap(err)
	}

	id, err := uuid.FromBytes(dbxID.Id)
	if err != nil {
		return id, buckets.ErrBucket.Wrap(err)
	}
	return id, nil
}

// UpdateBucket updates a bucket.
func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket buckets.Bucket) (_ buckets.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	var updateFields dbx.BucketMetainfo_Update_Fields
	if !bucket.PartnerID.IsZero() {
		updateFields.PartnerId = dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:])
	}

	if bucket.UserAgent != nil {
		updateFields.UserAgent = dbx.BucketMetainfo_UserAgent(bucket.UserAgent)
	}

	// Unlike the optional fields above, placement is always written, even
	// when it is the zero value.
	updateFields.Placement = dbx.BucketMetainfo_Placement(int(bucket.Placement))

	dbxBucket, err := db.db.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]), dbx.BucketMetainfo_Name([]byte(bucket.Name)), updateFields)
	if err != nil {
		return buckets.Bucket{}, buckets.ErrBucket.Wrap(err)
	}
	return convertDBXtoBucket(dbxBucket)
}
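
// exampleSetBucketUserAgent is a hypothetical sketch (not part of the
// production flow) of updating a single optional field via UpdateBucket; the
// full bucket is passed so the unconditional placement write keeps the
// bucket's current value.
func exampleSetBucketUserAgent(ctx context.Context, db *bucketsDB, bucket buckets.Bucket) (buckets.Bucket, error) {
	bucket.UserAgent = []byte("example-agent/1.0")
	return db.UpdateBucket(ctx, bucket)
}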

// DeleteBucket deletes a bucket.
func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)
	deleted, err := db.db.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		return buckets.ErrBucket.Wrap(err)
	}
	if !deleted {
		return buckets.ErrBucketNotFound.New("%s", bucketName)
	}
	return nil
}

// ListBuckets returns a list of buckets for a project.
func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts buckets.ListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList buckets.List, err error) {
	defer mon.Task()(&ctx)(&err)

	const defaultListLimit = 10000
	if listOpts.Limit < 1 {
		listOpts.Limit = defaultListLimit
	}
	limit := listOpts.Limit + 1 // add one to detect More

	for {
		var dbxBuckets []*dbx.BucketMetainfo
		switch listOpts.Direction {
		// For simplicity we are only supporting the forward direction for listing buckets.
		case buckets.DirectionForward:
			dbxBuckets, err = db.db.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx,
				dbx.BucketMetainfo_ProjectId(projectID[:]),
				dbx.BucketMetainfo_Name([]byte(listOpts.Cursor)),
				limit,
				0,
			)

		// After is only called by BucketListOptions.NextPage and is the paginated forward direction.
		case buckets.DirectionAfter:
			dbxBuckets, err = db.db.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx,
				dbx.BucketMetainfo_ProjectId(projectID[:]),
				dbx.BucketMetainfo_Name([]byte(listOpts.Cursor)),
				limit,
				0,
			)
		default:
			return bucketList, errors.New("unknown list direction")
		}
		if err != nil {
			return bucketList, buckets.ErrBucket.Wrap(err)
		}

		bucketList.More = len(dbxBuckets) > listOpts.Limit
		if bucketList.More {
			// If more buckets than listOpts.Limit were returned,
			// then remove the extra buckets so that we do not return
			// more than the limit.
			dbxBuckets = dbxBuckets[0:listOpts.Limit]
		}

		if bucketList.Items == nil {
			bucketList.Items = make([]buckets.Bucket, 0, len(dbxBuckets))
		}

		for _, dbxBucket := range dbxBuckets {
			// Check that the bucket is allowed to be viewed.
			_, bucketAllowed := allowedBuckets.Buckets[string(dbxBucket.Name)]
			if bucketAllowed || allowedBuckets.All {
				item, err := convertDBXtoBucket(dbxBucket)
				if err != nil {
					return bucketList, buckets.ErrBucket.Wrap(err)
				}
				bucketList.Items = append(bucketList.Items, item)
			}
		}

		if len(bucketList.Items) < listOpts.Limit && bucketList.More {
			// If we filtered out disallowed buckets, then get more buckets
			// out of the database so that we return `limit` number of buckets.
			listOpts = buckets.ListOptions{
				Cursor:    string(dbxBuckets[len(dbxBuckets)-1].Name),
				Limit:     listOpts.Limit,
				Direction: buckets.DirectionAfter,
			}
			continue
		}
		break
	}

	return bucketList, nil
}
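
// exampleListAllBuckets is a hypothetical sketch (not part of the production
// flow) of paging through every bucket of a project; the all-access
// macaroon.AllowedBuckets value is an illustrative assumption.
func exampleListAllBuckets(ctx context.Context, db *bucketsDB, projectID uuid.UUID) ([]buckets.Bucket, error) {
	var all []buckets.Bucket
	listOpts := buckets.ListOptions{Direction: buckets.DirectionForward}
	for {
		list, err := db.ListBuckets(ctx, projectID, listOpts, macaroon.AllowedBuckets{All: true})
		if err != nil {
			return nil, err
		}
		all = append(all, list.Items...)
		if !list.More {
			return all, nil
		}
		// Advance the cursor past the last returned bucket, mirroring the
		// pagination logic inside ListBuckets itself.
		listOpts = buckets.ListOptions{
			Cursor:    list.Items[len(list.Items)-1].Name,
			Limit:     listOpts.Limit,
			Direction: buckets.DirectionAfter,
		}
	}
}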

// CountBuckets returns the number of buckets a project currently has.
func (db *bucketsDB) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
	count64, err := db.db.Count_BucketMetainfo_Name_By_ProjectId(ctx, dbx.BucketMetainfo_ProjectId(projectID[:]))
	if err != nil {
		return -1, err
	}
	return int(count64), nil
}
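
// convertDBXtoBucket converts a dbx bucket metainfo row into a buckets.Bucket.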
func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket buckets.Bucket, err error) {
	id, err := uuid.FromBytes(dbxBucket.Id)
	if err != nil {
		return bucket, buckets.ErrBucket.Wrap(err)
	}
	project, err := uuid.FromBytes(dbxBucket.ProjectId)
	if err != nil {
		return bucket, buckets.ErrBucket.Wrap(err)
	}

	bucket = buckets.Bucket{
		ID:                  id,
		Name:                string(dbxBucket.Name),
		ProjectID:           project,
		Created:             dbxBucket.CreatedAt,
		PathCipher:          storj.CipherSuite(dbxBucket.PathCipher),
		DefaultSegmentsSize: int64(dbxBucket.DefaultSegmentSize),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(dbxBucket.DefaultRedundancyAlgorithm),
			ShareSize:      int32(dbxBucket.DefaultRedundancyShareSize),
			RequiredShares: int16(dbxBucket.DefaultRedundancyRequiredShares),
			RepairShares:   int16(dbxBucket.DefaultRedundancyRepairShares),
			OptimalShares:  int16(dbxBucket.DefaultRedundancyOptimalShares),
			TotalShares:    int16(dbxBucket.DefaultRedundancyTotalShares),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(dbxBucket.DefaultEncryptionCipherSuite),
			BlockSize:   int32(dbxBucket.DefaultEncryptionBlockSize),
		},
	}

	if dbxBucket.Placement != nil {
		bucket.Placement = storj.PlacementConstraint(*dbxBucket.Placement)
	}

	if dbxBucket.PartnerId != nil {
		partnerID, err := uuid.FromBytes(dbxBucket.PartnerId)
		if err != nil {
			return bucket, buckets.ErrBucket.Wrap(err)
		}
		bucket.PartnerID = partnerID
	}

	if dbxBucket.UserAgent != nil {
		bucket.UserAgent = dbxBucket.UserAgent
	}

	return bucket, nil
}

// IterateBucketLocations iterates through all bucket locations, in batches of
// at most limit, starting after the given projectID and bucketName cursor.
func (db *bucketsDB) IterateBucketLocations(ctx context.Context, projectID uuid.UUID, bucketName string, limit int, fn func([]metabase.BucketLocation) error) (more bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var result []metabase.BucketLocation

	moreLimit := limit + 1 // fetch one extra row to detect whether more remain
	rows, err := db.db.QueryContext(ctx, `
		SELECT project_id, name
		FROM bucket_metainfos
		WHERE (project_id, name) > ($1, $2)
		GROUP BY (project_id, name)
		ORDER BY (project_id, name) ASC LIMIT $3
	`, projectID, bucketName, moreLimit)
	if err != nil {
		return false, buckets.ErrBucket.New("BatchBuckets query error: %s", err)
	}
	defer func() {
		err = errs.Combine(err, Error.Wrap(rows.Close()))
	}()

	for rows.Next() {
		var bucketLocation metabase.BucketLocation

		if err = rows.Scan(&bucketLocation.ProjectID, &bucketLocation.BucketName); err != nil {
			return false, buckets.ErrBucket.New("bucket location scan error: %s", err)
		}

		result = append(result, bucketLocation)
	}

	if err = rows.Err(); err != nil {
		return false, buckets.ErrBucket.Wrap(err)
	}

	if len(result) == 0 {
		return false, nil
	}

	if len(result) > limit {
		// Drop the extra row used for the more check before calling fn.
		return true, Error.Wrap(fn(result[:len(result)-1]))
	}

	return false, Error.Wrap(fn(result))
}
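
// exampleCountBucketLocations is a hypothetical sketch (not part of the
// production flow) of draining IterateBucketLocations page by page from the
// zero cursor; the page size of 1000 is an illustrative assumption.
func exampleCountBucketLocations(ctx context.Context, db *bucketsDB) (int, error) {
	var projectID uuid.UUID
	name, total := "", 0
	for {
		var page []metabase.BucketLocation
		more, err := db.IterateBucketLocations(ctx, projectID, name, 1000, func(locations []metabase.BucketLocation) error {
			page = locations
			return nil
		})
		if err != nil {
			return 0, err
		}
		total += len(page)
		if !more || len(page) == 0 {
			return total, nil
		}
		// Continue after the last location of this page.
		last := page[len(page)-1]
		projectID, name = last.ProjectID, string(last.BucketName)
	}
}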