satellite/metainfo: remove BucketsDB.ListAllBuckets

The ListAllBuckets implementation was buggy; remove it altogether.

Change-Id: Id457ba5f4d793156af3fc2071f74ce1be17ba804
This commit is contained in:
Egon Elbre 2021-02-18 16:07:56 +02:00
parent 8e95e76c35
commit 1cb6376daa
5 changed files with 0 additions and 300 deletions

View File

@ -12,18 +12,6 @@ import (
"storj.io/storj/satellite/metainfo/metabase"
)
// ListAllBucketsCursor defines cursor for ListAllBuckets listing.
type ListAllBucketsCursor struct {
// ProjectID is the project to resume listing from (the query compares project_id >= ProjectID).
ProjectID uuid.UUID
// BucketName is the bucket name to resume listing after (the query compares name > BucketName).
BucketName []byte
}
// ListAllBucketsOptions defines ListAllBuckets listing options.
type ListAllBucketsOptions struct {
// Cursor is the position to resume listing from.
Cursor ListAllBucketsCursor
// Limit is the maximum number of buckets to return; out-of-range values are clamped by the implementation.
Limit int
}
// BucketsDB is the interface for the database to interact with buckets.
//
// architecture: Database
@ -40,8 +28,6 @@ type BucketsDB interface {
DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
// List returns all buckets for a project
ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error)
// ListAllBuckets returns a list of all buckets.
ListAllBuckets(ctx context.Context, listOpts ListAllBucketsOptions) (bucketList storj.BucketList, err error)
// CountBuckets returns the number of buckets a project currently has
CountBuckets(ctx context.Context, projectID uuid.UUID) (int, error)
}

View File

@ -11,7 +11,6 @@ import (
"storj.io/common/macaroon"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/satellitedb/dbx"
)
@ -206,63 +205,6 @@ func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listO
return bucketList, nil
}
// ListAllBuckets returns a list of all buckets.
//
// NOTE(review): the keyset pagination here is buggy. The query filters on
// (project_id >= cursor.ProjectID AND name > cursor.BucketName), so once the
// cursor crosses a project boundary, buckets in later projects whose names
// sort <= cursor.BucketName are silently skipped. Correct pagination needs a
// tuple comparison: (project_id, name) > (cursor.ProjectID, cursor.BucketName).
func (db *bucketsDB) ListAllBuckets(ctx context.Context, listOpts metainfo.ListAllBucketsOptions) (bucketList storj.BucketList, err error) {
defer mon.Task()(&ctx)(&err)
// Clamp the requested limit into (0, defaultListLimit].
const defaultListLimit = 10000
if listOpts.Limit < 1 || listOpts.Limit > defaultListLimit {
listOpts.Limit = defaultListLimit
}
limit := listOpts.Limit + 1 // add one to detect More
// The dbx field wrapper needs a non-nil byte slice for the name bound.
if listOpts.Cursor.BucketName == nil {
listOpts.Cursor.BucketName = []byte{}
}
for {
// Fetch up to limit (= Limit+1) rows ordered by (project_id, name),
// starting at the cursor.
dbxBuckets, err := db.db.Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx,
dbx.BucketMetainfo_ProjectId(listOpts.Cursor.ProjectID[:]),
dbx.BucketMetainfo_Name(listOpts.Cursor.BucketName),
limit,
0,
)
if err != nil {
return bucketList, storj.ErrBucket.Wrap(err)
}
// The extra probe row coming back means there are more results.
bucketList.More = len(dbxBuckets) > listOpts.Limit
if bucketList.More {
// If there are more buckets than listOpts.limit returned,
// then remove the extra buckets so that we do not return
// more then the limit
dbxBuckets = dbxBuckets[0:listOpts.Limit]
}
if bucketList.Items == nil {
bucketList.Items = make([]storj.Bucket, 0, len(dbxBuckets))
}
for _, dbxBucket := range dbxBuckets {
item, err := convertDBXtoBucket(dbxBucket)
if err != nil {
return bucketList, storj.ErrBucket.Wrap(err)
}
bucketList.Items = append(bucketList.Items, item)
}
// NOTE(review): dead branch — when More is true, Items holds exactly
// listOpts.Limit entries, so this condition can never be satisfied and
// the enclosing for-loop always exits after a single iteration.
if len(bucketList.Items) < listOpts.Limit && bucketList.More {
lastBucket := bucketList.Items[len(bucketList.Items)-1]
listOpts.Cursor.ProjectID = lastBucket.ProjectID
listOpts.Cursor.BucketName = []byte(lastBucket.Name)
continue
}
break
}
return bucketList, nil
}
// CountBuckets returns the number of buckets a project currently has.
func (db *bucketsDB) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
count64, err := db.db.Count_BucketMetainfo_Name_By_ProjectId(ctx, dbx.BucketMetainfo_ProjectId(projectID[:]))

View File

@ -1,103 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb_test
import (
"bytes"
"strconv"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
// TestListAllBuckets exercises BucketsDB.ListAllBuckets: empty DB, a full
// listing across two projects, cursor-based resumption (project boundary and
// mid-project), and limit/More handling.
func TestListAllBuckets(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		// Listing with no buckets returns an empty, non-More result.
		list, err := db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{})
		require.NoError(t, err)
		require.Equal(t, 0, len(list.Items))

		first, err := db.Console().Projects().Insert(ctx, &console.Project{
			Name: "first",
			ID:   testrand.UUID(),
		})
		require.NoError(t, err)
		second, err := db.Console().Projects().Insert(ctx, &console.Project{
			Name: "second",
			ID:   testrand.UUID(),
		})
		require.NoError(t, err)

		// Order projects by ID bytes so expectations match the listing
		// order, which sorts by (project_id, name).
		projects := []*console.Project{first, second}
		if bytes.Compare(first.ID[:], second.ID[:]) > 0 {
			projects = []*console.Project{second, first}
		}

		// Create half of the buckets in each project. Derive the per-project
		// count from len(buckets) instead of hard-coding 5, so the fixture
		// size can be changed in one place.
		buckets := make([]storj.Bucket, 10)
		half := len(buckets) / 2
		for i, project := range projects {
			for index := 0; index < half; index++ {
				var err error
				buckets[index+(i*half)], err = db.Buckets().CreateBucket(ctx, storj.Bucket{
					ID:        testrand.UUID(),
					Name:      "bucket-test-" + strconv.Itoa(index),
					ProjectID: project.ID,
				})
				require.NoError(t, err)
			}
		}

		// Full listing returns every bucket in order.
		list, err = db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{})
		require.NoError(t, err)
		require.Equal(t, len(buckets), len(list.Items))
		require.False(t, list.More)
		require.Zero(t, cmp.Diff(buckets, list.Items))

		// Cursor at the second project skips the first project's buckets.
		list, err = db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{
			Cursor: metainfo.ListAllBucketsCursor{
				ProjectID: projects[1].ID,
			},
		})
		require.NoError(t, err)
		require.Equal(t, half, len(list.Items))
		require.Zero(t, cmp.Diff(buckets[half:], list.Items))

		// Cursor mid-project resumes strictly after the named bucket.
		list, err = db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{
			Cursor: metainfo.ListAllBucketsCursor{
				ProjectID:  projects[1].ID,
				BucketName: []byte("bucket-test-2"),
			},
		})
		require.NoError(t, err)
		require.Equal(t, 2, len(list.Items))
		require.False(t, list.More)
		require.Zero(t, cmp.Diff(buckets[8:], list.Items))

		// Cursor after the last bucket yields nothing.
		list, err = db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{
			Cursor: metainfo.ListAllBucketsCursor{
				ProjectID:  projects[1].ID,
				BucketName: []byte("bucket-test-4"),
			},
		})
		require.NoError(t, err)
		require.Equal(t, 0, len(list.Items))

		// A limit smaller than the total sets More.
		list, err = db.Buckets().ListAllBuckets(ctx, metainfo.ListAllBucketsOptions{
			Limit: 2,
		})
		require.NoError(t, err)
		require.Equal(t, 2, len(list.Items))
		require.True(t, list.More)
		require.Zero(t, cmp.Diff(buckets[:2], list.Items))
	})
}

View File

@ -1000,13 +1000,6 @@ read limitoffset ( // After
orderby asc bucket_metainfo.name
)
// Paged listing of all bucket_metainfos across projects, ordered by
// (project_id, name); cursor bounds are project_id >= ? and name > ?.
read limitoffset ( // After
select bucket_metainfo
where bucket_metainfo.project_id >= ?
where bucket_metainfo.name > ?
orderby asc bucket_metainfo.project_id bucket_metainfo.name
)
read count (
select bucket_metainfo.name
where bucket_metainfo.project_id = ?

View File

@ -12836,56 +12836,6 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy
}
// Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name
// selects up to `limit` bucket_metainfos rows (after skipping `offset`) where
// project_id >= the given value and name > the given value, ordered by
// (project_id, name) ascending.
//
// NOTE(review): the naming scheme and __sqlbundle/logStmt plumbing indicate
// this is dbx-generated code — change the .dbx definition and regenerate
// instead of hand-editing.
func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx context.Context,
bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
limit int, offset int64) (
rows []*BucketMetainfo, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id >= ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.project_id, bucket_metainfos.name LIMIT ? OFFSET ?")
var __values []interface{}
__values = append(__values, bucket_metainfo_project_id_greater_or_equal.value(), bucket_metainfo_name_greater.value())
__values = append(__values, limit, offset)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
// Re-run the whole query when obj.shouldRetry classifies the error as
// transient; any other error is wrapped via obj.makeErr.
for {
rows, err = func() (rows []*BucketMetainfo, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()
for __rows.Next() {
bucket_metainfo := &BucketMetainfo{}
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
if err != nil {
return nil, err
}
rows = append(rows, bucket_metainfo)
}
err = __rows.Err()
if err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}
}
func (obj *pgxImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
count int64, err error) {
@ -19674,56 +19624,6 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate
}
// Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name
// is the CockroachDB variant of the limited bucket_metainfos listing: up to
// `limit` rows (after skipping `offset`) where project_id >= the given value
// and name > the given value, ordered by (project_id, name) ascending.
//
// NOTE(review): the naming scheme and __sqlbundle/logStmt plumbing indicate
// this is dbx-generated code — change the .dbx definition and regenerate
// instead of hand-editing.
func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx context.Context,
bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
limit int, offset int64) (
rows []*BucketMetainfo, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id >= ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.project_id, bucket_metainfos.name LIMIT ? OFFSET ?")
var __values []interface{}
__values = append(__values, bucket_metainfo_project_id_greater_or_equal.value(), bucket_metainfo_name_greater.value())
__values = append(__values, limit, offset)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
// Re-run the whole query when obj.shouldRetry classifies the error as
// transient; any other error is wrapped via obj.makeErr.
for {
rows, err = func() (rows []*BucketMetainfo, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()
for __rows.Next() {
bucket_metainfo := &BucketMetainfo{}
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
if err != nil {
return nil, err
}
rows = append(rows, bucket_metainfo)
}
err = __rows.Err()
if err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}
}
func (obj *pgxcockroachImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
count int64, err error) {
@ -24072,18 +23972,6 @@ func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_N
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater, limit, offset)
}
// Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name
// delegates the limited bucket_metainfos listing to the transaction bound to rx.
func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx context.Context,
	bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	tx, err := rx.getTx(ctx)
	if err != nil {
		return nil, err
	}
	return tx.Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx, bucket_metainfo_project_id_greater_or_equal, bucket_metainfo_name_greater, limit, offset)
}
func (rx *Rx) Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
@ -25153,12 +25041,6 @@ type Methods interface {
limit int, offset int64) (
rows []*BucketMetainfo, err error)
Limited_BucketMetainfo_By_ProjectId_GreaterOrEqual_And_Name_Greater_OrderBy_Asc_ProjectId_Name(ctx context.Context,
bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
limit int, offset int64) (
rows []*BucketMetainfo, err error)
Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,