storj/satellite/satellitedb/buckets.go
paul cannon b5ddfc6fa5 satellite/satellitedb: unexport satellitedb.DB
Backstory: I needed a better way to pass around information about the
underlying driver and implementation to all the various db-using things
in satellitedb (at least until some new "cockroach driver" support makes
it to DBX). After hitting a few dead ends, I decided I wanted to have a
type that could act like a *dbx.DB but which would also carry
information about the implementation, etc. Then I could pass around that
type to all the things in satellitedb that previously wanted *dbx.DB.

But then I realized that *satellitedb.DB was, essentially, exactly that
already.

One thing that might have kept *satellitedb.DB from being directly
usable was that embedding a *dbx.DB inside it would make a lot of dbx
methods publicly available on a *satellitedb.DB instance that previously
were nicely encapsulated and hidden. But after a quick look, I realized
that _nothing_ outside of satellite/satellitedb even needs to use
satellitedb.DB at all. It didn't even need to be exported, except for
some trivially-replaceable code in migrate_postgres_test.go. And once
I made it unexported, any concerns about exposing new methods on it were
entirely moot.

So I have here changed the exported *satellitedb.DB type into the
unexported *satellitedb.satelliteDB type, and I have changed all the
places here that wanted raw dbx.DB handles to use this new type instead.
Now they can just take a gander at the implementation member on it and
know all they need to know about the underlying database.

This will make it possible for some other pending code here to
differentiate between postgres and cockroach backends.

Change-Id: I27af99f8ae23b50782333da5277b553b34634edc
2019-12-16 19:09:30 +00:00
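
A minimal sketch of the shape this change describes (illustrative only; it
assumes dbutil.Implementation carries values such as dbutil.Postgres and
dbutil.Cockroach, and the field names are not necessarily the exact ones in
satellitedb.go):

type satelliteDB struct {
	*dbx.DB

	// implementation records which backend this handle talks to, so
	// db-using code can branch on it without needing a raw *dbx.DB.
	implementation dbutil.Implementation
}

// backendName is a hypothetical helper showing the postgres/cockroach
// differentiation this change enables.
func backendName(db *satelliteDB) string {
	switch db.implementation {
	case dbutil.Cockroach:
		return "cockroach"
	case dbutil.Postgres:
		return "postgres"
	default:
		return "unknown"
	}
}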


// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"errors"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/private/dbutil"
	"storj.io/storj/satellite/metainfo"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)
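
// bucketsDB is the satellitedb implementation of metainfo.BucketsDB.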
type bucketsDB struct {
	db *satelliteDB
}

// Buckets returns database for interacting with buckets
func (db *satelliteDB) Buckets() metainfo.BucketsDB {
	return &bucketsDB{db: db}
}

// CreateBucket creates a new bucket
func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
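	// PartnerId is an optional column; only include it in the insert
	// when the bucket actually has a partner set.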
	partnerID := dbx.BucketMetainfo_Create_Fields{}
	if !bucket.PartnerID.IsZero() {
		partnerID = dbx.BucketMetainfo_Create_Fields{
			PartnerId: dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:]),
		}
	}
	row, err := db.db.Create_BucketMetainfo(ctx,
		dbx.BucketMetainfo_Id(bucket.ID[:]),
		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
		dbx.BucketMetainfo_Name([]byte(bucket.Name)),
		dbx.BucketMetainfo_PathCipher(int(bucket.PathCipher)),
		dbx.BucketMetainfo_DefaultSegmentSize(int(bucket.DefaultSegmentsSize)),
		dbx.BucketMetainfo_DefaultEncryptionCipherSuite(int(bucket.DefaultEncryptionParameters.CipherSuite)),
		dbx.BucketMetainfo_DefaultEncryptionBlockSize(int(bucket.DefaultEncryptionParameters.BlockSize)),
		dbx.BucketMetainfo_DefaultRedundancyAlgorithm(int(bucket.DefaultRedundancyScheme.Algorithm)),
		dbx.BucketMetainfo_DefaultRedundancyShareSize(int(bucket.DefaultRedundancyScheme.ShareSize)),
		dbx.BucketMetainfo_DefaultRedundancyRequiredShares(int(bucket.DefaultRedundancyScheme.RequiredShares)),
		dbx.BucketMetainfo_DefaultRedundancyRepairShares(int(bucket.DefaultRedundancyScheme.RepairShares)),
		dbx.BucketMetainfo_DefaultRedundancyOptimalShares(int(bucket.DefaultRedundancyScheme.OptimalShares)),
		dbx.BucketMetainfo_DefaultRedundancyTotalShares(int(bucket.DefaultRedundancyScheme.TotalShares)),
		partnerID,
	)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	bucket, err = convertDBXtoBucket(row)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	return bucket, nil
}

// GetBucket returns a bucket
func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxBucket, err := db.db.Get_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
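		// A missing row maps to the domain-level ErrBucketNotFound.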
		if err == sql.ErrNoRows {
			return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
		}
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	return convertDBXtoBucket(dbxBucket)
}

// UpdateBucket updates a bucket
func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
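	// Only the partner ID can be updated here, and it must be non-zero.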
	if bucket.PartnerID.IsZero() {
		return storj.Bucket{}, Error.New("partnerId is zero")
	}
	var updateFields dbx.BucketMetainfo_Update_Fields
	updateFields.PartnerId = dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:])
	dbxBucket, err := db.db.Update_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
		dbx.BucketMetainfo_Name([]byte(bucket.Name)),
		updateFields,
	)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	return convertDBXtoBucket(dbxBucket)
}

// DeleteBucket deletes a bucket
func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, err = db.db.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		return storj.ErrBucket.Wrap(err)
	}
	return nil
}

// ListBuckets returns a list of buckets for a project
func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) {
	defer mon.Task()(&ctx)(&err)

	const defaultListLimit = 10000
	if listOpts.Limit < 1 {
		listOpts.Limit = defaultListLimit
	}
	limit := listOpts.Limit + 1 // add one to detect More
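
	// Disallowed buckets are filtered out below, so one query may not
	// fill a page; keep fetching until the page is full or no rows remain.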
	for {
		var dbxBuckets []*dbx.BucketMetainfo
		switch listOpts.Direction {
		// For simplicity we are only supporting the forward direction for listing buckets
		case storj.Forward:
			dbxBuckets, err = db.db.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx,
				dbx.BucketMetainfo_ProjectId(projectID[:]),
				dbx.BucketMetainfo_Name([]byte(listOpts.Cursor)),
				limit,
				0,
			)

		// After is only called by BucketListOptions.NextPage and is the paginated Forward direction
		case storj.After:
			dbxBuckets, err = db.db.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx,
				dbx.BucketMetainfo_ProjectId(projectID[:]),
				dbx.BucketMetainfo_Name([]byte(listOpts.Cursor)),
				limit,
				0,
			)
		default:
			return bucketList, errors.New("unknown list direction")
		}
		if err != nil {
			return bucketList, storj.ErrBucket.Wrap(err)
		}

		bucketList.More = len(dbxBuckets) > listOpts.Limit
		if bucketList.More {
			// If there are more buckets than listOpts.Limit returned,
			// then remove the extra buckets so that we do not return
			// more than the limit.
			dbxBuckets = dbxBuckets[0:listOpts.Limit]
		}

		if bucketList.Items == nil {
			bucketList.Items = make([]storj.Bucket, 0, len(dbxBuckets))
		}

		for _, dbxBucket := range dbxBuckets {
			// Check that the bucket is allowed to be viewed
			_, bucketAllowed := allowedBuckets.Buckets[string(dbxBucket.Name)]
			if bucketAllowed || allowedBuckets.All {
				item, err := convertDBXtoBucket(dbxBucket)
				if err != nil {
					return bucketList, storj.ErrBucket.Wrap(err)
				}
				bucketList.Items = append(bucketList.Items, item)
			}
		}

		if len(bucketList.Items) < listOpts.Limit && bucketList.More {
			// If we filtered out disallowed buckets, then get more buckets
			// from the database so that we return `limit` number of buckets.
			listOpts = storj.BucketListOptions{
				Cursor:    string(dbxBuckets[len(dbxBuckets)-1].Name),
				Limit:     listOpts.Limit,
				Direction: storj.After,
			}
			continue
		}
		break
	}
	return bucketList, nil
}
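
// convertDBXtoBucket converts a dbx.BucketMetainfo row into a storj.Bucket.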
func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket storj.Bucket, err error) {
	id, err := dbutil.BytesToUUID(dbxBucket.Id)
	if err != nil {
		return bucket, storj.ErrBucket.Wrap(err)
	}
	project, err := dbutil.BytesToUUID(dbxBucket.ProjectId)
	if err != nil {
		return bucket, storj.ErrBucket.Wrap(err)
	}
	bucket = storj.Bucket{
		ID:                  id,
		Name:                string(dbxBucket.Name),
		ProjectID:           project,
		Created:             dbxBucket.CreatedAt,
		PathCipher:          storj.CipherSuite(dbxBucket.PathCipher),
		DefaultSegmentsSize: int64(dbxBucket.DefaultSegmentSize),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(dbxBucket.DefaultRedundancyAlgorithm),
			ShareSize:      int32(dbxBucket.DefaultRedundancyShareSize),
			RequiredShares: int16(dbxBucket.DefaultRedundancyRequiredShares),
			RepairShares:   int16(dbxBucket.DefaultRedundancyRepairShares),
			OptimalShares:  int16(dbxBucket.DefaultRedundancyOptimalShares),
			TotalShares:    int16(dbxBucket.DefaultRedundancyTotalShares),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(dbxBucket.DefaultEncryptionCipherSuite),
			BlockSize:   int32(dbxBucket.DefaultEncryptionBlockSize),
		},
	}
	if dbxBucket.PartnerId != nil {
		partnerID, err := dbutil.BytesToUUID(dbxBucket.PartnerId)
		if err != nil {
			return bucket, storj.ErrBucket.Wrap(err)
		}
		bucket.PartnerID = partnerID
	}
	return bucket, nil
}