satellite/metainfo: add buckets RPC and database (#2460)
* add db interface and methods, add sa metainfo endpoints and svc
* add bucket metainfo svc funcs
* add sadb buckets
* bucket list gets all buckets
* filter buckets list on macaroon restrictions
* update pb cipher suite to be enum
* add conversion funcs
* updates per comments
* bucket settings should say default
* add direction to list buckets, add tests
* fix test bucket names
* lint err
* only support forward direction
* add comments
* minor refactoring
* make sure list up to limit
* update test
* update protolock file
* fix lint
* change per PR
This commit is contained in:
parent 16156e3b3d
commit f9696d6c5e
@@ -103,10 +103,10 @@ func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConf
	cfg.setDefaults()

	bucket = storj.Bucket{
-		PathCipher: cfg.PathCipher,
-		EncryptionParameters: cfg.EncryptionParameters,
-		RedundancyScheme: cfg.Volatile.RedundancyScheme,
-		SegmentsSize: cfg.Volatile.SegmentsSize.Int64(),
+		PathCipher: cfg.PathCipher,
+		DefaultEncryptionParameters: cfg.EncryptionParameters,
+		DefaultRedundancyScheme: cfg.Volatile.RedundancyScheme,
+		DefaultSegmentsSize: cfg.Volatile.SegmentsSize.Int64(),
	}
	return p.project.CreateBucket(ctx, name, &bucket)
}
@@ -139,10 +139,10 @@ func (p *Project) GetBucketInfo(ctx context.Context, bucket string) (b storj.Buc
	}
	cfg := &BucketConfig{
		PathCipher: b.PathCipher,
-		EncryptionParameters: b.EncryptionParameters,
+		EncryptionParameters: b.DefaultEncryptionParameters,
	}
-	cfg.Volatile.RedundancyScheme = b.RedundancyScheme
-	cfg.Volatile.SegmentsSize = memory.Size(b.SegmentsSize)
+	cfg.Volatile.RedundancyScheme = b.DefaultRedundancyScheme
+	cfg.Volatile.SegmentsSize = memory.Size(b.DefaultSegmentsSize)
	return b, cfg, nil
}
@@ -258,11 +258,11 @@ func (p *Project) updateBucket(ctx context.Context, bucketInfo storj.Bucket) (bu
	defer mon.Task()(&ctx)(&err)

	bucket = storj.Bucket{
-		Attribution: p.uplinkCfg.Volatile.PartnerID,
-		PathCipher: bucketInfo.PathCipher,
-		EncryptionParameters: bucketInfo.EncryptionParameters,
-		RedundancyScheme: bucketInfo.RedundancyScheme,
-		SegmentsSize: bucketInfo.SegmentsSize,
+		Attribution: p.uplinkCfg.Volatile.PartnerID,
+		PathCipher: bucketInfo.PathCipher,
+		DefaultEncryptionParameters: bucketInfo.DefaultEncryptionParameters,
+		DefaultRedundancyScheme: bucketInfo.DefaultRedundancyScheme,
+		DefaultSegmentsSize: bucketInfo.DefaultSegmentsSize,
	}
	return p.project.CreateBucket(ctx, bucketInfo.Name, &bucket)
}
@@ -15,10 +15,10 @@ func newBucketInfo(bucket *storj.Bucket) C.BucketInfo {
		name: C.CString(bucket.Name),
		created: C.int64_t(bucket.Created.Unix()),
		path_cipher: toCCipherSuite(bucket.PathCipher),
-		segment_size: C.uint64_t(bucket.SegmentsSize),
+		segment_size: C.uint64_t(bucket.DefaultSegmentsSize),

-		encryption_parameters: convertEncryptionParameters(&bucket.EncryptionParameters),
-		redundancy_scheme: convertRedundancyScheme(&bucket.RedundancyScheme),
+		encryption_parameters: convertEncryptionParameters(&bucket.DefaultEncryptionParameters),
+		redundancy_scheme: convertRedundancyScheme(&bucket.DefaultRedundancyScheme),
	}
}
@@ -57,18 +57,18 @@ func newBucketInfo(bucket storj.Bucket) *BucketInfo {
		Name: bucket.Name,
		Created: bucket.Created.UTC().UnixNano() / int64(time.Millisecond),
		PathCipher: byte(bucket.PathCipher),
-		SegmentsSize: bucket.SegmentsSize,
+		SegmentsSize: bucket.DefaultSegmentsSize,
		RedundancyScheme: &RedundancyScheme{
-			Algorithm: byte(bucket.RedundancyScheme.Algorithm),
-			ShareSize: bucket.RedundancyScheme.ShareSize,
-			RequiredShares: bucket.RedundancyScheme.RequiredShares,
-			RepairShares: bucket.RedundancyScheme.RepairShares,
-			OptimalShares: bucket.RedundancyScheme.OptimalShares,
-			TotalShares: bucket.RedundancyScheme.TotalShares,
+			Algorithm: byte(bucket.DefaultRedundancyScheme.Algorithm),
+			ShareSize: bucket.DefaultRedundancyScheme.ShareSize,
+			RequiredShares: bucket.DefaultRedundancyScheme.RequiredShares,
+			RepairShares: bucket.DefaultRedundancyScheme.RepairShares,
+			OptimalShares: bucket.DefaultRedundancyScheme.OptimalShares,
+			TotalShares: bucket.DefaultRedundancyScheme.TotalShares,
		},
		EncryptionParameters: &EncryptionParameters{
-			CipherSuite: byte(bucket.EncryptionParameters.CipherSuite),
-			BlockSize: bucket.EncryptionParameters.BlockSize,
+			CipherSuite: byte(bucket.DefaultEncryptionParameters.CipherSuite),
+			BlockSize: bucket.DefaultEncryptionParameters.BlockSize,
		},
	}
}
@@ -131,6 +131,27 @@ func (a *APIKey) Check(ctx context.Context, secret []byte, action Action, revoke
	return nil
}

+// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation
+func (a *APIKey) GetAllowedBuckets(ctx context.Context, action Action) (allowedBuckets map[string]struct{}, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	caveats := a.mac.Caveats()
+	for _, cavbuf := range caveats {
+		var cav Caveat
+		err := proto.Unmarshal(cavbuf, &cav)
+		if err != nil {
+			return allowedBuckets, ErrFormat.New("invalid caveat format: %v", err)
+		}
+		if cav.Allows(action) {
+			for _, caveatPath := range cav.AllowedPaths {
+				allowedBuckets[string(caveatPath.Bucket)] = struct{}{}
+			}
+		}
+	}
+
+	return allowedBuckets, err
+}
+
// Restrict generates a new APIKey with the provided Caveat attached.
func (a *APIKey) Restrict(caveat Caveat) (*APIKey, error) {
	buf, err := proto.Marshal(&caveat)
@@ -182,6 +203,12 @@ func (c *Caveat) Allows(action Action) bool {
	if len(c.AllowedPaths) == 0 {
		return true
	}
+	if len(action.Bucket) == 0 {
+		// if no action.bucket name is provided, then this call is checking that
+		// we can list all buckets. In that case, return true here and we will
+		// filter out buckets that aren't allowed later with `GetAllowedBuckets()`
+		return true
+	}
	for _, path := range c.AllowedPaths {
		if bytes.Equal(path.Bucket, action.Bucket) {
			return true
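To make the new caveat behavior concrete, here is a minimal sketch (not part of this commit) of how a caveat that restricts `AllowedPaths` interacts with `Caveat.Allows`: bucket-scoped actions are checked against the allowed paths, while a bucket-less action (a list-buckets check) now passes and is filtered afterwards through `GetAllowedBuckets`. The `macaroon.Caveat_Path` element type name is an assumption based on the package's generated protobuf code.

```go
package main

import (
	"fmt"
	"time"

	"storj.io/storj/pkg/macaroon"
)

func main() {
	// A caveat that only allows operations on the "photos" bucket.
	// Caveat_Path is assumed to be the generated type behind AllowedPaths.
	cav := macaroon.Caveat{
		AllowedPaths: []*macaroon.Caveat_Path{
			{Bucket: []byte("photos")},
		},
	}

	readPhotos := macaroon.Action{Op: macaroon.ActionRead, Bucket: []byte("photos"), Time: time.Now()}
	readVideos := macaroon.Action{Op: macaroon.ActionRead, Bucket: []byte("videos"), Time: time.Now()}
	listBuckets := macaroon.Action{Op: macaroon.ActionRead, Time: time.Now()} // no bucket name

	fmt.Println(cav.Allows(readPhotos)) // true: the bucket matches an allowed path
	fmt.Println(cav.Allows(readVideos)) // false: the bucket is not in AllowedPaths
	// With this commit a bucket-less action passes here; the per-bucket
	// filtering happens later via APIKey.GetAllowedBuckets.
	fmt.Println(cav.Allows(listBuckets)) // true
}
```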
@@ -29,35 +29,35 @@ func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *st
	if info == nil {
		info = &storj.Bucket{PathCipher: storj.EncAESGCM}
	}
-	if info.EncryptionParameters.CipherSuite == storj.EncUnspecified {
-		info.EncryptionParameters.CipherSuite = storj.EncAESGCM
+	if info.DefaultEncryptionParameters.CipherSuite == storj.EncUnspecified {
+		info.DefaultEncryptionParameters.CipherSuite = storj.EncAESGCM
	}
-	if info.EncryptionParameters.BlockSize == 0 {
-		info.EncryptionParameters.BlockSize = db.encryptedBlockSize
+	if info.DefaultEncryptionParameters.BlockSize == 0 {
+		info.DefaultEncryptionParameters.BlockSize = db.encryptedBlockSize
	}
-	if info.RedundancyScheme.Algorithm == storj.InvalidRedundancyAlgorithm {
-		info.RedundancyScheme.Algorithm = storj.ReedSolomon
+	if info.DefaultRedundancyScheme.Algorithm == storj.InvalidRedundancyAlgorithm {
+		info.DefaultRedundancyScheme.Algorithm = storj.ReedSolomon
	}
-	if info.RedundancyScheme.RequiredShares == 0 {
-		info.RedundancyScheme.RequiredShares = int16(db.redundancy.RequiredCount())
+	if info.DefaultRedundancyScheme.RequiredShares == 0 {
+		info.DefaultRedundancyScheme.RequiredShares = int16(db.redundancy.RequiredCount())
	}
-	if info.RedundancyScheme.RepairShares == 0 {
-		info.RedundancyScheme.RepairShares = int16(db.redundancy.RepairThreshold())
+	if info.DefaultRedundancyScheme.RepairShares == 0 {
+		info.DefaultRedundancyScheme.RepairShares = int16(db.redundancy.RepairThreshold())
	}
-	if info.RedundancyScheme.OptimalShares == 0 {
-		info.RedundancyScheme.OptimalShares = int16(db.redundancy.OptimalThreshold())
+	if info.DefaultRedundancyScheme.OptimalShares == 0 {
+		info.DefaultRedundancyScheme.OptimalShares = int16(db.redundancy.OptimalThreshold())
	}
-	if info.RedundancyScheme.TotalShares == 0 {
-		info.RedundancyScheme.TotalShares = int16(db.redundancy.TotalCount())
+	if info.DefaultRedundancyScheme.TotalShares == 0 {
+		info.DefaultRedundancyScheme.TotalShares = int16(db.redundancy.TotalCount())
	}
-	if info.RedundancyScheme.ShareSize == 0 {
-		info.RedundancyScheme.ShareSize = int32(db.redundancy.ErasureShareSize())
+	if info.DefaultRedundancyScheme.ShareSize == 0 {
+		info.DefaultRedundancyScheme.ShareSize = int32(db.redundancy.ErasureShareSize())
	}
-	if info.SegmentsSize == 0 {
-		info.SegmentsSize = db.segmentsSize
+	if info.DefaultSegmentsSize == 0 {
+		info.DefaultSegmentsSize = db.segmentsSize
	}

-	if err := validateBlockSize(info.RedundancyScheme, info.EncryptionParameters.BlockSize); err != nil {
+	if err := validateBlockSize(info.DefaultRedundancyScheme, info.DefaultEncryptionParameters.BlockSize); err != nil {
		return bucketInfo, err
	}
@@ -69,15 +69,15 @@ func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *st
	userMeta := map[string]string{
		"attribution-to": info.Attribution,
		"path-enc-type": strconv.Itoa(int(info.PathCipher)),
-		"default-seg-size": strconv.FormatInt(info.SegmentsSize, 10),
-		"default-enc-type": strconv.Itoa(int(info.EncryptionParameters.CipherSuite)),
-		"default-enc-blksz": strconv.FormatInt(int64(info.EncryptionParameters.BlockSize), 10),
-		"default-rs-algo": strconv.Itoa(int(info.RedundancyScheme.Algorithm)),
-		"default-rs-sharsz": strconv.FormatInt(int64(info.RedundancyScheme.ShareSize), 10),
-		"default-rs-reqd": strconv.Itoa(int(info.RedundancyScheme.RequiredShares)),
-		"default-rs-repair": strconv.Itoa(int(info.RedundancyScheme.RepairShares)),
-		"default-rs-optim": strconv.Itoa(int(info.RedundancyScheme.OptimalShares)),
-		"default-rs-total": strconv.Itoa(int(info.RedundancyScheme.TotalShares)),
+		"default-seg-size": strconv.FormatInt(info.DefaultSegmentsSize, 10),
+		"default-enc-type": strconv.Itoa(int(info.DefaultEncryptionParameters.CipherSuite)),
+		"default-enc-blksz": strconv.FormatInt(int64(info.DefaultEncryptionParameters.BlockSize), 10),
+		"default-rs-algo": strconv.Itoa(int(info.DefaultRedundancyScheme.Algorithm)),
+		"default-rs-sharsz": strconv.FormatInt(int64(info.DefaultRedundancyScheme.ShareSize), 10),
+		"default-rs-reqd": strconv.Itoa(int(info.DefaultRedundancyScheme.RequiredShares)),
+		"default-rs-repair": strconv.Itoa(int(info.DefaultRedundancyScheme.RepairShares)),
+		"default-rs-optim": strconv.Itoa(int(info.DefaultRedundancyScheme.OptimalShares)),
+		"default-rs-total": strconv.Itoa(int(info.DefaultRedundancyScheme.TotalShares)),
	}
	var exp time.Time
	m, err := db.buckets.Put(ctx, bucketName, r, pb.SerializableMeta{UserDefined: userMeta}, exp)
@@ -215,12 +215,12 @@ func bucketFromMeta(ctx context.Context, bucketName string, m objects.Meta) (out
		}
	}

-	es := &out.EncryptionParameters
-	rs := &out.RedundancyScheme
+	es := &out.DefaultEncryptionParameters
+	rs := &out.DefaultRedundancyScheme

	out.Attribution = m.UserDefined["attribution-to"]
	applySetting("path-enc-type", 16, func(v int64) { out.PathCipher = storj.CipherSuite(v) })
-	applySetting("default-seg-size", 64, func(v int64) { out.SegmentsSize = v })
+	applySetting("default-seg-size", 64, func(v int64) { out.DefaultSegmentsSize = v })
	applySetting("default-enc-type", 32, func(v int64) { es.CipherSuite = storj.CipherSuite(v) })
	applySetting("default-enc-blksz", 32, func(v int64) { es.BlockSize = int32(v) })
	applySetting("default-rs-algo", 32, func(v int64) { rs.Algorithm = storj.RedundancyAlgorithm(v) })
@@ -20,43 +20,43 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

-type CipherSuite_Type int32
+type CipherSuite int32

const (
-	CipherSuite_ENC_UNSPECIFIED CipherSuite_Type = 0
-	CipherSuite_ENC_NULL CipherSuite_Type = 1
-	CipherSuite_ENC_AESGCM CipherSuite_Type = 2
-	CipherSuite_ENC_SECRETBOX CipherSuite_Type = 3
+	CipherSuite_ENC_UNSPECIFIED CipherSuite = 0
+	CipherSuite_ENC_NULL CipherSuite = 1
+	CipherSuite_ENC_AESGCM CipherSuite = 2
+	CipherSuite_ENC_SECRETBOX CipherSuite = 3
)

-var CipherSuite_Type_name = map[int32]string{
+var CipherSuite_name = map[int32]string{
	0: "ENC_UNSPECIFIED",
	1: "ENC_NULL",
	2: "ENC_AESGCM",
	3: "ENC_SECRETBOX",
}

-var CipherSuite_Type_value = map[string]int32{
+var CipherSuite_value = map[string]int32{
	"ENC_UNSPECIFIED": 0,
	"ENC_NULL": 1,
	"ENC_AESGCM": 2,
	"ENC_SECRETBOX": 3,
}

-func (x CipherSuite_Type) String() string {
-	return proto.EnumName(CipherSuite_Type_name, int32(x))
+func (x CipherSuite) String() string {
+	return proto.EnumName(CipherSuite_name, int32(x))
}

-func (CipherSuite_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_8293a649ce9418c6, []int{1, 0}
+func (CipherSuite) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8293a649ce9418c6, []int{0}
}

type EncryptionParameters struct {
-	CipherSuite *CipherSuite `protobuf:"bytes,1,opt,name=cipher_suite,json=cipherSuite,proto3" json:"cipher_suite,omitempty"`
-	BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	CipherSuite CipherSuite `protobuf:"varint,1,opt,name=cipher_suite,json=cipherSuite,proto3,enum=encryption.CipherSuite" json:"cipher_suite,omitempty"`
+	BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
}

func (m *EncryptionParameters) Reset() { *m = EncryptionParameters{} }
@@ -83,11 +83,11 @@ func (m *EncryptionParameters) XXX_DiscardUnknown() {

var xxx_messageInfo_EncryptionParameters proto.InternalMessageInfo

-func (m *EncryptionParameters) GetCipherSuite() *CipherSuite {
+func (m *EncryptionParameters) GetCipherSuite() CipherSuite {
	if m != nil {
		return m.CipherSuite
	}
-	return nil
+	return CipherSuite_ENC_UNSPECIFIED
}

func (m *EncryptionParameters) GetBlockSize() int64 {
@@ -97,67 +97,27 @@ func (m *EncryptionParameters) GetBlockSize() int64 {
	return 0
}

-type CipherSuite struct {
-	Type CipherSuite_Type `protobuf:"varint,1,opt,name=type,proto3,enum=encryption.CipherSuite_Type" json:"type,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CipherSuite) Reset() { *m = CipherSuite{} }
-func (m *CipherSuite) String() string { return proto.CompactTextString(m) }
-func (*CipherSuite) ProtoMessage() {}
-func (*CipherSuite) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8293a649ce9418c6, []int{1}
-}
-func (m *CipherSuite) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CipherSuite.Unmarshal(m, b)
-}
-func (m *CipherSuite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CipherSuite.Marshal(b, m, deterministic)
-}
-func (m *CipherSuite) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CipherSuite.Merge(m, src)
-}
-func (m *CipherSuite) XXX_Size() int {
-	return xxx_messageInfo_CipherSuite.Size(m)
-}
-func (m *CipherSuite) XXX_DiscardUnknown() {
-	xxx_messageInfo_CipherSuite.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CipherSuite proto.InternalMessageInfo
-
-func (m *CipherSuite) GetType() CipherSuite_Type {
-	if m != nil {
-		return m.Type
-	}
-	return CipherSuite_ENC_UNSPECIFIED
-}
-
func init() {
-	proto.RegisterEnum("encryption.CipherSuite_Type", CipherSuite_Type_name, CipherSuite_Type_value)
+	proto.RegisterEnum("encryption.CipherSuite", CipherSuite_name, CipherSuite_value)
	proto.RegisterType((*EncryptionParameters)(nil), "encryption.EncryptionParameters")
-	proto.RegisterType((*CipherSuite)(nil), "encryption.CipherSuite")
}

func init() { proto.RegisterFile("encryption.proto", fileDescriptor_8293a649ce9418c6) }

var fileDescriptor_8293a649ce9418c6 = []byte{
-	// 238 bytes of a gzipped FileDescriptorProto
+	// 209 bytes of a gzipped FileDescriptorProto
	// [gzipped file descriptor bytes omitted]
}
@@ -11,13 +11,9 @@ message EncryptionParameters {
	int64 block_size = 2;
}

-message CipherSuite {
-	enum Type {
-		ENC_UNSPECIFIED = 0;
-		ENC_NULL = 1;
-		ENC_AESGCM = 2;
-		ENC_SECRETBOX = 3;
-	}
-
-	Type type = 1;
+enum CipherSuite {
+	ENC_UNSPECIFIED = 0;
+	ENC_NULL = 1;
+	ENC_AESGCM = 2;
+	ENC_SECRETBOX = 3;
}
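Since the commit flattens `CipherSuite` from a wrapper message into a plain protobuf enum, the generated Go value can be converted to and from `storj.CipherSuite` with a simple cast, which is what the conversion code later in this diff relies on. A small illustrative sketch, assuming the generated code lives in `storj.io/storj/pkg/pb` and that the enum values line up with the `storj` constants (as the rest of the diff implies):

```go
package main

import (
	"fmt"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
)

func main() {
	// CipherSuite is now a value field, not a *CipherSuite message.
	params := pb.EncryptionParameters{
		CipherSuite: pb.CipherSuite_ENC_AESGCM,
		BlockSize:   32,
	}

	// Converting between the wire enum and the native type is a plain cast,
	// mirroring storj.CipherSuite(req.GetPathCipher()) used in the satellite endpoint.
	native := storj.CipherSuite(params.CipherSuite)
	fmt.Println(native == storj.EncAESGCM) // true, assuming the numeric values match
}
```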
@@ -28,7 +28,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type Bucket struct {
	Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	PathCipher *CipherSuite `protobuf:"bytes,2,opt,name=path_cipher,json=pathCipher,proto3" json:"path_cipher,omitempty"`
+	PathCipher CipherSuite `protobuf:"varint,2,opt,name=path_cipher,json=pathCipher,proto3,enum=encryption.CipherSuite" json:"path_cipher,omitempty"`
	AttributionId []byte `protobuf:"bytes,3,opt,name=attribution_id,json=attributionId,proto3" json:"attribution_id,omitempty"`
	CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
	DefaultSegmentSize int64 `protobuf:"varint,5,opt,name=default_segment_size,json=defaultSegmentSize,proto3" json:"default_segment_size,omitempty"`
@@ -70,11 +70,11 @@ func (m *Bucket) GetName() []byte {
	return nil
}

-func (m *Bucket) GetPathCipher() *CipherSuite {
+func (m *Bucket) GetPathCipher() CipherSuite {
	if m != nil {
		return m.PathCipher
	}
-	return nil
+	return CipherSuite_ENC_UNSPECIFIED
}

func (m *Bucket) GetAttributionId() []byte {
@@ -160,7 +160,7 @@ func (m *BucketListItem) GetCreatedAt() time.Time {

type BucketCreateRequest struct {
	Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	PathCipher *CipherSuite `protobuf:"bytes,2,opt,name=path_cipher,json=pathCipher,proto3" json:"path_cipher,omitempty"`
+	PathCipher CipherSuite `protobuf:"varint,2,opt,name=path_cipher,json=pathCipher,proto3,enum=encryption.CipherSuite" json:"path_cipher,omitempty"`
	AttributionId []byte `protobuf:"bytes,3,opt,name=attribution_id,json=attributionId,proto3" json:"attribution_id,omitempty"`
	DefaultSegmentSize int64 `protobuf:"varint,4,opt,name=default_segment_size,json=defaultSegmentSize,proto3" json:"default_segment_size,omitempty"`
	DefaultRedundancyScheme *RedundancyScheme `protobuf:"bytes,5,opt,name=default_redundancy_scheme,json=defaultRedundancyScheme,proto3" json:"default_redundancy_scheme,omitempty"`
@@ -201,11 +201,11 @@ func (m *BucketCreateRequest) GetName() []byte {
	return nil
}

-func (m *BucketCreateRequest) GetPathCipher() *CipherSuite {
+func (m *BucketCreateRequest) GetPathCipher() CipherSuite {
	if m != nil {
		return m.PathCipher
	}
-	return nil
+	return CipherSuite_ENC_UNSPECIFIED
}

func (m *BucketCreateRequest) GetAttributionId() []byte {
@@ -1507,95 +1507,95 @@ func init() {
func init() { proto.RegisterFile("metainfo.proto", fileDescriptor_631e2f30a93cd64e) }

var fileDescriptor_631e2f30a93cd64e = []byte{
-	// 1402 bytes of a gzipped FileDescriptorProto
-	// [gzipped file descriptor bytes omitted]
+	// 1406 bytes of a gzipped FileDescriptorProto
+	// [gzipped file descriptor bytes omitted]
}

// Reference imports to suppress errors if they are not otherwise used.
pkg/storj/bucket.go (new file)
@@ -0,0 +1,35 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storj

import (
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"
)

var (
	// ErrBucket is an error class for general bucket errors
	ErrBucket = errs.Class("bucket")

	// ErrNoBucket is an error class for using empty bucket name
	ErrNoBucket = errs.Class("no bucket specified")

	// ErrBucketNotFound is an error class for non-existing bucket
	ErrBucketNotFound = errs.Class("bucket not found")
)

// Bucket contains information about a specific bucket
type Bucket struct {
	ID                          uuid.UUID
	Name                        string
	ProjectID                   uuid.UUID
	Attribution                 string
	Created                     time.Time
	PathCipher                  CipherSuite
	DefaultSegmentsSize         int64
	DefaultRedundancyScheme     RedundancyScheme
	DefaultEncryptionParameters EncryptionParameters
}
@@ -10,30 +10,13 @@ import (
)

var (
-	// ErrNoBucket is an error class for using empty bucket name
-	ErrNoBucket = errs.Class("no bucket specified")
-
	// ErrNoPath is an error class for using empty path
	ErrNoPath = errs.Class("no path specified")

-	// ErrBucketNotFound is an error class for non-existing bucket
-	ErrBucketNotFound = errs.Class("bucket not found")
-
	// ErrObjectNotFound is an error class for non-existing object
	ErrObjectNotFound = errs.Class("object not found")
)

-// Bucket contains information about a specific bucket
-type Bucket struct {
-	Name string
-	Attribution string
-	Created time.Time
-	PathCipher CipherSuite
-	SegmentsSize int64
-	RedundancyScheme RedundancyScheme
-	EncryptionParameters EncryptionParameters
-}
-
// Object contains information about a specific object
type Object struct {
	Version uint32
proto.lock
@@ -189,7 +189,7 @@
      "def": {
        "enums": [
          {
-           "name": "CipherSuite.Type",
+           "name": "CipherSuite",
            "enum_fields": [
              {
                "name": "ENC_UNSPECIFIED"
@@ -224,16 +224,6 @@
                "type": "int64"
              }
            ]
-         },
-         {
-           "name": "CipherSuite",
-           "fields": [
-             {
-               "id": 1,
-               "name": "type",
-               "type": "Type"
-             }
-           ]
          }
        ],
        "package": {
satellite/metainfo/db.go (new file)
@@ -0,0 +1,24 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"context"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/storj"
)

// BucketsDB is the interface for the database to interact with buckets
type BucketsDB interface {
	// Create creates a new bucket
	CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)
	// Get returns an existing bucket
	GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error)
	// Delete deletes a bucket
	DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
	// List returns all buckets for a project
	ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets map[string]struct{}) (bucketList storj.BucketList, err error)
}
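For orientation, here is a minimal sketch (not part of this commit) of how a caller might drive the new interface to fetch the first page of a project's buckets, restricted to the names a macaroon allows. `listFirstPage` is a hypothetical helper; the option fields follow the tests below, and only forward listing is supported by this change.

```go
package main

import (
	"context"
	"fmt"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/metainfo"
)

// listFirstPage lists the first page of buckets for a project, filtered to
// the bucket names the caller's macaroon allows.
func listFirstPage(ctx context.Context, db metainfo.BucketsDB, projectID uuid.UUID, allowed map[string]struct{}) error {
	listOpts := storj.BucketListOptions{
		Cursor:    "",            // start from the first bucket name
		Direction: storj.Forward, // only forward listing is supported
		Limit:     10,
	}
	bucketList, err := db.ListBuckets(ctx, projectID, listOpts, allowed)
	if err != nil {
		return err
	}
	for _, item := range bucketList.Items {
		fmt.Println(item.Name, item.Created)
	}
	if bucketList.More {
		// a next page would use the last returned name as the cursor
	}
	return nil
}
```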
satellite/metainfo/db_test.go (new file)
@@ -0,0 +1,191 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"testing"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

func newTestBucket(name string, projectID uuid.UUID) storj.Bucket {
	return storj.Bucket{
		ID:                  testrand.UUID(),
		Name:                name,
		ProjectID:           projectID,
		PathCipher:          storj.EncAESGCM,
		DefaultSegmentsSize: 65536,
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.ReedSolomon,
			ShareSize:      9,
			RequiredShares: 10,
			RepairShares:   11,
			OptimalShares:  12,
			TotalShares:    13,
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.EncAESGCM,
			BlockSize:   32,
		},
	}
}

func TestBasicBucketOperations(t *testing.T) {
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()
		consoleDB := db.Console()
		project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
		require.NoError(t, err)

		bucketsDB := db.Buckets()
		expectedBucket := newTestBucket("testbucket", project.ID)

		// CreateBucket
		_, err = bucketsDB.CreateBucket(ctx, expectedBucket)
		require.NoError(t, err)

		// GetBucket
		bucket, err := bucketsDB.GetBucket(ctx, []byte("testbucket"), project.ID)
		require.NoError(t, err)
		require.Equal(t, expectedBucket.ID, bucket.ID)
		require.Equal(t, expectedBucket.Name, bucket.Name)
		require.Equal(t, expectedBucket.ProjectID, bucket.ProjectID)
		require.Equal(t, expectedBucket.PathCipher, bucket.PathCipher)
		require.Equal(t, expectedBucket.DefaultSegmentsSize, bucket.DefaultSegmentsSize)
		require.Equal(t, expectedBucket.DefaultRedundancyScheme, bucket.DefaultRedundancyScheme)
		require.Equal(t, expectedBucket.DefaultEncryptionParameters, bucket.DefaultEncryptionParameters)

		// DeleteBucket
		err = bucketsDB.DeleteBucket(ctx, []byte("testbucket"), project.ID)
		require.NoError(t, err)
	})
}

func TestListBucketsAllAllowed(t *testing.T) {
	testCases := []struct {
		name          string
		cursor        string
		limit         int
		expectedItems int
		expectedMore  bool
	}{
		{"empty string cursor", "", 10, 10, false},
		{"last bucket cursor", "zzz", 2, 1, false},
		{"non matching cursor", "ccc", 10, 5, false},
		{"first bucket cursor", "0test", 10, 10, false},
		{"empty string cursor, more", "", 5, 5, true},
		{"non matching cursor, more", "ccc", 3, 3, true},
		{"first bucket cursor, more", "0test", 5, 5, true},
	}
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()
		consoleDB := db.Console()
		project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
		require.NoError(t, err)

		bucketsDB := db.Buckets()

		var allowedPaths = map[string]struct{}{}
		{ // setup some test buckets
			var testBucketNames = []string{"aaa", "bbb", "mmm", "qqq", "zzz",
				"test.bucket", "123", "0test", "999", "test-bucket.thing",
			}
			for _, bucket := range testBucketNames {
				testBucket := newTestBucket(bucket, project.ID)
				_, err := bucketsDB.CreateBucket(ctx, testBucket)
				allowedPaths[bucket] = struct{}{}
				if err != nil {
					require.NoError(t, err)
				}
			}
		}

		for _, tt := range testCases {
			tt := tt // avoid scopelint error
			t.Run(tt.name, func(t *testing.T) {
				bucketList, err := bucketsDB.ListBuckets(ctx, project.ID, storj.BucketListOptions{
					Cursor:    tt.cursor,
					Direction: storj.Forward,
					Limit:     tt.limit,
				},
					allowedPaths,
				)
				require.NoError(t, err)
				require.Equal(t, tt.expectedItems, len(bucketList.Items))
				require.Equal(t, tt.expectedMore, bucketList.More)
			})
		}
	})
}

func TestListBucketsNotAllowed(t *testing.T) {
	testCases := []struct {
		name          string
		cursor        string
		limit         int
		expectedItems int
		expectedMore  bool
		allowedPaths  map[string]struct{}
		expectedNames []string
	}{
		{"empty string cursor, 2 allowed", "", 10, 1, false, map[string]struct{}{"aaa": {}, "ddd": {}}, []string{"aaa"}},
		{"empty string cursor, more", "", 2, 2, true, map[string]struct{}{"aaa": {}, "bbb": {}, "zzz": {}}, []string{"aaa", "bbb"}},
		{"empty string cursor, 3 allowed", "", 4, 3, false, map[string]struct{}{"aaa": {}, "bbb": {}, "zzz": {}}, []string{"aaa", "bbb", "zzz"}},
		{"last bucket cursor", "zzz", 2, 1, false, map[string]struct{}{"zzz": {}}, []string{"zzz"}},
	}
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()
		consoleDB := db.Console()
		project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
		require.NoError(t, err)

		bucketsDB := db.Buckets()

		{ // setup some test buckets
			var testBucketNames = []string{"aaa", "bbb", "mmm", "qqq", "zzz",
				"test.bucket", "123", "0test", "999", "test-bucket.thing",
			}
			for _, bucket := range testBucketNames {
				testBucket := newTestBucket(bucket, project.ID)
				_, err := bucketsDB.CreateBucket(ctx, testBucket)
				if err != nil {
					require.NoError(t, err)
				}
			}
		}

		for _, tt := range testCases {
			tt := tt // avoid scopelint error
			listOpts := storj.BucketListOptions{
				Cursor:    tt.cursor,
				Direction: storj.Forward,
				Limit:     tt.limit,
			}
			t.Run(tt.name, func(t *testing.T) {
				bucketList, err := bucketsDB.ListBuckets(ctx, project.ID,
					listOpts,
					tt.allowedPaths,
				)
				require.NoError(t, err)
				require.Equal(t, tt.expectedItems, len(bucketList.Items))
				require.Equal(t, tt.expectedMore, bucketList.More)
				for _, actualItem := range bucketList.Items {
					require.Contains(t, tt.expectedNames, actualItem.Name)
				}
			})
		}
	})
}
@ -19,6 +19,7 @@ import (
|
||||
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
||||
|
||||
"storj.io/storj/pkg/accounting"
|
||||
"storj.io/storj/pkg/auth"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
"storj.io/storj/pkg/identity"
|
||||
"storj.io/storj/pkg/macaroon"
|
||||
@ -665,37 +666,205 @@ func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRe
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateBucket creates a bucket
|
||||
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (_ *pb.BucketCreateResponse, err error) {
|
||||
// GetBucket returns a bucket
|
||||
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
// TODO: placeholder to implement pb.MetainfoServer interface.
|
||||
return &pb.BucketCreateResponse{}, err
|
||||
|
||||
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
|
||||
Op: macaroon.ActionRead,
|
||||
Bucket: req.Name,
|
||||
Time: time.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
bucket, err := endpoint.metainfo.GetBucket(ctx, req.GetName(), keyInfo.ProjectID)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &pb.BucketGetResponse{
|
||||
Bucket: convertBucketToProto(ctx, bucket),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBucket gets a bucket
|
||||
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (_ *pb.BucketGetResponse, err error) {
|
||||
// CreateBucket creates a new bucket
|
||||
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
// TODO: placeholder to implement pb.MetainfoServer interface.
|
||||
return &pb.BucketGetResponse{}, err
|
||||
|
||||
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
|
||||
Op: macaroon.ActionWrite,
|
||||
Bucket: req.Name,
|
||||
Time: time.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.validateBucket(ctx, req.Name)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.validateRedundancy(ctx, req.GetDefaultRedundancyScheme())
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
bucket, err := convertProtoToBucket(req, keyInfo.ProjectID)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
bucket, err = endpoint.metainfo.CreateBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
|
||||
return &pb.BucketCreateResponse{
|
||||
Bucket: convertBucketToProto(ctx, bucket),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket
|
||||
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (_ *pb.BucketDeleteResponse, err error) {
|
||||
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
// TODO: placeholder to implement pb.MetainfoServer interface.
|
||||
return &pb.BucketDeleteResponse{}, err
|
||||
|
||||
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
|
||||
Op: macaroon.ActionDelete,
|
||||
Bucket: req.Name,
|
||||
Time: time.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.validateBucket(ctx, req.Name)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.metainfo.DeleteBucket(ctx, req.Name, keyInfo.ProjectID)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &pb.BucketDeleteResponse{}, nil
|
||||
}
|
||||
|
||||
// ListBuckets returns a list of buckets
func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (_ *pb.BucketListResponse, err error) {
// ListBuckets returns buckets in a project where the bucket name matches the request cursor
func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (resp *pb.BucketListResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	// TODO: placeholder to implement pb.MetainfoServer interface.
	return &pb.BucketListResponse{}, err
	action := macaroon.Action{
		Op:   macaroon.ActionRead,
		Time: time.Now(),
	}
	keyInfo, err := endpoint.validateAuth(ctx, action)
	if err != nil {
		return nil, status.Errorf(codes.Unauthenticated, err.Error())
	}

	allowedBuckets, err := getAllowedBuckets(ctx, action)
	if err != nil {
		return nil, err
	}

	listOpts := storj.BucketListOptions{
		Cursor: string(req.Cursor),
		Limit:  int(req.Limit),
		// We are only supporting the forward direction for listing buckets
		Direction: storj.Forward,
	}
	bucketList, err := endpoint.metainfo.ListBuckets(ctx, keyInfo.ProjectID, listOpts, allowedBuckets)
	if err != nil {
		return nil, err
	}

	bucketItems := make([]*pb.BucketListItem, len(bucketList.Items))
	for i, item := range bucketList.Items {
		bucketItems[i] = &pb.BucketListItem{
			Name:      []byte(item.Name),
			CreatedAt: item.Created,
		}
	}

	return &pb.BucketListResponse{
		Items: bucketItems,
		More:  bucketList.More,
	}, nil
}

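Editor's note: the endpoint above does no filtering itself; it derives the macaroon's allow-set via getAllowedBuckets (below) and passes it down to the buckets database, where buckets absent from the set are dropped. A minimal stand-alone sketch of that filtering step, with a hypothetical helper name and plain strings instead of the satellite's types:

package main

import "fmt"

// filterAllowed keeps only the names present in the allow-set, mirroring the
// allowedBuckets check the buckets DB applies when assembling a listing.
func filterAllowed(names []string, allowed map[string]struct{}) []string {
	out := make([]string, 0, len(names))
	for _, name := range names {
		if _, ok := allowed[name]; ok {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	allowed := map[string]struct{}{"photos": {}, "backups": {}}
	fmt.Println(filterAllowed([]string{"backups", "logs", "photos"}, allowed))
	// prints: [backups photos]
}
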
// SetBucketAttribution returns a list of buckets
func (endpoint *Endpoint) SetBucketAttribution(ctx context.Context, req *pb.BucketSetAttributionRequest) (_ *pb.BucketSetAttributionResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	// TODO: placeholder to implement pb.MetainfoServer interface.
	return &pb.BucketSetAttributionResponse{}, err
func getAllowedBuckets(ctx context.Context, action macaroon.Action) (allowedBuckets map[string]struct{}, err error) {
	keyData, ok := auth.GetAPIKey(ctx)
	if !ok {
		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential GetAPIKey: %v", err)
	}
	key, err := macaroon.ParseAPIKey(string(keyData))
	if err != nil {
		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential ParseAPIKey: %v", err)
	}
	allowedBuckets, err = key.GetAllowedBuckets(ctx, action)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "GetAllowedBuckets: %v", err)
	}
	return allowedBuckets, err
}

// SetBucketAttribution sets the bucket attribution.
func (endpoint *Endpoint) SetBucketAttribution(context.Context, *pb.BucketSetAttributionRequest) (resp *pb.BucketSetAttributionResponse, err error) {
	return resp, status.Error(codes.Unimplemented, "not implemented")
}

func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (storj.Bucket, error) {
	bucketID, err := uuid.New()
	if err != nil {
		return storj.Bucket{}, err
	}

	defaultRS := req.GetDefaultRedundancyScheme()
	defaultEP := req.GetDefaultEncryptionParameters()
	return storj.Bucket{
		ID:                  *bucketID,
		Name:                string(req.GetName()),
		ProjectID:           projectID,
		Attribution:         string(req.GetAttributionId()),
		PathCipher:          storj.CipherSuite(req.GetPathCipher()),
		DefaultSegmentsSize: req.GetDefaultSegmentSize(),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(defaultRS.GetType()),
			ShareSize:      defaultRS.GetErasureShareSize(),
			RequiredShares: int16(defaultRS.GetMinReq()),
			RepairShares:   int16(defaultRS.GetRepairThreshold()),
			OptimalShares:  int16(defaultRS.GetSuccessThreshold()),
			TotalShares:    int16(defaultRS.GetTotal()),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(defaultEP.CipherSuite),
			BlockSize:   int32(defaultEP.BlockSize),
		},
	}, nil
}

func convertBucketToProto(ctx context.Context, bucket storj.Bucket) (pbBucket *pb.Bucket) {
	rs := bucket.DefaultRedundancyScheme
	return &pb.Bucket{
		Name:               []byte(bucket.Name),
		PathCipher:         pb.CipherSuite(int(bucket.PathCipher)),
		AttributionId:      []byte(bucket.Attribution),
		CreatedAt:          bucket.Created,
		DefaultSegmentSize: bucket.DefaultSegmentsSize,
		DefaultRedundancyScheme: &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_RS,
			MinReq:           int32(rs.RequiredShares),
			Total:            int32(rs.TotalShares),
			RepairThreshold:  int32(rs.RepairShares),
			SuccessThreshold: int32(rs.OptimalShares),
			ErasureShareSize: rs.ShareSize,
		},
		DefaultEncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(int(bucket.DefaultEncryptionParameters.CipherSuite)),
			BlockSize:   int64(bucket.DefaultEncryptionParameters.BlockSize),
		},
	}
}
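Editor's note: the two converters above are symmetric but the redundancy-scheme fields are named differently on each side (MinReq, RepairThreshold, SuccessThreshold, Total in the protobuf versus RequiredShares, RepairShares, OptimalShares, TotalShares in storj.Bucket), with a width change from int32 to int16. A minimal sketch of just that mapping, using stand-in structs rather than the real pb and storj types; the example numbers are illustrative only:

package main

import "fmt"

// protoRS and bucketRS stand in for the redundancy-scheme fields of
// pb.RedundancyScheme and storj.RedundancyScheme; only the mapping is real.
type protoRS struct{ MinReq, RepairThreshold, SuccessThreshold, Total, ErasureShareSize int32 }
type bucketRS struct {
	RequiredShares, RepairShares, OptimalShares, TotalShares int16
	ShareSize                                                int32
}

// toBucketRS mirrors the field renaming done by convertProtoToBucket.
func toBucketRS(p protoRS) bucketRS {
	return bucketRS{
		RequiredShares: int16(p.MinReq),
		RepairShares:   int16(p.RepairThreshold),
		OptimalShares:  int16(p.SuccessThreshold),
		TotalShares:    int16(p.Total),
		ShareSize:      p.ErasureShareSize,
	}
}

func main() {
	fmt.Printf("%+v\n", toBucketRS(protoRS{MinReq: 29, RepairThreshold: 35, SuccessThreshold: 80, Total: 130, ErasureShareSize: 256}))
}
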
@ -8,23 +8,26 @@ import (
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storage/meta"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
)

// Service structure
type Service struct {
	logger *zap.Logger
	DB     storage.KeyValueStore
	logger    *zap.Logger
	DB        storage.KeyValueStore
	bucketsDB BucketsDB
}

// NewService creates new metainfo service
func NewService(logger *zap.Logger, db storage.KeyValueStore) *Service {
	return &Service{logger: logger, DB: db}
func NewService(logger *zap.Logger, db storage.KeyValueStore, bucketsDB BucketsDB) *Service {
	return &Service{logger: logger, DB: db, bucketsDB: bucketsDB}
}

// Put puts pointer to db under specific path
@ -165,3 +168,27 @@ func (s *Service) Iterate(ctx context.Context, prefix string, first string, recu
	}
	return s.DB.Iterate(ctx, opts, f)
}

// CreateBucket creates a new bucket in the buckets db
func (s *Service) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.CreateBucket(ctx, bucket)
}

// GetBucket returns an existing bucket in the buckets db
func (s *Service) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.GetBucket(ctx, bucketName, projectID)
}

// DeleteBucket deletes a bucket from the buckets db
func (s *Service) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.DeleteBucket(ctx, bucketName, projectID)
}

// ListBuckets returns a list of buckets for a project
func (s *Service) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets map[string]struct{}) (bucketList storj.BucketList, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
}
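Editor's note: the Service methods above delegate directly to a BucketsDB, whose definition is not part of this hunk. Judging from the calls here and from the satellitedb implementation later in this diff, the interface presumably has the following shape; this is an inferred sketch, not a file from the commit:

package metainfo // assumed placement for the sketch

import (
	"context"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/storj"
)

// BucketsDB is the assumed interface backing the Service methods above.
type BucketsDB interface {
	// CreateBucket creates a new bucket
	CreateBucket(ctx context.Context, bucket storj.Bucket) (storj.Bucket, error)
	// GetBucket returns an existing bucket
	GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (storj.Bucket, error)
	// DeleteBucket deletes a bucket
	DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) error
	// ListBuckets returns buckets for a project, restricted to the allowed set
	ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets map[string]struct{}) (storj.BucketList, error)
}
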
@ -98,6 +98,8 @@ type DB interface {
	Orders() orders.DB
	// Containment returns database for containment
	Containment() audit.Containment
	// Buckets returns the database to interact with buckets
	Buckets() metainfo.BucketsDB
}

// Config is the global config satellite
@ -401,7 +403,10 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
	}

	peer.Metainfo.Database = db // for logging: storelogger.New(peer.Log.Named("pdb"), db)
	peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"), peer.Metainfo.Database)
	peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"),
		peer.Metainfo.Database,
		peer.DB.Buckets(),
	)

	peer.Metainfo.Endpoint2 = metainfo.NewEndpoint(
		peer.Log.Named("metainfo:endpoint"),
satellite/satellitedb/buckets.go (new file, 176 lines)
@ -0,0 +1,176 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"errors"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/metainfo"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

type bucketsDB struct {
	db dbx.Methods
}

// Buckets returns database for interacting with buckets
func (db *DB) Buckets() metainfo.BucketsDB {
	return &bucketsDB{db: db.db}
}

// CreateBucket creates a new bucket
func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	row, err := db.db.Create_BucketMetainfo(ctx,
		dbx.BucketMetainfo_Id(bucket.ID[:]),
		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
		dbx.BucketMetainfo_Name([]byte(bucket.Name)),
		dbx.BucketMetainfo_PathCipher(int(bucket.PathCipher)),
		dbx.BucketMetainfo_DefaultSegmentSize(int(bucket.DefaultSegmentsSize)),
		dbx.BucketMetainfo_DefaultEncryptionCipherSuite(int(bucket.DefaultEncryptionParameters.CipherSuite)),
		dbx.BucketMetainfo_DefaultEncryptionBlockSize(int(bucket.DefaultEncryptionParameters.BlockSize)),
		dbx.BucketMetainfo_DefaultRedundancyAlgorithm(int(bucket.DefaultRedundancyScheme.Algorithm)),
		dbx.BucketMetainfo_DefaultRedundancyShareSize(int(bucket.DefaultRedundancyScheme.ShareSize)),
		dbx.BucketMetainfo_DefaultRedundancyRequiredShares(int(bucket.DefaultRedundancyScheme.RequiredShares)),
		dbx.BucketMetainfo_DefaultRedundancyRepairShares(int(bucket.DefaultRedundancyScheme.RepairShares)),
		dbx.BucketMetainfo_DefaultRedundancyOptimalShares(int(bucket.DefaultRedundancyScheme.OptimalShares)),
		dbx.BucketMetainfo_DefaultRedundancyTotalShares(int(bucket.DefaultRedundancyScheme.TotalShares)),
	)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}

	bucket, err = convertDBXtoBucket(row)
	if err != nil {
		return storj.Bucket{}, storj.ErrBucket.Wrap(err)
	}
	return bucket, nil
}

// GetBucket returns a bucket
func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	dbxBucket, err := db.db.Get_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	if err != nil {
		return bucket, err
	}
	return convertDBXtoBucket(dbxBucket)
}

// DeleteBucket deletes a bucket
func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, err = db.db.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx,
		dbx.BucketMetainfo_ProjectId(projectID[:]),
		dbx.BucketMetainfo_Name(bucketName),
	)
	return err
}

// ListBuckets returns a list of buckets for a project
func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets map[string]struct{}) (bucketList storj.BucketList, err error) {
	defer mon.Task()(&ctx)(&err)

	const defaultListLimit = 10000
	if listOpts.Limit < 1 {
		listOpts.Limit = defaultListLimit
	}
	limit := listOpts.Limit + 1 // add one to detect More

	for {
		var dbxBuckets []*dbx.BucketMetainfo
		switch listOpts.Direction {
		// for listing buckets we are only supporting the forward direction for simplicity
		case storj.Forward:
			dbxBuckets, err = db.db.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx,
				dbx.BucketMetainfo_ProjectId(projectID[:]),
				dbx.BucketMetainfo_Name([]byte(listOpts.Cursor)),
				limit,
				0,
			)
		default:
			return bucketList, errors.New("unknown list direction")
		}
		if err != nil {
			return bucketList, err
		}

		bucketList.More = len(dbxBuckets) > listOpts.Limit
		var nextCursor string
		if bucketList.More {
			nextCursor = string(dbxBuckets[listOpts.Limit].Name)
			// If more buckets than listOpts.Limit were returned,
			// remove the extra buckets so that we do not return
			// more than the limit
			dbxBuckets = dbxBuckets[0:listOpts.Limit]
		}

		if bucketList.Items == nil {
			bucketList.Items = make([]storj.Bucket, 0, len(dbxBuckets))
		}

		for _, dbxBucket := range dbxBuckets {
			// Check that the bucket is allowed to be viewed
			if _, ok := allowedBuckets[string(dbxBucket.Name)]; ok {
				item, err := convertDBXtoBucket(dbxBucket)
				if err != nil {
					return bucketList, err
				}
				bucketList.Items = append(bucketList.Items, item)
			}
		}

		if len(bucketList.Items) < listOpts.Limit && bucketList.More {
			// If we filtered out disallowed buckets, then get more buckets
			// out of the database so that we return `limit` number of buckets
			listOpts = storj.BucketListOptions{
				Cursor:    nextCursor,
				Limit:     listOpts.Limit,
				Direction: storj.Forward,
			}
			continue
		}
		break
	}

	return bucketList, err
}

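Editor's note: the listing loop above relies on two tricks: it queries limit+1 rows so that an extra row signals More, and it keeps re-querying from the next cursor whenever the allowed-buckets filter leaves the page short. A self-contained sketch of that pattern over a sorted string slice, with hypothetical fetch/list helpers standing in for the generated dbx query:

package main

import (
	"fmt"
	"sort"
)

// fetch returns up to limit names >= cursor from a sorted slice, standing in
// for the generated Limited_BucketMetainfo_... query.
func fetch(all []string, cursor string, limit int) []string {
	i := sort.SearchStrings(all, cursor)
	end := i + limit
	if end > len(all) {
		end = len(all)
	}
	return all[i:end]
}

func list(all []string, allowed map[string]struct{}, cursor string, limit int) (items []string, more bool) {
	for {
		page := fetch(all, cursor, limit+1) // query one extra row to detect More
		more = len(page) > limit
		if more {
			cursor = page[limit] // the first trimmed row becomes the next cursor
			page = page[:limit]
		}
		for _, name := range page {
			if _, ok := allowed[name]; ok {
				items = append(items, name)
			}
		}
		if len(items) < limit && more {
			continue // filtering left the page short, pull another page
		}
		return items, more
	}
}

func main() {
	all := []string{"a", "b", "c", "d", "e"}
	allowed := map[string]struct{}{"b": {}, "d": {}, "e": {}}
	fmt.Println(list(all, allowed, "", 2)) // prints: [b d] true
}

As in the satellite code, More reflects whether the database had further rows, not whether any of them are visible to the caller.
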
func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket storj.Bucket, err error) {
	id, err := bytesToUUID(dbxBucket.Id)
	if err != nil {
		return bucket, err
	}
	project, err := bytesToUUID(dbxBucket.ProjectId)
	if err != nil {
		return bucket, err
	}
	return storj.Bucket{
		ID:                  id,
		Name:                string(dbxBucket.Name),
		ProjectID:           project,
		Created:             dbxBucket.CreatedAt,
		PathCipher:          storj.CipherSuite(dbxBucket.PathCipher),
		DefaultSegmentsSize: int64(dbxBucket.DefaultSegmentSize),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(dbxBucket.DefaultRedundancyAlgorithm),
			ShareSize:      int32(dbxBucket.DefaultRedundancyShareSize),
			RequiredShares: int16(dbxBucket.DefaultRedundancyRequiredShares),
			RepairShares:   int16(dbxBucket.DefaultRedundancyRepairShares),
			OptimalShares:  int16(dbxBucket.DefaultRedundancyOptimalShares),
			TotalShares:    int16(dbxBucket.DefaultRedundancyTotalShares),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(dbxBucket.DefaultEncryptionCipherSuite),
			BlockSize:   int32(dbxBucket.DefaultEncryptionBlockSize),
		},
	}, err
}
@ -770,3 +770,10 @@ delete bucket_metainfo (
	where bucket_metainfo.project_id = ?
	where bucket_metainfo.name = ?
)

read limitoffset ( // Forward
	select bucket_metainfo
	where bucket_metainfo.project_id = ?
	where bucket_metainfo.name >= ?
	orderby asc bucket_metainfo.name
)
@ -7330,6 +7330,43 @@ func (obj *postgresImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Co

}

func (obj *postgresImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}

func (obj *postgresImpl) Update_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	update PendingAudits_Update_Fields) (
@ -10825,6 +10862,43 @@ func (obj *sqlite3Impl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Con

}

func (obj *sqlite3Impl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}

func (obj *sqlite3Impl) Update_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	update PendingAudits_Update_Fields) (
@ -13676,6 +13750,18 @@ func (rx *Rx) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Conte
	return tx.Get_ValueAttribution_By_ProjectId_And_BucketName(ctx, value_attribution_project_id, value_attribution_bucket_name)
}

func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater_or_equal, limit, offset)
}

func (rx *Rx) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
	bucket_usage_bucket_id BucketUsage_BucketId_Field,
	bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
@ -14302,6 +14388,12 @@ type Methods interface {
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	value_attribution *ValueAttribution, err error)

	Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
		bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
		bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
		limit int, offset int64) (
		rows []*BucketMetainfo, err error)

	Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
		bucket_usage_bucket_id BucketUsage_BucketId_Field,
		bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
@ -25,6 +25,7 @@ import (
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/attribution"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/rewards"
)
@ -74,6 +75,47 @@ func (m *lockedAttribution) QueryAttribution(ctx context.Context, partnerID uuid
	return m.db.QueryAttribution(ctx, partnerID, start, end)
}

// Buckets returns the database to interact with buckets
func (m *locked) Buckets() metainfo.BucketsDB {
	m.Lock()
	defer m.Unlock()
	return &lockedBuckets{m.Locker, m.db.Buckets()}
}

// lockedBuckets implements locking wrapper for metainfo.BucketsDB
type lockedBuckets struct {
	sync.Locker
	db metainfo.BucketsDB
}

// Create creates a new bucket
func (m *lockedBuckets) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateBucket(ctx, bucket)
}

// Delete deletes a bucket
func (m *lockedBuckets) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.DeleteBucket(ctx, bucketName, projectID)
}

// Get returns an existing bucket
func (m *lockedBuckets) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucket(ctx, bucketName, projectID)
}

// List returns all buckets for a project
func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets map[string]struct{}) (bucketList storj.BucketList, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
}

// CertDB returns database for storing uplink's public key & ID
func (m *locked) CertDB() certdb.DB {
	m.Lock()