2019-01-18 15:10:21 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
2019-04-25 09:46:32 +01:00
|
|
|
package metainfo
|
2019-01-18 15:10:21 +00:00
|
|
|
|
|
|
|
import (
|
2019-06-04 12:55:38 +01:00
|
|
|
"context"
|
2019-07-08 23:16:50 +01:00
|
|
|
"time"
|
2019-06-04 12:55:38 +01:00
|
|
|
|
2020-02-14 12:52:00 +00:00
|
|
|
"github.com/zeebo/errs"
|
2019-01-18 15:10:21 +00:00
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/macaroon"
|
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/storj"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2020-09-03 14:54:56 +01:00
|
|
|
"storj.io/storj/satellite/metainfo/metabase"
|
2019-01-18 15:10:21 +00:00
|
|
|
"storj.io/storj/storage"
|
2020-02-21 14:07:29 +00:00
|
|
|
"storj.io/uplink/private/storage/meta"
|
2019-01-18 15:10:21 +00:00
|
|
|
)
|
|
|
|
|
2020-02-14 12:52:00 +00:00
|
|
|
var (
	// ErrBucketNotEmpty is returned when bucket is required to be empty for an operation.
	ErrBucketNotEmpty = errs.Class("bucket not empty")
)
|
|
|
|
|
2019-01-18 15:10:21 +00:00
|
|
|
// Service structure
//
// architecture: Service
type Service struct {
	logger     *zap.Logger // used for non-fatal warnings while listing metadata
	db         PointerDB   // key/value store holding protobuf-encoded pb.Pointer values
	bucketsDB  BucketsDB   // bucket metadata storage (create/get/update/delete/list)
	metabaseDB MetabaseDB  // metabase access, used here for bucket-emptiness checks
}
|
|
|
|
|
2019-12-06 18:14:35 +00:00
|
|
|
// NewService creates new metainfo service.
|
2020-10-29 16:54:35 +00:00
|
|
|
func NewService(logger *zap.Logger, db PointerDB, bucketsDB BucketsDB, metabaseDB MetabaseDB) *Service {
|
|
|
|
return &Service{
|
|
|
|
logger: logger,
|
|
|
|
db: db,
|
|
|
|
bucketsDB: bucketsDB,
|
|
|
|
metabaseDB: metabaseDB,
|
|
|
|
}
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
2019-12-06 18:14:35 +00:00
|
|
|
// Put puts pointer to db under specific path.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) Put(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
if err := sanityCheckPointer(key, pointer); err != nil {
|
2019-12-17 16:23:00 +00:00
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2019-01-18 15:10:21 +00:00
|
|
|
// Update the pointer with the creation date
|
2019-07-08 23:16:50 +01:00
|
|
|
pointer.CreationDate = time.Now()
|
2019-01-18 15:10:21 +00:00
|
|
|
|
2020-04-08 13:08:57 +01:00
|
|
|
pointerBytes, err := pb.Marshal(pointer)
|
2019-01-18 15:10:21 +00:00
|
|
|
if err != nil {
|
2019-07-23 15:28:06 +01:00
|
|
|
return Error.Wrap(err)
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
2019-07-25 17:59:46 +01:00
|
|
|
// CompareAndSwap is used instead of Put to avoid overwriting existing pointers
|
2020-09-03 14:54:56 +01:00
|
|
|
err = s.db.CompareAndSwap(ctx, storage.Key(key), nil, pointerBytes)
|
2019-07-25 17:59:46 +01:00
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2020-02-11 17:25:35 +00:00
|
|
|
// UnsynchronizedPut puts pointer to db under specific path without verifying for existing pointer under the same path.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) UnsynchronizedPut(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
|
2020-02-11 17:25:35 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
if err := sanityCheckPointer(key, pointer); err != nil {
|
2019-12-17 16:23:00 +00:00
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2020-02-11 17:25:35 +00:00
|
|
|
// Update the pointer with the creation date
|
|
|
|
pointer.CreationDate = time.Now()
|
|
|
|
|
2020-04-08 13:08:57 +01:00
|
|
|
pointerBytes, err := pb.Marshal(pointer)
|
2020-02-11 17:25:35 +00:00
|
|
|
if err != nil {
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
err = s.db.Put(ctx, storage.Key(key), pointerBytes)
|
2020-02-11 17:25:35 +00:00
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2019-11-05 19:13:45 +00:00
|
|
|
// UpdatePieces calls UpdatePiecesCheckDuplicates with checkDuplicates equal to false.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) UpdatePieces(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error) {
|
|
|
|
return s.UpdatePiecesCheckDuplicates(ctx, key, ref, toAdd, toRemove, false)
|
2019-11-05 19:13:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// UpdatePiecesCheckDuplicates atomically adds toAdd pieces and removes toRemove pieces from
// the pointer under path. ref is the pointer that caller received via Get
// prior to calling this method.
//
// It will first check if the pointer has been deleted or replaced.
// Then if checkDuplicates is true it will return an error if the nodes to be
// added are already in the pointer.
// Then it will remove the toRemove pieces and then it will add the toAdd pieces.
// Replacing the node ID and the hash of a piece can be done by adding the
// piece to both toAdd and toRemove.
func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := sanityCheckPointer(key, ref); err != nil {
		return nil, Error.Wrap(err)
	}

	// Re-check the (possibly modified) pointer before returning success, so a
	// bad update never escapes this method unnoticed.
	defer func() {
		if err == nil {
			err = sanityCheckPointer(key, pointer)
		}
	}()

	// Optimistic-concurrency loop: read, modify, then CompareAndSwap; retry
	// from the top whenever another writer changed the value in between.
	for {
		// read the pointer
		oldPointerBytes, err := s.db.Get(ctx, storage.Key(key))
		if err != nil {
			if storage.ErrKeyNotFound.Has(err) {
				err = storj.ErrObjectNotFound.Wrap(err)
			}
			return nil, Error.Wrap(err)
		}

		// unmarshal the pointer
		pointer = &pb.Pointer{}
		err = pb.Unmarshal(oldPointerBytes, pointer)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		// check if pointer has been replaced
		// (the creation date acts as a version marker relative to ref)
		if !pointer.GetCreationDate().Equal(ref.GetCreationDate()) {
			return nil, Error.New("pointer has been replaced")
		}

		// put all existing pieces to a map
		pieceMap := make(map[int32]*pb.RemotePiece)
		nodePieceMap := make(map[storj.NodeID]struct{})
		for _, piece := range pointer.GetRemote().GetRemotePieces() {
			pieceMap[piece.PieceNum] = piece
			if checkDuplicates {
				nodePieceMap[piece.NodeId] = struct{}{}
			}
		}

		// Return an error if the pointer already has a piece for this node
		if checkDuplicates {
			for _, piece := range toAdd {
				_, ok := nodePieceMap[piece.NodeId]
				if ok {
					return nil, ErrNodeAlreadyExists.New("node id already exists in pointer. Key: %s, NodeID: %s", key, piece.NodeId.String())
				}
				// also guard against duplicates within toAdd itself
				nodePieceMap[piece.NodeId] = struct{}{}
			}
		}
		// remove the toRemove pieces from the map
		// only if all piece number, node id and hash match
		for _, piece := range toRemove {
			if piece == nil {
				continue
			}
			existing := pieceMap[piece.PieceNum]
			if existing != nil && existing.NodeId == piece.NodeId {
				delete(pieceMap, piece.PieceNum)
			}
		}

		// add the toAdd pieces to the map
		for _, piece := range toAdd {
			if piece == nil {
				continue
			}
			_, exists := pieceMap[piece.PieceNum]
			if exists {
				return nil, Error.New("piece to add already exists (piece no: %d)", piece.PieceNum)
			}
			pieceMap[piece.PieceNum] = piece
		}

		// copy the pieces from the map back to the pointer
		var pieces []*pb.RemotePiece
		for _, piece := range pieceMap {
			// clear hashes so we don't store them
			piece.Hash = nil
			pieces = append(pieces, piece)
		}
		pointer.GetRemote().RemotePieces = pieces

		// carry over repair bookkeeping from the caller's reference pointer
		pointer.LastRepaired = ref.LastRepaired
		pointer.RepairCount = ref.RepairCount

		// marshal the pointer
		newPointerBytes, err := pb.Marshal(pointer)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		// write the pointer using compare-and-swap
		err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, newPointerBytes)
		if storage.ErrValueChanged.Has(err) {
			// concurrent update: retry the whole read-modify-write cycle
			continue
		}
		if err != nil {
			if storage.ErrKeyNotFound.Has(err) {
				err = storj.ErrObjectNotFound.Wrap(err)
			}
			return nil, Error.Wrap(err)
		}
		return pointer, nil
	}
}
|
|
|
|
|
2019-12-10 11:15:35 +00:00
|
|
|
// Get gets decoded pointer from DB.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) Get(ctx context.Context, key metabase.SegmentKey) (_ *pb.Pointer, err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-09-03 14:54:56 +01:00
|
|
|
_, pointer, err := s.GetWithBytes(ctx, key)
|
2019-01-18 15:10:21 +00:00
|
|
|
if err != nil {
|
2019-12-10 11:15:35 +00:00
|
|
|
return nil, err
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return pointer, nil
|
|
|
|
}
|
|
|
|
|
2020-06-29 21:31:23 +01:00
|
|
|
// GetItems gets decoded pointers from DB.
|
|
|
|
// The return value is in the same order as the argument paths.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) GetItems(ctx context.Context, keys []metabase.SegmentKey) (_ []*pb.Pointer, err error) {
|
2020-06-29 21:31:23 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-09-03 14:54:56 +01:00
|
|
|
storageKeys := make(storage.Keys, len(keys))
|
|
|
|
for i := range keys {
|
|
|
|
storageKeys[i] = storage.Key(keys[i])
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
2020-09-03 14:54:56 +01:00
|
|
|
pointerBytes, err := s.db.GetAll(ctx, storageKeys)
|
2020-06-29 21:31:23 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pointers := make([]*pb.Pointer, len(pointerBytes))
|
|
|
|
for i, p := range pointerBytes {
|
|
|
|
if p == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
var pointer pb.Pointer
|
|
|
|
err = pb.Unmarshal([]byte(p), &pointer)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
pointers[i] = &pointer
|
|
|
|
}
|
|
|
|
return pointers, nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 11:15:35 +00:00
|
|
|
// GetWithBytes gets the protocol buffers encoded and decoded pointer from the DB.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) GetWithBytes(ctx context.Context, key metabase.SegmentKey) (pointerBytes []byte, pointer *pb.Pointer, err error) {
|
2019-11-06 17:02:14 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
pointerBytes, err = s.db.Get(ctx, storage.Key(key))
|
2019-11-06 17:02:14 +00:00
|
|
|
if err != nil {
|
2019-12-10 20:21:30 +00:00
|
|
|
if storage.ErrKeyNotFound.Has(err) {
|
|
|
|
err = storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
2019-11-06 17:02:14 +00:00
|
|
|
return nil, nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pointer = &pb.Pointer{}
|
2020-04-08 13:08:57 +01:00
|
|
|
err = pb.Unmarshal(pointerBytes, pointer)
|
2019-11-06 17:02:14 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return pointerBytes, pointer, nil
|
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// List returns all Path keys in the pointers bucket.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) List(ctx context.Context, prefix metabase.SegmentKey, startAfter string, recursive bool, limit int32,
|
2019-01-18 15:10:21 +00:00
|
|
|
metaFlags uint32) (items []*pb.ListResponse_Item, more bool, err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-01-18 15:10:21 +00:00
|
|
|
|
|
|
|
var prefixKey storage.Key
|
2020-09-03 14:54:56 +01:00
|
|
|
if len(prefix) != 0 {
|
2019-01-18 15:10:21 +00:00
|
|
|
prefixKey = storage.Key(prefix)
|
|
|
|
if prefix[len(prefix)-1] != storage.Delimiter {
|
|
|
|
prefixKey = append(prefixKey, storage.Delimiter)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-13 17:50:25 +01:00
|
|
|
more, err = storage.ListV2Iterate(ctx, s.db, storage.ListOptions{
|
2019-01-18 15:10:21 +00:00
|
|
|
Prefix: prefixKey,
|
|
|
|
StartAfter: storage.Key(startAfter),
|
|
|
|
Recursive: recursive,
|
|
|
|
Limit: int(limit),
|
|
|
|
IncludeValue: metaFlags != meta.None,
|
2020-04-13 17:50:25 +01:00
|
|
|
}, func(ctx context.Context, item *storage.ListItem) error {
|
|
|
|
items = append(items, s.createListItem(ctx, *item, metaFlags))
|
|
|
|
return nil
|
2019-01-18 15:10:21 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
2019-07-25 17:59:46 +01:00
|
|
|
return nil, false, Error.Wrap(err)
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return items, more, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createListItem creates a new list item with the given path. It also adds
|
|
|
|
// the metadata according to the given metaFlags.
|
2019-06-04 12:55:38 +01:00
|
|
|
func (s *Service) createListItem(ctx context.Context, rawItem storage.ListItem, metaFlags uint32) *pb.ListResponse_Item {
|
|
|
|
defer mon.Task()(&ctx)(nil)
|
2019-01-18 15:10:21 +00:00
|
|
|
item := &pb.ListResponse_Item{
|
|
|
|
Path: rawItem.Key.String(),
|
|
|
|
IsPrefix: rawItem.IsPrefix,
|
|
|
|
}
|
|
|
|
if item.IsPrefix {
|
|
|
|
return item
|
|
|
|
}
|
|
|
|
|
|
|
|
err := s.setMetadata(item, rawItem.Value, metaFlags)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Warn("err retrieving metadata", zap.Error(err))
|
|
|
|
}
|
|
|
|
return item
|
|
|
|
}
|
|
|
|
|
|
|
|
// getMetadata adds the metadata to the given item pointer according to the
|
2020-07-16 15:18:02 +01:00
|
|
|
// given metaFlags.
|
2019-01-18 15:10:21 +00:00
|
|
|
func (s *Service) setMetadata(item *pb.ListResponse_Item, data []byte, metaFlags uint32) (err error) {
|
|
|
|
if metaFlags == meta.None || len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
pr := &pb.Pointer{}
|
2020-04-08 13:08:57 +01:00
|
|
|
err = pb.Unmarshal(data, pr)
|
2019-01-18 15:10:21 +00:00
|
|
|
if err != nil {
|
2019-07-25 17:59:46 +01:00
|
|
|
return Error.Wrap(err)
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Start with an empty pointer to and add only what's requested in
|
|
|
|
// metaFlags to safe to transfer payload
|
|
|
|
item.Pointer = &pb.Pointer{}
|
|
|
|
if metaFlags&meta.Modified != 0 {
|
|
|
|
item.Pointer.CreationDate = pr.GetCreationDate()
|
|
|
|
}
|
|
|
|
if metaFlags&meta.Expiration != 0 {
|
|
|
|
item.Pointer.ExpirationDate = pr.GetExpirationDate()
|
|
|
|
}
|
|
|
|
if metaFlags&meta.Size != 0 {
|
|
|
|
item.Pointer.SegmentSize = pr.GetSegmentSize()
|
|
|
|
}
|
|
|
|
if metaFlags&meta.UserDefined != 0 {
|
|
|
|
item.Pointer.Metadata = pr.GetMetadata()
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-06 17:02:14 +00:00
|
|
|
// Delete deletes a pointer bytes when it matches oldPointerBytes, otherwise it'll fail.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) Delete(ctx context.Context, key metabase.SegmentKey, oldPointerBytes []byte) (err error) {
|
2019-11-06 17:02:14 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, nil)
|
2019-12-10 20:21:30 +00:00
|
|
|
if storage.ErrKeyNotFound.Has(err) {
|
|
|
|
err = storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
return Error.Wrap(err)
|
2019-11-06 17:02:14 +00:00
|
|
|
}
|
|
|
|
|
2020-06-29 21:31:23 +01:00
|
|
|
// UnsynchronizedGetDel deletes items from db without verifying whether the pointers have changed in the database,
|
|
|
|
// and it returns deleted items.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) UnsynchronizedGetDel(ctx context.Context, keys []metabase.SegmentKey) ([]metabase.SegmentKey, []*pb.Pointer, error) {
|
|
|
|
storageKeys := make(storage.Keys, len(keys))
|
|
|
|
for i := range keys {
|
|
|
|
storageKeys[i] = storage.Key(keys[i])
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
items, err := s.db.DeleteMultiple(ctx, storageKeys)
|
2020-06-29 21:31:23 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
pointerPaths := make([]metabase.SegmentKey, 0, len(items))
|
2020-06-29 21:31:23 +01:00
|
|
|
pointers := make([]*pb.Pointer, 0, len(items))
|
|
|
|
|
|
|
|
for _, item := range items {
|
|
|
|
data := &pb.Pointer{}
|
|
|
|
err = pb.Unmarshal(item.Value, data)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
pointerPaths = append(pointerPaths, metabase.SegmentKey(item.Key))
|
2020-06-29 21:31:23 +01:00
|
|
|
pointers = append(pointers, data)
|
|
|
|
}
|
|
|
|
|
|
|
|
return pointerPaths, pointers, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// UnsynchronizedDelete deletes item from db without verifying whether the pointer has changed in the database.
|
2020-09-03 14:54:56 +01:00
|
|
|
func (s *Service) UnsynchronizedDelete(ctx context.Context, key metabase.SegmentKey) (err error) {
|
2019-06-04 12:55:38 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-12-10 20:21:30 +00:00
|
|
|
|
2020-09-03 14:54:56 +01:00
|
|
|
err = s.db.Delete(ctx, storage.Key(key))
|
2019-12-10 20:21:30 +00:00
|
|
|
if storage.ErrKeyNotFound.Has(err) {
|
|
|
|
err = storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
return Error.Wrap(err)
|
2019-01-18 15:10:21 +00:00
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// CreateBucket creates a new bucket in the buckets db.
|
2019-07-08 23:32:18 +01:00
|
|
|
func (s *Service) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return s.bucketsDB.CreateBucket(ctx, bucket)
|
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// GetBucket returns an existing bucket in the buckets db.
|
2019-07-08 23:32:18 +01:00
|
|
|
func (s *Service) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return s.bucketsDB.GetBucket(ctx, bucketName, projectID)
|
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// UpdateBucket returns an updated bucket in the buckets db.
|
2019-07-19 16:17:34 +01:00
|
|
|
func (s *Service) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return s.bucketsDB.UpdateBucket(ctx, bucket)
|
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// DeleteBucket deletes a bucket from the bucekts db.
|
2019-07-08 23:32:18 +01:00
|
|
|
func (s *Service) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-02-14 12:52:00 +00:00
|
|
|
|
|
|
|
empty, err := s.IsBucketEmpty(ctx, projectID, bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !empty {
|
|
|
|
return ErrBucketNotEmpty.New("")
|
|
|
|
}
|
|
|
|
|
2019-07-08 23:32:18 +01:00
|
|
|
return s.bucketsDB.DeleteBucket(ctx, bucketName, projectID)
|
|
|
|
}
|
|
|
|
|
2020-02-14 12:52:00 +00:00
|
|
|
// IsBucketEmpty returns whether bucket is empty.
|
|
|
|
func (s *Service) IsBucketEmpty(ctx context.Context, projectID uuid.UUID, bucketName []byte) (bool, error) {
|
2020-11-17 17:37:58 +00:00
|
|
|
empty, err := s.metabaseDB.BucketEmpty(ctx, metabase.BucketEmpty{
|
|
|
|
ProjectID: projectID,
|
|
|
|
BucketName: string(bucketName),
|
|
|
|
})
|
|
|
|
return empty, Error.Wrap(err)
|
2020-02-14 12:52:00 +00:00
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// ListBuckets returns a list of buckets for a project.
|
2019-07-12 13:57:02 +01:00
|
|
|
func (s *Service) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) {
|
2019-07-08 23:32:18 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return s.bucketsDB.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
|
|
|
|
}
|
2020-06-30 22:49:29 +01:00
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// CountBuckets returns the number of buckets a project currently has.
|
2020-06-30 22:49:29 +01:00
|
|
|
func (s *Service) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return s.bucketsDB.CountBuckets(ctx, projectID)
|
|
|
|
}
|