// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"context"
	"fmt"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/macaroon"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/storage"
	"storj.io/uplink/private/storage/meta"
)

var (
	// ErrBucketNotEmpty is returned when a bucket is required to be empty for an operation.
	ErrBucketNotEmpty = errs.Class("bucket not empty")
)

// Service implements the metainfo service, storing and retrieving
// object pointers and bucket metadata.
//
// architecture: Service
type Service struct {
	logger    *zap.Logger
	db        PointerDB
	bucketsDB BucketsDB
}

// NewService creates a new metainfo service.
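//
// Typical wiring (illustrative only; log, pointerDB, and bucketsDB are
// assumed to be provided elsewhere by the satellite setup code):
//
//	service := metainfo.NewService(log.Named("metainfo"), pointerDB, bucketsDB)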
func NewService(logger *zap.Logger, db PointerDB, bucketsDB BucketsDB) *Service {
	return &Service{logger: logger, db: db, bucketsDB: bucketsDB}
}

// Put puts a pointer to the db under a specific path.
func (s *Service) Put(ctx context.Context, path string, pointer *pb.Pointer) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := sanityCheckPointer(path, pointer); err != nil {
		return Error.Wrap(err)
	}

	// Update the pointer with the creation date
	pointer.CreationDate = time.Now()

	pointerBytes, err := pb.Marshal(pointer)
	if err != nil {
		return Error.Wrap(err)
	}

	// CompareAndSwap is used instead of Put to avoid overwriting existing pointers
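	// passing nil as the old value means the swap only succeeds when no
	// pointer exists yet under this path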
	err = s.db.CompareAndSwap(ctx, []byte(path), nil, pointerBytes)
	return Error.Wrap(err)
}

// UnsynchronizedPut puts a pointer to the db under a specific path without
// checking whether a pointer already exists under the same path.
func (s *Service) UnsynchronizedPut(ctx context.Context, path string, pointer *pb.Pointer) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := sanityCheckPointer(path, pointer); err != nil {
		return Error.Wrap(err)
	}

	// Update the pointer with the creation date
	pointer.CreationDate = time.Now()

	pointerBytes, err := pb.Marshal(pointer)
	if err != nil {
		return Error.Wrap(err)
	}
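
	// unlike Put, this writes unconditionally and overwrites any existing
	// pointer under the same path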
	err = s.db.Put(ctx, []byte(path), pointerBytes)
	return Error.Wrap(err)
}

// UpdatePieces calls UpdatePiecesCheckDuplicates with checkDuplicates equal to false.
func (s *Service) UpdatePieces(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error) {
	return s.UpdatePiecesCheckDuplicates(ctx, path, ref, toAdd, toRemove, false)
}

// UpdatePiecesCheckDuplicates atomically adds toAdd pieces to and removes
// toRemove pieces from the pointer under path. ref is the pointer that the
// caller received via Get prior to calling this method.
//
// It first checks whether the pointer has been deleted or replaced.
// Then, if checkDuplicates is true, it returns an error if any of the nodes
// to be added already hold a piece of the pointer.
// Then it removes the toRemove pieces and adds the toAdd pieces.
// Replacing the node ID and the hash of a piece can be done by adding the
// piece to both toAdd and toRemove.
func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := sanityCheckPointer(path, ref); err != nil {
		return nil, Error.Wrap(err)
	}

	defer func() {
		if err == nil {
			err = sanityCheckPointer(path, pointer)
		}
	}()
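
	// optimistic concurrency: read the pointer, apply the changes, and write
	// it back with CompareAndSwap; if another writer changed it in the
	// meantime, start over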
	for {
		// read the pointer
		oldPointerBytes, err := s.db.Get(ctx, []byte(path))
		if err != nil {
			if storage.ErrKeyNotFound.Has(err) {
				err = storj.ErrObjectNotFound.Wrap(err)
			}
			return nil, Error.Wrap(err)
		}

		// unmarshal the pointer
		pointer = &pb.Pointer{}
		err = pb.Unmarshal(oldPointerBytes, pointer)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		// check if pointer has been replaced
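		// (the creation date acts as a version: Put stamps it when the
		// pointer is written, so a mismatch means the pointer was replaced
		// after ref was read)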
		if !pointer.GetCreationDate().Equal(ref.GetCreationDate()) {
			return nil, Error.New("pointer has been replaced")
		}

		// put all existing pieces to a map
		pieceMap := make(map[int32]*pb.RemotePiece)
		nodePieceMap := make(map[storj.NodeID]struct{})
		for _, piece := range pointer.GetRemote().GetRemotePieces() {
			pieceMap[piece.PieceNum] = piece
			if checkDuplicates {
				nodePieceMap[piece.NodeId] = struct{}{}
			}
		}

		// Return an error if the pointer already has a piece for this node
		if checkDuplicates {
			for _, piece := range toAdd {
				_, ok := nodePieceMap[piece.NodeId]
				if ok {
					return nil, ErrNodeAlreadyExists.New("node id already exists in pointer. Path: %s, NodeID: %s", path, piece.NodeId.String())
				}
				nodePieceMap[piece.NodeId] = struct{}{}
			}
		}

		// remove the toRemove pieces from the map
		// only if piece number and node id match
		for _, piece := range toRemove {
			if piece == nil {
				continue
			}
			existing := pieceMap[piece.PieceNum]
			if existing != nil && existing.NodeId == piece.NodeId {
				delete(pieceMap, piece.PieceNum)
			}
		}

		// add the toAdd pieces to the map
		for _, piece := range toAdd {
			if piece == nil {
				continue
			}
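			// piece hashes are not persisted in the pointer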
			piece.Hash = nil
			_, exists := pieceMap[piece.PieceNum]
			if exists {
				// temporary logging to get some insight into this error
				s.logger.Info("temporary logging around error: 'piece to add already exists'",
					zap.String("old pointer", fmt.Sprintf("%v", ref.GetRemote().GetRemotePieces())),
					zap.String("latest pointer", fmt.Sprintf("%v", pointer.GetRemote().GetRemotePieces())),
					zap.String("nodes to remove", fmt.Sprintf("%v", toRemove)),
					zap.String("nodes to add", fmt.Sprintf("%v", toAdd)))
				return nil, Error.New("piece to add already exists (piece no: %d)", piece.PieceNum)
			}
			pieceMap[piece.PieceNum] = piece
		}

		// copy the pieces from the map back to the pointer
		var pieces []*pb.RemotePiece
		for _, piece := range pieceMap {
			// clear hashes so we don't store them
			piece.Hash = nil
			pieces = append(pieces, piece)
		}
		pointer.GetRemote().RemotePieces = pieces

		pointer.LastRepaired = ref.LastRepaired
		pointer.RepairCount = ref.RepairCount

		// marshal the pointer
		newPointerBytes, err := pb.Marshal(pointer)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		// write the pointer using compare-and-swap
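		// if another writer modified the pointer since we read it,
		// ErrValueChanged sends us back to the top of the loop to retry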
		err = s.db.CompareAndSwap(ctx, []byte(path), oldPointerBytes, newPointerBytes)
		if storage.ErrValueChanged.Has(err) {
			continue
		}
		if err != nil {
			if storage.ErrKeyNotFound.Has(err) {
				err = storj.ErrObjectNotFound.Wrap(err)
			}
			return nil, Error.Wrap(err)
		}

		return pointer, nil
	}
}

// Get gets the decoded pointer from the DB.
func (s *Service) Get(ctx context.Context, path string) (_ *pb.Pointer, err error) {
	defer mon.Task()(&ctx)(&err)

	_, pointer, err := s.GetWithBytes(ctx, path)
	if err != nil {
		return nil, err
	}

	return pointer, nil
}

// GetItems gets decoded pointers from the DB.
// The return value is in the same order as the argument paths.
func (s *Service) GetItems(ctx context.Context, paths [][]byte) (_ []*pb.Pointer, err error) {
	defer mon.Task()(&ctx)(&err)

	keys := make(storage.Keys, len(paths))
	for i := range paths {
		keys[i] = paths[i]
	}
	pointerBytes, err := s.db.GetAll(ctx, keys)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	pointers := make([]*pb.Pointer, len(pointerBytes))
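	// leave the entry nil when no value was returned for the corresponding path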
	for i, p := range pointerBytes {
		if p == nil {
			continue
		}
		var pointer pb.Pointer
		err = pb.Unmarshal([]byte(p), &pointer)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		pointers[i] = &pointer
	}
	return pointers, nil
}

// GetWithBytes gets both the protobuf-encoded bytes and the decoded pointer from the DB.
func (s *Service) GetWithBytes(ctx context.Context, path string) (pointerBytes []byte, pointer *pb.Pointer, err error) {
	defer mon.Task()(&ctx)(&err)

	pointerBytes, err = s.db.Get(ctx, []byte(path))
	if err != nil {
		if storage.ErrKeyNotFound.Has(err) {
			err = storj.ErrObjectNotFound.Wrap(err)
		}
		return nil, nil, Error.Wrap(err)
	}

	pointer = &pb.Pointer{}
	err = pb.Unmarshal(pointerBytes, pointer)
	if err != nil {
		return nil, nil, Error.Wrap(err)
	}

	return pointerBytes, pointer, nil
}

// List returns all Path keys in the pointers bucket.
func (s *Service) List(ctx context.Context, prefix string, startAfter string, recursive bool, limit int32,
	metaFlags uint32) (items []*pb.ListResponse_Item, more bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var prefixKey storage.Key
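	// ensure a non-empty prefix ends with the path delimiter so the listing
	// does not match sibling keys that merely share a string prefix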
	if prefix != "" {
		prefixKey = storage.Key(prefix)
		if prefix[len(prefix)-1] != storage.Delimiter {
			prefixKey = append(prefixKey, storage.Delimiter)
		}
	}

	more, err = storage.ListV2Iterate(ctx, s.db, storage.ListOptions{
		Prefix:       prefixKey,
		StartAfter:   storage.Key(startAfter),
		Recursive:    recursive,
		Limit:        int(limit),
		IncludeValue: metaFlags != meta.None,
	}, func(ctx context.Context, item *storage.ListItem) error {
		items = append(items, s.createListItem(ctx, *item, metaFlags))
		return nil
	})
	if err != nil {
		return nil, false, Error.Wrap(err)
	}

	return items, more, nil
}

// createListItem creates a new list item with the given path. It also adds
// the metadata according to the given metaFlags.
func (s *Service) createListItem(ctx context.Context, rawItem storage.ListItem, metaFlags uint32) *pb.ListResponse_Item {
	defer mon.Task()(&ctx)(nil)

	item := &pb.ListResponse_Item{
		Path:     rawItem.Key.String(),
		IsPrefix: rawItem.IsPrefix,
	}
	if item.IsPrefix {
		return item
	}

	err := s.setMetadata(item, rawItem.Value, metaFlags)
	if err != nil {
		s.logger.Warn("err retrieving metadata", zap.Error(err))
	}
	return item
}

// setMetadata adds the metadata to the given item pointer according to the
// given metaFlags.
func (s *Service) setMetadata(item *pb.ListResponse_Item, data []byte, metaFlags uint32) (err error) {
	if metaFlags == meta.None || len(data) == 0 {
		return nil
	}

	pr := &pb.Pointer{}
	err = pb.Unmarshal(data, pr)
	if err != nil {
		return Error.Wrap(err)
	}

	// Start with an empty pointer and add only what's requested in metaFlags
	// to keep the transferred payload small.
	item.Pointer = &pb.Pointer{}
	if metaFlags&meta.Modified != 0 {
		item.Pointer.CreationDate = pr.GetCreationDate()
	}
	if metaFlags&meta.Expiration != 0 {
		item.Pointer.ExpirationDate = pr.GetExpirationDate()
	}
	if metaFlags&meta.Size != 0 {
		item.Pointer.SegmentSize = pr.GetSegmentSize()
	}
	if metaFlags&meta.UserDefined != 0 {
		item.Pointer.Metadata = pr.GetMetadata()
	}

	return nil
}

// Delete deletes the pointer at path when its bytes match oldPointerBytes; otherwise it fails.
func (s *Service) Delete(ctx context.Context, path string, oldPointerBytes []byte) (err error) {
	defer mon.Task()(&ctx)(&err)
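
	// CompareAndSwap with a nil new value deletes the key, but only if it
	// still holds oldPointerBytes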
	err = s.db.CompareAndSwap(ctx, []byte(path), oldPointerBytes, nil)
	if storage.ErrKeyNotFound.Has(err) {
		err = storj.ErrObjectNotFound.Wrap(err)
	}
	return Error.Wrap(err)
}

// UnsynchronizedGetDel deletes items from the db without verifying whether the pointers have
// changed in the database, and it returns the deleted items.
func (s *Service) UnsynchronizedGetDel(ctx context.Context, paths [][]byte) ([][]byte, []*pb.Pointer, error) {
	keys := make(storage.Keys, len(paths))
	for i := range paths {
		keys[i] = paths[i]
	}
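
	// delete the keys and collect the deleted items; each item carries the
	// key and the raw pointer bytes that were stored under it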
	items, err := s.db.DeleteMultiple(ctx, keys)
	if err != nil {
		return nil, nil, Error.Wrap(err)
	}

	pointerPaths := make([][]byte, 0, len(items))
	pointers := make([]*pb.Pointer, 0, len(items))

	for _, item := range items {
		data := &pb.Pointer{}
		err = pb.Unmarshal(item.Value, data)
		if err != nil {
			return nil, nil, Error.Wrap(err)
		}

		pointerPaths = append(pointerPaths, item.Key)
		pointers = append(pointers, data)
	}

	return pointerPaths, pointers, nil
}

// UnsynchronizedDelete deletes an item from the db without verifying whether the pointer has changed in the database.
func (s *Service) UnsynchronizedDelete(ctx context.Context, path string) (err error) {
	defer mon.Task()(&ctx)(&err)

	err = s.db.Delete(ctx, []byte(path))
	if storage.ErrKeyNotFound.Has(err) {
		err = storj.ErrObjectNotFound.Wrap(err)
	}
	return Error.Wrap(err)
}

// CreateBucket creates a new bucket in the buckets db.
func (s *Service) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.CreateBucket(ctx, bucket)
}

// GetBucket returns an existing bucket in the buckets db.
func (s *Service) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.GetBucket(ctx, bucketName, projectID)
}

// UpdateBucket updates a bucket in the buckets db and returns the updated bucket.
func (s *Service) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.UpdateBucket(ctx, bucket)
}

// DeleteBucket deletes a bucket from the buckets db.
func (s *Service) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)

	empty, err := s.IsBucketEmpty(ctx, projectID, bucketName)
	if err != nil {
		return err
	}
	if !empty {
		return ErrBucketNotEmpty.New("")
	}

	return s.bucketsDB.DeleteBucket(ctx, bucketName, projectID)
}

// IsBucketEmpty returns whether the bucket is empty.
func (s *Service) IsBucketEmpty(ctx context.Context, projectID uuid.UUID, bucketName []byte) (bool, error) {
	prefix, err := CreatePath(ctx, projectID, -1, bucketName, []byte{})
	if err != nil {
		return false, Error.Wrap(err)
	}
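
	// listing a single item under the bucket prefix is enough to decide
	// whether the bucket is empty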
	items, _, err := s.List(ctx, prefix, "", true, 1, 0)
	if err != nil {
		return false, Error.Wrap(err)
	}
	return len(items) == 0, nil
}

// ListBuckets returns a list of buckets for a project.
func (s *Service) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
}

// CountBuckets returns the number of buckets a project currently has.
func (s *Service) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
	defer mon.Task()(&ctx)(&err)
	return s.bucketsDB.CountBuckets(ctx, projectID)
}