2019-03-18 10:55:06 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package metainfo
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
2019-06-19 13:02:37 +01:00
|
|
|
"github.com/skyrings/skyring-common/tools/uuid"
|
2019-03-18 10:55:06 +00:00
|
|
|
"github.com/zeebo/errs"
|
|
|
|
"google.golang.org/grpc"
|
|
|
|
"google.golang.org/grpc/codes"
|
|
|
|
"google.golang.org/grpc/status"
|
2019-07-09 22:54:00 +01:00
|
|
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
2019-03-18 10:55:06 +00:00
|
|
|
|
|
|
|
"storj.io/storj/pkg/auth/grpcauth"
|
|
|
|
"storj.io/storj/pkg/pb"
|
|
|
|
"storj.io/storj/pkg/storj"
|
|
|
|
"storj.io/storj/pkg/transport"
|
|
|
|
"storj.io/storj/storage"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// mon collects monkit telemetry for every method in this package
	// (each method registers a task via mon.Task()).
	mon = monkit.Package()

	// Error is the errs class of standard metainfo errors
	Error = errs.Class("metainfo error")
)
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// Client creates a grpcClient
type Client struct {
	// client is the underlying protobuf metainfo RPC client.
	client pb.MetainfoClient
	// conn is the dialed gRPC connection; it is nil when the Client was
	// built via New around an existing pb.MetainfoClient, in which case
	// Close is a no-op.
	conn *grpc.ClientConn
}
|
|
|
|
|
|
|
|
// ListItem is a single item in a listing
type ListItem struct {
	// Path is the segment path of the item.
	Path storj.Path
	// Pointer is the segment metadata pointer, if requested in the listing.
	Pointer *pb.Pointer
	// IsPrefix is true when the item is a path prefix rather than a leaf.
	IsPrefix bool
}
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// New used as a public function
|
|
|
|
func New(client pb.MetainfoClient) *Client {
|
|
|
|
return &Client{
|
|
|
|
client: client,
|
|
|
|
}
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// Dial dials to metainfo endpoint with the specified api key.
|
|
|
|
func Dial(ctx context.Context, tc transport.Client, address string, apiKey string) (*Client, error) {
|
2019-05-29 14:14:25 +01:00
|
|
|
apiKeyInjector := grpcauth.NewAPIKeyInjector(apiKey)
|
2019-03-18 10:55:06 +00:00
|
|
|
conn, err := tc.DialAddress(
|
|
|
|
ctx,
|
|
|
|
address,
|
|
|
|
grpc.WithUnaryInterceptor(apiKeyInjector),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
return &Client{
|
|
|
|
client: pb.NewMetainfoClient(conn),
|
|
|
|
conn: conn,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes the dialed connection.
|
|
|
|
func (client *Client) Close() error {
|
|
|
|
if client.conn != nil {
|
|
|
|
return Error.Wrap(client.conn.Close())
|
|
|
|
}
|
|
|
|
return nil
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CreateSegment requests the order limits for creating a new segment
|
2019-07-11 21:51:40 +01:00
|
|
|
func (client *Client) CreateSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.CreateSegmentOld(ctx, &pb.SegmentWriteRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Path: []byte(path),
|
|
|
|
Segment: segmentIndex,
|
|
|
|
Redundancy: redundancy,
|
|
|
|
MaxEncryptedSegmentSize: maxEncryptedSegmentSize,
|
2019-07-09 22:54:00 +01:00
|
|
|
Expiration: expiration,
|
2019-03-18 10:55:06 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
2019-07-11 21:51:40 +01:00
|
|
|
return nil, rootPieceID, piecePrivateKey, Error.Wrap(err)
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2019-07-11 21:51:40 +01:00
|
|
|
return response.GetAddressedLimits(), response.RootPieceId, response.PrivateKey, nil
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CommitSegment requests to store the pointer for the segment
|
2019-07-01 16:54:11 +01:00
|
|
|
func (client *Client) CommitSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, pointer *pb.Pointer, originalLimits []*pb.OrderLimit) (savedPointer *pb.Pointer, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.CommitSegmentOld(ctx, &pb.SegmentCommitRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Path: []byte(path),
|
|
|
|
Segment: segmentIndex,
|
|
|
|
Pointer: pointer,
|
|
|
|
OriginalLimits: originalLimits,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.GetPointer(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// SegmentInfo requests the pointer of a segment
|
2019-06-25 16:36:23 +01:00
|
|
|
func (client *Client) SegmentInfo(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.SegmentInfoOld(ctx, &pb.SegmentInfoRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Path: []byte(path),
|
|
|
|
Segment: segmentIndex,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
if status.Code(err) == codes.NotFound {
|
|
|
|
return nil, storage.ErrKeyNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.GetPointer(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadSegment requests the order limits for reading a segment
|
2019-07-11 21:51:40 +01:00
|
|
|
func (client *Client) ReadSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.DownloadSegmentOld(ctx, &pb.SegmentDownloadRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Path: []byte(path),
|
|
|
|
Segment: segmentIndex,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
if status.Code(err) == codes.NotFound {
|
2019-07-11 21:51:40 +01:00
|
|
|
return nil, nil, piecePrivateKey, storage.ErrKeyNotFound.Wrap(err)
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
2019-07-11 21:51:40 +01:00
|
|
|
return nil, nil, piecePrivateKey, Error.Wrap(err)
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2019-07-11 21:51:40 +01:00
|
|
|
return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), response.PrivateKey, nil
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// sortLimits sorts order limits and fill missing ones with nil values
|
|
|
|
func sortLimits(limits []*pb.AddressedOrderLimit, pointer *pb.Pointer) []*pb.AddressedOrderLimit {
|
|
|
|
sorted := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal())
|
|
|
|
for _, piece := range pointer.GetRemote().GetRemotePieces() {
|
|
|
|
sorted[piece.GetPieceNum()] = getLimitByStorageNodeID(limits, piece.NodeId)
|
|
|
|
}
|
|
|
|
return sorted
|
|
|
|
}
|
|
|
|
|
|
|
|
func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID storj.NodeID) *pb.AddressedOrderLimit {
|
|
|
|
for _, limit := range limits {
|
|
|
|
if limit.GetLimit().StorageNodeId == storageNodeID {
|
|
|
|
return limit
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteSegment requests the order limits for deleting a segment
|
2019-07-11 21:51:40 +01:00
|
|
|
func (client *Client) DeleteSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.DeleteSegmentOld(ctx, &pb.SegmentDeleteRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Path: []byte(path),
|
|
|
|
Segment: segmentIndex,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
if status.Code(err) == codes.NotFound {
|
2019-07-11 21:51:40 +01:00
|
|
|
return nil, piecePrivateKey, storage.ErrKeyNotFound.Wrap(err)
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
2019-07-11 21:51:40 +01:00
|
|
|
return nil, piecePrivateKey, Error.Wrap(err)
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2019-07-11 21:51:40 +01:00
|
|
|
return response.GetAddressedLimits(), response.PrivateKey, nil
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListSegments lists the available segments
|
2019-06-25 16:36:23 +01:00
|
|
|
func (client *Client) ListSegments(ctx context.Context, bucket string, prefix, startAfter, endBefore storj.Path, recursive bool, limit int32, metaFlags uint32) (items []ListItem, more bool, err error) {
|
2019-03-18 10:55:06 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
response, err := client.client.ListSegmentsOld(ctx, &pb.ListSegmentsRequestOld{
|
2019-03-18 10:55:06 +00:00
|
|
|
Bucket: []byte(bucket),
|
|
|
|
Prefix: []byte(prefix),
|
|
|
|
StartAfter: []byte(startAfter),
|
|
|
|
EndBefore: []byte(endBefore),
|
|
|
|
Recursive: recursive,
|
|
|
|
Limit: limit,
|
|
|
|
MetaFlags: metaFlags,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, false, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
list := response.GetItems()
|
|
|
|
items = make([]ListItem, len(list))
|
|
|
|
for i, item := range list {
|
|
|
|
items[i] = ListItem{
|
|
|
|
Path: storj.Path(item.GetPath()),
|
|
|
|
Pointer: item.GetPointer(),
|
|
|
|
IsPrefix: item.IsPrefix,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return items, response.GetMore(), nil
|
|
|
|
}
|
2019-06-13 02:35:37 +01:00
|
|
|
|
2019-06-19 13:02:37 +01:00
|
|
|
// SetAttribution tries to set the attribution information on the bucket.
|
2019-06-25 16:36:23 +01:00
|
|
|
func (client *Client) SetAttribution(ctx context.Context, bucket string, partnerID uuid.UUID) (err error) {
|
2019-06-13 02:35:37 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
_, err = client.client.SetAttributionOld(ctx, &pb.SetAttributionRequestOld{
|
2019-06-19 13:02:37 +01:00
|
|
|
PartnerId: partnerID[:], // TODO: implement storj.UUID that can be sent using pb
|
2019-06-13 02:35:37 +01:00
|
|
|
BucketName: []byte(bucket),
|
|
|
|
})
|
|
|
|
|
2019-07-12 13:57:02 +01:00
|
|
|
return Error.Wrap(err)
|
2019-06-13 02:35:37 +01:00
|
|
|
}
|
2019-06-27 18:36:51 +01:00
|
|
|
|
|
|
|
// GetProjectInfo gets the ProjectInfo for the api key associated with the metainfo client.
|
|
|
|
func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoResponse, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{})
|
|
|
|
}
|
2019-07-12 13:57:02 +01:00
|
|
|
|
|
|
|
// CreateBucket creates a new bucket
|
|
|
|
func (client *Client) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
req := convertBucketToProtoRequest(bucket)
|
|
|
|
resp, err := client.client.CreateBucket(ctx, &req)
|
|
|
|
if err != nil {
|
|
|
|
return storj.Bucket{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return convertProtoToBucket(resp.Bucket), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBucket returns a bucket
|
|
|
|
func (client *Client) GetBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
resp, err := client.client.GetBucket(ctx, &pb.BucketGetRequest{Name: []byte(bucketName)})
|
|
|
|
if err != nil {
|
|
|
|
if status.Code(err) == codes.NotFound {
|
|
|
|
return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
return storj.Bucket{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
return convertProtoToBucket(resp.Bucket), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBucket deletes a bucket
|
|
|
|
func (client *Client) DeleteBucket(ctx context.Context, bucketName string) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
_, err = client.client.DeleteBucket(ctx, &pb.BucketDeleteRequest{Name: []byte(bucketName)})
|
|
|
|
if err != nil {
|
|
|
|
if status.Code(err) == codes.NotFound {
|
|
|
|
return storj.ErrBucketNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListBuckets lists buckets
|
|
|
|
func (client *Client) ListBuckets(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
req := &pb.BucketListRequest{
|
|
|
|
Cursor: []byte(listOpts.Cursor),
|
|
|
|
Limit: int32(listOpts.Limit),
|
|
|
|
Direction: int32(listOpts.Direction),
|
|
|
|
}
|
|
|
|
resp, err := client.client.ListBuckets(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return storj.BucketList{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
resultBucketList := storj.BucketList{
|
|
|
|
More: resp.GetMore(),
|
|
|
|
}
|
|
|
|
resultBucketList.Items = make([]storj.Bucket, len(resp.GetItems()))
|
|
|
|
for i, item := range resp.GetItems() {
|
|
|
|
resultBucketList.Items[i] = storj.Bucket{
|
|
|
|
Name: string(item.GetName()),
|
|
|
|
Created: item.GetCreatedAt(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return resultBucketList, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func convertBucketToProtoRequest(bucket storj.Bucket) pb.BucketCreateRequest {
|
|
|
|
rs := bucket.DefaultRedundancyScheme
|
|
|
|
return pb.BucketCreateRequest{
|
|
|
|
Name: []byte(bucket.Name),
|
|
|
|
PathCipher: pb.CipherSuite(bucket.PathCipher),
|
|
|
|
DefaultSegmentSize: bucket.DefaultSegmentsSize,
|
|
|
|
DefaultRedundancyScheme: &pb.RedundancyScheme{
|
|
|
|
Type: pb.RedundancyScheme_SchemeType(rs.Algorithm),
|
|
|
|
MinReq: int32(rs.RequiredShares),
|
|
|
|
Total: int32(rs.TotalShares),
|
|
|
|
RepairThreshold: int32(rs.RepairShares),
|
|
|
|
SuccessThreshold: int32(rs.OptimalShares),
|
|
|
|
ErasureShareSize: rs.ShareSize,
|
|
|
|
},
|
|
|
|
DefaultEncryptionParameters: &pb.EncryptionParameters{
|
|
|
|
CipherSuite: pb.CipherSuite(bucket.DefaultEncryptionParameters.CipherSuite),
|
|
|
|
BlockSize: int64(bucket.DefaultEncryptionParameters.BlockSize),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func convertProtoToBucket(pbBucket *pb.Bucket) storj.Bucket {
|
|
|
|
defaultRS := pbBucket.GetDefaultRedundancyScheme()
|
|
|
|
defaultEP := pbBucket.GetDefaultEncryptionParameters()
|
|
|
|
return storj.Bucket{
|
|
|
|
Name: string(pbBucket.GetName()),
|
|
|
|
PathCipher: storj.CipherSuite(pbBucket.GetPathCipher()),
|
|
|
|
Created: pbBucket.GetCreatedAt(),
|
|
|
|
DefaultSegmentsSize: pbBucket.GetDefaultSegmentSize(),
|
|
|
|
DefaultRedundancyScheme: storj.RedundancyScheme{
|
|
|
|
Algorithm: storj.RedundancyAlgorithm(defaultRS.GetType()),
|
|
|
|
ShareSize: defaultRS.GetErasureShareSize(),
|
|
|
|
RequiredShares: int16(defaultRS.GetMinReq()),
|
|
|
|
RepairShares: int16(defaultRS.GetRepairThreshold()),
|
|
|
|
OptimalShares: int16(defaultRS.GetSuccessThreshold()),
|
|
|
|
TotalShares: int16(defaultRS.GetTotal()),
|
|
|
|
},
|
|
|
|
DefaultEncryptionParameters: storj.EncryptionParameters{
|
|
|
|
CipherSuite: storj.CipherSuite(defaultEP.CipherSuite),
|
|
|
|
BlockSize: int32(defaultEP.BlockSize),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2019-07-16 11:39:23 +01:00
|
|
|
|
|
|
|
// BeginObject begins object creation
|
|
|
|
func (client *Client) BeginObject(ctx context.Context, bucket []byte, encryptedPath []byte, version int32,
|
|
|
|
rs storj.RedundancyScheme, ep storj.EncryptionParameters, expiresAt time.Time, nonce storj.Nonce, encryptedMetadata []byte) (_ storj.StreamID, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
// TODO do proper algorithm conversion
|
|
|
|
response, err := client.client.BeginObject(ctx, &pb.ObjectBeginRequest{
|
|
|
|
Bucket: bucket,
|
|
|
|
EncryptedPath: encryptedPath,
|
|
|
|
Version: version,
|
|
|
|
ExpiresAt: expiresAt,
|
|
|
|
EncryptedMetadataNonce: nonce,
|
|
|
|
EncryptedMetadata: encryptedMetadata,
|
|
|
|
RedundancyScheme: &pb.RedundancyScheme{
|
|
|
|
Type: pb.RedundancyScheme_RS,
|
|
|
|
ErasureShareSize: rs.ShareSize,
|
|
|
|
MinReq: int32(rs.RequiredShares),
|
|
|
|
RepairThreshold: int32(rs.RepairShares),
|
|
|
|
SuccessThreshold: int32(rs.OptimalShares),
|
|
|
|
Total: int32(rs.TotalShares),
|
|
|
|
},
|
|
|
|
EncryptionParameters: &pb.EncryptionParameters{
|
|
|
|
CipherSuite: pb.CipherSuite(ep.CipherSuite),
|
|
|
|
BlockSize: int64(ep.BlockSize),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.StreamId, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CommitObject commits created object
|
|
|
|
func (client *Client) CommitObject(ctx context.Context, streamID storj.StreamID) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
_, err = client.client.CommitObject(ctx, &pb.ObjectCommitRequest{
|
|
|
|
StreamId: streamID,
|
|
|
|
})
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// BeginDeleteObject begins object deletion process
|
|
|
|
func (client *Client) BeginDeleteObject(ctx context.Context, bucket []byte, encryptedPath []byte, version int32) (_ storj.StreamID, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
response, err := client.client.BeginDeleteObject(ctx, &pb.ObjectBeginDeleteRequest{
|
|
|
|
Bucket: bucket,
|
|
|
|
EncryptedPath: encryptedPath,
|
|
|
|
Version: version,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return storj.StreamID{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.StreamId, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FinishDeleteObject finishes object deletion process
|
|
|
|
func (client *Client) FinishDeleteObject(ctx context.Context, streamID storj.StreamID) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
_, err = client.client.FinishDeleteObject(ctx, &pb.ObjectFinishDeleteRequest{
|
|
|
|
StreamId: streamID,
|
|
|
|
})
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListObjects lists objects according to specific parameters
|
|
|
|
func (client *Client) ListObjects(ctx context.Context, bucket []byte, encryptedPrefix []byte, encryptedCursor []byte, limit int32) (_ []storj.ObjectListItem, more bool, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
response, err := client.client.ListObjects(ctx, &pb.ObjectListRequest{
|
|
|
|
Bucket: bucket,
|
|
|
|
EncryptedPrefix: encryptedPrefix,
|
|
|
|
EncryptedCursor: encryptedCursor,
|
|
|
|
Limit: limit,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return []storj.ObjectListItem{}, false, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
objects := make([]storj.ObjectListItem, len(response.Items))
|
|
|
|
for i, object := range response.Items {
|
|
|
|
objects[i] = storj.ObjectListItem{
|
|
|
|
EncryptedPath: object.EncryptedPath,
|
|
|
|
Version: object.Version,
|
|
|
|
Status: int32(object.Status),
|
|
|
|
StatusAt: object.StatusAt,
|
|
|
|
CreatedAt: object.CreatedAt,
|
|
|
|
ExpiresAt: object.ExpiresAt,
|
|
|
|
EncryptedMetadataNonce: object.EncryptedMetadataNonce,
|
|
|
|
EncryptedMetadata: object.EncryptedMetadata,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return objects, response.More, Error.Wrap(err)
|
|
|
|
}
|