2019-03-18 10:55:06 +00:00
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"context"
2019-06-24 18:15:45 +01:00
"crypto/sha256"
2019-04-02 15:55:58 +01:00
"errors"
2019-03-18 10:55:06 +00:00
"strconv"
2019-04-02 19:21:18 +01:00
"time"
2019-03-18 10:55:06 +00:00
2019-07-16 11:39:23 +01:00
"github.com/gogo/protobuf/proto"
2019-03-18 10:55:06 +00:00
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
2019-04-02 19:21:18 +01:00
"storj.io/storj/pkg/accounting"
2019-07-08 23:32:18 +01:00
"storj.io/storj/pkg/auth"
2019-07-16 11:39:23 +01:00
"storj.io/storj/pkg/auth/signing"
2019-03-18 10:55:06 +00:00
"storj.io/storj/pkg/eestream"
2019-05-24 17:51:27 +01:00
"storj.io/storj/pkg/macaroon"
2019-03-18 10:55:06 +00:00
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
2019-07-16 11:39:23 +01:00
"storj.io/storj/pkg/storage/meta"
2019-03-18 10:55:06 +00:00
"storj.io/storj/pkg/storj"
2019-06-21 20:14:34 +01:00
"storj.io/storj/satellite/attribution"
2019-03-18 10:55:06 +00:00
"storj.io/storj/satellite/console"
2019-03-27 10:24:35 +00:00
"storj.io/storj/satellite/orders"
2019-03-18 10:55:06 +00:00
"storj.io/storj/storage"
)
2019-07-16 11:39:23 +01:00
const (
pieceHashExpiration = 2 * time . Hour
satIDExpiration = 24 * time . Hour
)
2019-07-03 17:14:37 +01:00
2019-03-18 10:55:06 +00:00
var (
mon = monkit . Package ( )
// Error general metainfo error
Error = errs . Class ( "metainfo error" )
)
// APIKeys is api keys store methods used by endpoint
type APIKeys interface {
2019-05-24 17:51:27 +01:00
GetByHead ( ctx context . Context , head [ ] byte ) ( * console . APIKeyInfo , error )
}
// Revocations is the revocations store methods used by the endpoint
type Revocations interface {
GetByProjectID ( ctx context . Context , projectID uuid . UUID ) ( [ ] [ ] byte , error )
2019-03-18 10:55:06 +00:00
}
2019-05-24 20:56:08 +01:00
// Containment is a copy/paste of containment interface to avoid import cycle error
type Containment interface {
Delete ( ctx context . Context , nodeID pb . NodeID ) ( bool , error )
}
2019-03-18 10:55:06 +00:00
// Endpoint metainfo endpoint
type Endpoint struct {
2019-06-05 17:41:02 +01:00
log * zap . Logger
metainfo * Service
orders * orders . Service
cache * overlay . Cache
2019-06-21 20:14:34 +01:00
partnerinfo attribution . DB
2019-06-05 17:41:02 +01:00
projectUsage * accounting . ProjectUsage
containment Containment
apiKeys APIKeys
createRequests * createRequests
2019-06-21 19:15:58 +01:00
rsConfig RSConfig
2019-07-16 11:39:23 +01:00
satellite signing . Signer
2019-03-18 10:55:06 +00:00
}
// NewEndpoint creates new metainfo endpoint instance
2019-06-21 20:14:34 +01:00
func NewEndpoint ( log * zap . Logger , metainfo * Service , orders * orders . Service , cache * overlay . Cache , partnerinfo attribution . DB ,
2019-07-16 11:39:23 +01:00
containment Containment , apiKeys APIKeys , projectUsage * accounting . ProjectUsage , rsConfig RSConfig , satellite signing . Signer ) * Endpoint {
2019-03-18 10:55:06 +00:00
// TODO do something with too many params
return & Endpoint {
2019-06-05 17:41:02 +01:00
log : log ,
metainfo : metainfo ,
orders : orders ,
cache : cache ,
2019-06-21 20:14:34 +01:00
partnerinfo : partnerinfo ,
2019-06-05 17:41:02 +01:00
containment : containment ,
apiKeys : apiKeys ,
projectUsage : projectUsage ,
createRequests : newCreateRequests ( ) ,
2019-06-21 19:15:58 +01:00
rsConfig : rsConfig ,
2019-07-16 11:39:23 +01:00
satellite : satellite ,
2019-03-18 10:55:06 +00:00
}
}
// Close closes resources
func ( endpoint * Endpoint ) Close ( ) error { return nil }
2019-07-08 14:33:15 +01:00
// SegmentInfoOld returns segment metadata info
func ( endpoint * Endpoint ) SegmentInfoOld ( ctx context . Context , req * pb . SegmentInfoRequestOld ) ( resp * pb . SegmentInfoResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . Path ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
path , err := CreatePath ( ctx , keyInfo . ProjectID , req . Segment , req . Bucket , req . Path )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
// TODO refactor to use []byte directly
2019-06-05 15:23:10 +01:00
pointer , err := endpoint . metainfo . Get ( ctx , path )
2019-03-18 10:55:06 +00:00
if err != nil {
if storage . ErrKeyNotFound . Has ( err ) {
return nil , status . Errorf ( codes . NotFound , err . Error ( ) )
}
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-07-08 14:33:15 +01:00
return & pb . SegmentInfoResponseOld { Pointer : pointer } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
// CreateSegmentOld will generate requested number of OrderLimit with coresponding node addresses for them
func ( endpoint * Endpoint ) CreateSegmentOld ( ctx context . Context , req * pb . SegmentWriteRequestOld ) ( resp * pb . SegmentWriteResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . Path ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
2019-04-01 21:14:58 +01:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-07-16 20:16:41 +01:00
if ! req . Expiration . IsZero ( ) && ! req . Expiration . After ( time . Now ( ) ) {
return nil , status . Errorf ( codes . InvalidArgument , "Invalid expiration time" )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateRedundancy ( ctx , req . Redundancy )
2019-04-09 14:31:19 +01:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-05-28 16:36:52 +01:00
exceeded , limit , err := endpoint . projectUsage . ExceedsStorageUsage ( ctx , keyInfo . ProjectID )
2019-04-02 19:21:18 +01:00
if err != nil {
2019-05-10 02:39:21 +01:00
endpoint . log . Error ( "retrieving project storage totals" , zap . Error ( err ) )
2019-04-02 19:21:18 +01:00
}
if exceeded {
2019-05-28 16:36:52 +01:00
endpoint . log . Sugar ( ) . Errorf ( "monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s" ,
limit , keyInfo . ProjectID ,
2019-04-02 19:21:18 +01:00
)
2019-05-28 16:36:52 +01:00
return nil , status . Errorf ( codes . ResourceExhausted , "Exceeded Usage Limit" )
2019-04-02 19:21:18 +01:00
}
2019-03-18 10:55:06 +00:00
redundancy , err := eestream . NewRedundancyStrategyFromProto ( req . GetRedundancy ( ) )
if err != nil {
return nil , err
}
maxPieceSize := eestream . CalcPieceSize ( req . GetMaxEncryptedSegmentSize ( ) , redundancy )
2019-03-23 08:06:11 +00:00
request := overlay . FindStorageNodesRequest {
RequestedCount : int ( req . Redundancy . Total ) ,
FreeBandwidth : maxPieceSize ,
FreeDisk : maxPieceSize ,
2019-03-18 10:55:06 +00:00
}
2019-03-23 08:06:11 +00:00
nodes , err := endpoint . cache . FindStorageNodes ( ctx , request )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-05-10 02:39:21 +01:00
bucketID := createBucketID ( keyInfo . ProjectID , req . Bucket )
2019-07-11 21:51:40 +01:00
rootPieceID , addressedLimits , piecePrivateKey , err := endpoint . orders . CreatePutOrderLimits ( ctx , bucketID , nodes , req . Expiration , maxPieceSize )
2019-03-27 10:24:35 +00:00
if err != nil {
2019-03-28 20:09:23 +00:00
return nil , Error . Wrap ( err )
2019-03-27 10:24:35 +00:00
}
2019-06-05 17:41:02 +01:00
if len ( addressedLimits ) > 0 {
endpoint . createRequests . Put ( addressedLimits [ 0 ] . Limit . SerialNumber , & createRequest {
Expiration : req . Expiration ,
Redundancy : req . Redundancy ,
} )
}
2019-07-11 21:51:40 +01:00
return & pb . SegmentWriteResponseOld { AddressedLimits : addressedLimits , RootPieceId : rootPieceID , PrivateKey : piecePrivateKey } , nil
2019-03-18 10:55:06 +00:00
}
2019-05-10 02:39:21 +01:00
func calculateSpaceUsed ( ptr * pb . Pointer ) ( inlineSpace , remoteSpace int64 ) {
inline := ptr . GetInlineSegment ( )
if inline != nil {
return int64 ( len ( inline ) ) , 0
}
segmentSize := ptr . GetSegmentSize ( )
remote := ptr . GetRemote ( )
if remote == nil {
return 0 , 0
}
minReq := remote . GetRedundancy ( ) . GetMinReq ( )
pieceSize := segmentSize / int64 ( minReq )
pieces := remote . GetRemotePieces ( )
return 0 , pieceSize * int64 ( len ( pieces ) )
}
2019-07-08 14:33:15 +01:00
// CommitSegmentOld commits segment metadata
func ( endpoint * Endpoint ) CommitSegmentOld ( ctx context . Context , req * pb . SegmentCommitRequestOld ) ( resp * pb . SegmentCommitResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . Path ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-06-05 17:41:02 +01:00
err = endpoint . validateCommitSegment ( ctx , req )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-07-03 17:14:37 +01:00
err = endpoint . filterValidPieces ( ctx , req . Pointer , req . OriginalLimits )
2019-03-30 11:21:49 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-03-18 10:55:06 +00:00
2019-06-04 12:55:38 +01:00
path , err := CreatePath ( ctx , keyInfo . ProjectID , req . Segment , req . Bucket , req . Path )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-07-08 23:24:38 +01:00
exceeded , limit , err := endpoint . projectUsage . ExceedsStorageUsage ( ctx , keyInfo . ProjectID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
if exceeded {
endpoint . log . Sugar ( ) . Errorf ( "monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s." ,
limit , keyInfo . ProjectID ,
)
return nil , status . Errorf ( codes . ResourceExhausted , "Exceeded Usage Limit" )
}
2019-07-09 21:36:18 +01:00
// clear hashes so we don't store them
for _ , piece := range req . GetPointer ( ) . GetRemote ( ) . GetRemotePieces ( ) {
piece . Hash = nil
}
2019-05-10 02:39:21 +01:00
inlineUsed , remoteUsed := calculateSpaceUsed ( req . Pointer )
2019-07-08 23:24:38 +01:00
// ToDo: Replace with hash & signature validation
// Ensure neither uplink or storage nodes are cheating on us
if req . Pointer . Type == pb . Pointer_REMOTE {
//We cannot have more redundancy than total/min
if float64 ( remoteUsed ) > ( float64 ( req . Pointer . SegmentSize ) / float64 ( req . Pointer . Remote . Redundancy . MinReq ) ) * float64 ( req . Pointer . Remote . Redundancy . Total ) {
endpoint . log . Sugar ( ) . Debugf ( "data size mismatch, got segment: %d, pieces: %d, RS Min, Total: %d,%d" , req . Pointer . SegmentSize , remoteUsed , req . Pointer . Remote . Redundancy . MinReq , req . Pointer . Remote . Redundancy . Total )
return nil , status . Errorf ( codes . InvalidArgument , "mismatched segment size and piece usage" )
}
}
2019-05-28 16:36:52 +01:00
if err := endpoint . projectUsage . AddProjectStorageUsage ( ctx , keyInfo . ProjectID , inlineUsed , remoteUsed ) ; err != nil {
2019-05-10 02:39:21 +01:00
endpoint . log . Sugar ( ) . Errorf ( "Could not track new storage usage by project %v: %v" , keyInfo . ProjectID , err )
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
// that will be affected is our per-project bandwidth and storage limits.
}
2019-06-05 15:23:10 +01:00
err = endpoint . metainfo . Put ( ctx , path , req . Pointer )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-04-05 08:42:56 +01:00
if req . Pointer . Type == pb . Pointer_INLINE {
// TODO or maybe use pointer.SegmentSize ??
2019-06-25 16:58:42 +01:00
err = endpoint . orders . UpdatePutInlineOrder ( ctx , keyInfo . ProjectID , req . Bucket , int64 ( len ( req . Pointer . InlineSegment ) ) )
2019-04-05 08:42:56 +01:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
}
2019-06-05 15:23:10 +01:00
pointer , err := endpoint . metainfo . Get ( ctx , path )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-06-05 17:41:02 +01:00
if len ( req . OriginalLimits ) > 0 {
endpoint . createRequests . Remove ( req . OriginalLimits [ 0 ] . SerialNumber )
}
2019-07-08 14:33:15 +01:00
return & pb . SegmentCommitResponseOld { Pointer : pointer } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
// DownloadSegmentOld gets Pointer incase of INLINE data or list of OrderLimit necessary to download remote data
func ( endpoint * Endpoint ) DownloadSegmentOld ( ctx context . Context , req * pb . SegmentDownloadRequestOld ) ( resp * pb . SegmentDownloadResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . Path ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-04-02 19:21:18 +01:00
bucketID := createBucketID ( keyInfo . ProjectID , req . Bucket )
2019-05-28 16:36:52 +01:00
exceeded , limit , err := endpoint . projectUsage . ExceedsBandwidthUsage ( ctx , keyInfo . ProjectID , bucketID )
2019-04-02 19:21:18 +01:00
if err != nil {
2019-05-28 16:36:52 +01:00
endpoint . log . Error ( "retrieving project bandwidth total" , zap . Error ( err ) )
2019-04-02 19:21:18 +01:00
}
if exceeded {
2019-05-28 16:36:52 +01:00
endpoint . log . Sugar ( ) . Errorf ( "monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for bandwidth for projectID %s." ,
limit , keyInfo . ProjectID ,
2019-04-02 19:21:18 +01:00
)
2019-05-28 16:36:52 +01:00
return nil , status . Errorf ( codes . ResourceExhausted , "Exceeded Usage Limit" )
2019-04-02 19:21:18 +01:00
}
2019-06-04 12:55:38 +01:00
path , err := CreatePath ( ctx , keyInfo . ProjectID , req . Segment , req . Bucket , req . Path )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
// TODO refactor to use []byte directly
2019-06-05 15:23:10 +01:00
pointer , err := endpoint . metainfo . Get ( ctx , path )
2019-03-18 10:55:06 +00:00
if err != nil {
if storage . ErrKeyNotFound . Has ( err ) {
return nil , status . Errorf ( codes . NotFound , err . Error ( ) )
}
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
if pointer . Type == pb . Pointer_INLINE {
2019-04-05 08:42:56 +01:00
// TODO or maybe use pointer.SegmentSize ??
2019-06-25 16:58:42 +01:00
err := endpoint . orders . UpdateGetInlineOrder ( ctx , keyInfo . ProjectID , req . Bucket , int64 ( len ( pointer . InlineSegment ) ) )
2019-04-05 08:42:56 +01:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-07-08 14:33:15 +01:00
return & pb . SegmentDownloadResponseOld { Pointer : pointer } , nil
2019-03-18 10:55:06 +00:00
} else if pointer . Type == pb . Pointer_REMOTE && pointer . Remote != nil {
2019-07-11 21:51:40 +01:00
limits , privateKey , err := endpoint . orders . CreateGetOrderLimits ( ctx , bucketID , pointer )
2019-03-28 20:09:23 +00:00
if err != nil {
2019-03-27 10:24:35 +00:00
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-07-11 21:51:40 +01:00
return & pb . SegmentDownloadResponseOld { Pointer : pointer , AddressedLimits : limits , PrivateKey : privateKey } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
return & pb . SegmentDownloadResponseOld { } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
// DeleteSegmentOld deletes segment metadata from satellite and returns OrderLimit array to remove them from storage node
func ( endpoint * Endpoint ) DeleteSegmentOld ( ctx context . Context , req * pb . SegmentDeleteRequestOld ) ( resp * pb . SegmentDeleteResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . Path ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
path , err := CreatePath ( ctx , keyInfo . ProjectID , req . Segment , req . Bucket , req . Path )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
// TODO refactor to use []byte directly
2019-06-05 15:23:10 +01:00
pointer , err := endpoint . metainfo . Get ( ctx , path )
2019-03-18 10:55:06 +00:00
if err != nil {
if storage . ErrKeyNotFound . Has ( err ) {
return nil , status . Errorf ( codes . NotFound , err . Error ( ) )
}
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
2019-06-05 15:23:10 +01:00
err = endpoint . metainfo . Delete ( ctx , path )
2019-05-24 20:56:08 +01:00
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
if pointer . Type == pb . Pointer_REMOTE && pointer . Remote != nil {
2019-05-24 20:56:08 +01:00
for _ , piece := range pointer . GetRemote ( ) . GetRemotePieces ( ) {
_ , err := endpoint . containment . Delete ( ctx , piece . NodeId )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
}
2019-03-28 20:09:23 +00:00
bucketID := createBucketID ( keyInfo . ProjectID , req . Bucket )
2019-07-11 21:51:40 +01:00
limits , privateKey , err := endpoint . orders . CreateDeleteOrderLimits ( ctx , bucketID , pointer )
2019-03-18 10:55:06 +00:00
if err != nil {
2019-03-28 20:09:23 +00:00
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
2019-03-18 10:55:06 +00:00
}
2019-07-11 21:51:40 +01:00
return & pb . SegmentDeleteResponseOld { AddressedLimits : limits , PrivateKey : privateKey } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
return & pb . SegmentDeleteResponseOld { } , nil
2019-03-18 10:55:06 +00:00
}
2019-07-08 14:33:15 +01:00
// ListSegmentsOld returns all Path keys in the Pointers bucket
func ( endpoint * Endpoint ) ListSegmentsOld ( ctx context . Context , req * pb . ListSegmentsRequestOld ) ( resp * pb . ListSegmentsResponseOld , err error ) {
2019-03-18 10:55:06 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-05-24 17:51:27 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : req . Prefix ,
Time : time . Now ( ) ,
} )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
2019-06-04 12:55:38 +01:00
prefix , err := CreatePath ( ctx , keyInfo . ProjectID , - 1 , req . Bucket , req . Prefix )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
2019-06-05 15:23:10 +01:00
items , more , err := endpoint . metainfo . List ( ctx , prefix , string ( req . StartAfter ) , string ( req . EndBefore ) , req . Recursive , req . Limit , req . MetaFlags )
2019-03-18 10:55:06 +00:00
if err != nil {
return nil , status . Errorf ( codes . Internal , "ListV2: %v" , err )
}
2019-07-08 14:33:15 +01:00
segmentItems := make ( [ ] * pb . ListSegmentsResponseOld_Item , len ( items ) )
2019-03-18 10:55:06 +00:00
for i , item := range items {
2019-07-08 14:33:15 +01:00
segmentItems [ i ] = & pb . ListSegmentsResponseOld_Item {
2019-03-18 10:55:06 +00:00
Path : [ ] byte ( item . Path ) ,
Pointer : item . Pointer ,
IsPrefix : item . IsPrefix ,
}
}
2019-07-08 14:33:15 +01:00
return & pb . ListSegmentsResponseOld { Items : segmentItems , More : more } , nil
2019-03-18 10:55:06 +00:00
}
2019-03-28 20:09:23 +00:00
func createBucketID ( projectID uuid . UUID , bucket [ ] byte ) [ ] byte {
entries := make ( [ ] string , 0 )
entries = append ( entries , projectID . String ( ) )
entries = append ( entries , string ( bucket ) )
return [ ] byte ( storj . JoinPaths ( entries ... ) )
}
2019-07-03 17:14:37 +01:00
func ( endpoint * Endpoint ) filterValidPieces ( ctx context . Context , pointer * pb . Pointer , limits [ ] * pb . OrderLimit ) ( err error ) {
2019-06-04 12:55:38 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-03-18 10:55:06 +00:00
if pointer . Type == pb . Pointer_REMOTE {
var remotePieces [ ] * pb . RemotePiece
remote := pointer . Remote
2019-07-03 17:14:37 +01:00
allSizesValid := true
lastPieceSize := int64 ( 0 )
2019-03-18 10:55:06 +00:00
for _ , piece := range remote . RemotePieces {
// TODO enable verification
// err := auth.VerifyMsg(piece.Hash, piece.NodeId)
// if err == nil {
// remotePieces = append(remotePieces, piece)
// } else {
// // TODO satellite should send Delete request for piece that failed
// s.logger.Warn("unable to verify piece hash: %v", zap.Error(err))
// }
2019-07-03 17:14:37 +01:00
err = endpoint . validatePieceHash ( ctx , piece , limits )
if err != nil {
// TODO maybe this should be logged also to uplink too
endpoint . log . Sugar ( ) . Warn ( err )
continue
}
if piece . Hash . PieceSize <= 0 || ( lastPieceSize > 0 && lastPieceSize != piece . Hash . PieceSize ) {
allSizesValid = false
break
}
lastPieceSize = piece . Hash . PieceSize
2019-03-18 10:55:06 +00:00
remotePieces = append ( remotePieces , piece )
}
2019-07-03 17:14:37 +01:00
if allSizesValid {
redundancy , err := eestream . NewRedundancyStrategyFromProto ( pointer . GetRemote ( ) . GetRedundancy ( ) )
if err != nil {
return Error . Wrap ( err )
}
expectedPieceSize := eestream . CalcPieceSize ( pointer . SegmentSize , redundancy )
if expectedPieceSize != lastPieceSize {
return Error . New ( "expected piece size is different from provided (%v != %v)" , expectedPieceSize , lastPieceSize )
}
} else {
return Error . New ( "all pieces needs to have the same size" )
}
2019-05-17 20:02:40 +01:00
// we repair when the number of healthy files is less than or equal to the repair threshold
// except for the case when the repair and success thresholds are the same (a case usually seen during testing)
2019-06-19 21:13:11 +01:00
if int32 ( len ( remotePieces ) ) <= remote . Redundancy . RepairThreshold && int32 ( len ( remotePieces ) ) < remote . Redundancy . SuccessThreshold {
return Error . New ( "Number of valid pieces (%d) is less than or equal to the repair threshold (%d)" ,
2019-03-18 10:55:06 +00:00
len ( remotePieces ) ,
2019-04-05 11:19:20 +01:00
remote . Redundancy . RepairThreshold ,
2019-03-18 10:55:06 +00:00
)
}
remote . RemotePieces = remotePieces
}
return nil
}
2019-04-02 15:55:58 +01:00
// CreatePath will create a Segment path
2019-06-04 12:55:38 +01:00
func CreatePath ( ctx context . Context , projectID uuid . UUID , segmentIndex int64 , bucket , path [ ] byte ) ( _ storj . Path , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2019-04-02 15:55:58 +01:00
if segmentIndex < - 1 {
return "" , errors . New ( "invalid segment index" )
}
segment := "l"
if segmentIndex > - 1 {
segment = "s" + strconv . FormatInt ( segmentIndex , 10 )
}
entries := make ( [ ] string , 0 )
entries = append ( entries , projectID . String ( ) )
entries = append ( entries , segment )
if len ( bucket ) != 0 {
entries = append ( entries , string ( bucket ) )
}
if len ( path ) != 0 {
entries = append ( entries , string ( path ) )
}
return storj . JoinPaths ( entries ... ) , nil
}
2019-06-13 02:35:37 +01:00
2019-07-08 14:33:15 +01:00
// SetAttributionOld tries to add attribution to the bucket.
func ( endpoint * Endpoint ) SetAttributionOld ( ctx context . Context , req * pb . SetAttributionRequestOld ) ( _ * pb . SetAttributionResponseOld , err error ) {
2019-06-19 13:02:37 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-06-21 20:14:34 +01:00
// try to add an attribution that doesn't exist
partnerID , err := bytesToUUID ( req . GetPartnerId ( ) )
2019-06-19 13:02:37 +01:00
if err != nil {
2019-06-21 20:14:34 +01:00
return nil , Error . Wrap ( err )
2019-06-19 13:02:37 +01:00
}
2019-06-13 02:35:37 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . BucketName ,
EncryptedPath : [ ] byte ( "" ) ,
Time : time . Now ( ) ,
} )
if err != nil {
2019-06-21 20:14:34 +01:00
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
// check if attribution is set for given bucket
_ , err = endpoint . partnerinfo . Get ( ctx , keyInfo . ProjectID , req . GetBucketName ( ) )
if err == nil {
2019-06-26 17:22:01 +01:00
endpoint . log . Sugar ( ) . Info ( "Bucket:" , string ( req . BucketName ) , " PartnerID:" , partnerID . String ( ) , "already attributed" )
2019-07-08 14:33:15 +01:00
return & pb . SetAttributionResponseOld { } , nil
2019-06-21 20:14:34 +01:00
}
if ! attribution . ErrBucketNotAttributed . Has ( err ) {
// try only to set the attribution, when it's missing
return nil , Error . Wrap ( err )
2019-06-13 02:35:37 +01:00
}
prefix , err := CreatePath ( ctx , keyInfo . ProjectID , - 1 , req . BucketName , [ ] byte ( "" ) )
if err != nil {
2019-06-21 20:14:34 +01:00
return nil , Error . Wrap ( err )
2019-06-13 02:35:37 +01:00
}
2019-06-21 20:14:34 +01:00
items , _ , err := endpoint . metainfo . List ( ctx , prefix , "" , "" , true , 1 , 0 )
2019-06-13 02:35:37 +01:00
if err != nil {
2019-06-21 20:14:34 +01:00
return nil , Error . Wrap ( err )
2019-06-13 02:35:37 +01:00
}
if len ( items ) > 0 {
2019-06-21 20:14:34 +01:00
return nil , Error . New ( "Bucket(%q) , PartnerID(%s) cannot be attributed" , req . BucketName , req . PartnerId )
}
_ , err = endpoint . partnerinfo . Insert ( ctx , & attribution . Info {
ProjectID : keyInfo . ProjectID ,
BucketName : req . GetBucketName ( ) ,
PartnerID : partnerID ,
} )
if err != nil {
return nil , Error . Wrap ( err )
}
2019-07-08 14:33:15 +01:00
return & pb . SetAttributionResponseOld { } , nil
2019-06-21 20:14:34 +01:00
}
// bytesToUUID is used to convert []byte to UUID
func bytesToUUID ( data [ ] byte ) ( uuid . UUID , error ) {
var id uuid . UUID
copy ( id [ : ] , data )
if len ( id ) != len ( data ) {
return uuid . UUID { } , errs . New ( "Invalid uuid" )
2019-06-13 02:35:37 +01:00
}
2019-06-21 20:14:34 +01:00
return id , nil
2019-06-13 02:35:37 +01:00
}
2019-06-24 18:15:45 +01:00
// ProjectInfo returns allowed ProjectInfo for the provided API key
func ( endpoint * Endpoint ) ProjectInfo ( ctx context . Context , req * pb . ProjectInfoRequest ) ( _ * pb . ProjectInfoResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionProjectInfo ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
salt := sha256 . Sum256 ( keyInfo . ProjectID [ : ] )
return & pb . ProjectInfoResponse {
ProjectSalt : salt [ : ] ,
} , nil
}
2019-07-01 23:17:30 +01:00
2019-07-08 23:32:18 +01:00
// GetBucket returns a bucket
func ( endpoint * Endpoint ) GetBucket ( ctx context . Context , req * pb . BucketGetRequest ) ( resp * pb . BucketGetResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Name ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
bucket , err := endpoint . metainfo . GetBucket ( ctx , req . GetName ( ) , keyInfo . ProjectID )
if err != nil {
2019-07-12 13:57:02 +01:00
if storj . ErrBucketNotFound . Has ( err ) {
return nil , status . Errorf ( codes . NotFound , err . Error ( ) )
}
2019-07-08 23:32:18 +01:00
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
return & pb . BucketGetResponse {
Bucket : convertBucketToProto ( ctx , bucket ) ,
} , nil
2019-07-01 23:17:30 +01:00
}
2019-07-08 23:32:18 +01:00
// CreateBucket creates a new bucket
func ( endpoint * Endpoint ) CreateBucket ( ctx context . Context , req * pb . BucketCreateRequest ) ( resp * pb . BucketCreateResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Name ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
err = endpoint . validateBucket ( ctx , req . Name )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
err = endpoint . validateRedundancy ( ctx , req . GetDefaultRedundancyScheme ( ) )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
bucket , err := convertProtoToBucket ( req , keyInfo . ProjectID )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
bucket , err = endpoint . metainfo . CreateBucket ( ctx , bucket )
if err != nil {
return nil , Error . Wrap ( err )
}
return & pb . BucketCreateResponse {
Bucket : convertBucketToProto ( ctx , bucket ) ,
} , nil
2019-07-01 23:17:30 +01:00
}
// DeleteBucket deletes a bucket
2019-07-08 23:32:18 +01:00
func ( endpoint * Endpoint ) DeleteBucket ( ctx context . Context , req * pb . BucketDeleteRequest ) ( resp * pb . BucketDeleteResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Name ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
err = endpoint . validateBucket ( ctx , req . Name )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
err = endpoint . metainfo . DeleteBucket ( ctx , req . Name , keyInfo . ProjectID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
return & pb . BucketDeleteResponse { } , nil
2019-07-01 23:17:30 +01:00
}
2019-07-08 23:32:18 +01:00
// ListBuckets returns buckets in a project where the bucket name matches the request cursor
func ( endpoint * Endpoint ) ListBuckets ( ctx context . Context , req * pb . BucketListRequest ) ( resp * pb . BucketListResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
action := macaroon . Action {
Op : macaroon . ActionRead ,
Time : time . Now ( ) ,
}
keyInfo , err := endpoint . validateAuth ( ctx , action )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
allowedBuckets , err := getAllowedBuckets ( ctx , action )
if err != nil {
return nil , err
}
listOpts := storj . BucketListOptions {
2019-07-12 13:57:02 +01:00
Cursor : string ( req . Cursor ) ,
Limit : int ( req . Limit ) ,
Direction : storj . ListDirection ( req . Direction ) ,
2019-07-08 23:32:18 +01:00
}
bucketList , err := endpoint . metainfo . ListBuckets ( ctx , keyInfo . ProjectID , listOpts , allowedBuckets )
if err != nil {
return nil , err
}
bucketItems := make ( [ ] * pb . BucketListItem , len ( bucketList . Items ) )
for i , item := range bucketList . Items {
bucketItems [ i ] = & pb . BucketListItem {
Name : [ ] byte ( item . Name ) ,
CreatedAt : item . Created ,
}
}
return & pb . BucketListResponse {
Items : bucketItems ,
More : bucketList . More ,
} , nil
2019-07-01 23:17:30 +01:00
}
2019-07-12 13:57:02 +01:00
func getAllowedBuckets ( ctx context . Context , action macaroon . Action ) ( _ macaroon . AllowedBuckets , err error ) {
2019-07-08 23:32:18 +01:00
keyData , ok := auth . GetAPIKey ( ctx )
if ! ok {
2019-07-12 13:57:02 +01:00
return macaroon . AllowedBuckets { } , status . Errorf ( codes . Unauthenticated , "Invalid API credential GetAPIKey: %v" , err )
2019-07-08 23:32:18 +01:00
}
key , err := macaroon . ParseAPIKey ( string ( keyData ) )
if err != nil {
2019-07-12 13:57:02 +01:00
return macaroon . AllowedBuckets { } , status . Errorf ( codes . Unauthenticated , "Invalid API credential ParseAPIKey: %v" , err )
2019-07-08 23:32:18 +01:00
}
2019-07-12 13:57:02 +01:00
allowedBuckets , err := key . GetAllowedBuckets ( ctx , action )
2019-07-08 23:32:18 +01:00
if err != nil {
2019-07-12 13:57:02 +01:00
return macaroon . AllowedBuckets { } , status . Errorf ( codes . Internal , "GetAllowedBuckets: %v" , err )
2019-07-08 23:32:18 +01:00
}
return allowedBuckets , err
}
// SetBucketAttribution sets the bucket attribution.
func ( endpoint * Endpoint ) SetBucketAttribution ( context . Context , * pb . BucketSetAttributionRequest ) ( resp * pb . BucketSetAttributionResponse , err error ) {
return resp , status . Error ( codes . Unimplemented , "not implemented" )
}
// convertProtoToBucket builds a storj.Bucket from a create request,
// generating a fresh UUID for the bucket identifier.
func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (storj.Bucket, error) {
	id, err := uuid.New()
	if err != nil {
		return storj.Bucket{}, err
	}

	rs := req.GetDefaultRedundancyScheme()
	ep := req.GetDefaultEncryptionParameters()

	bucket := storj.Bucket{
		ID:                  *id,
		Name:                string(req.GetName()),
		ProjectID:           projectID,
		PathCipher:          storj.CipherSuite(req.GetPathCipher()),
		DefaultSegmentsSize: req.GetDefaultSegmentSize(),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(rs.GetType()),
			ShareSize:      rs.GetErasureShareSize(),
			RequiredShares: int16(rs.GetMinReq()),
			RepairShares:   int16(rs.GetRepairThreshold()),
			OptimalShares:  int16(rs.GetSuccessThreshold()),
			TotalShares:    int16(rs.GetTotal()),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(ep.CipherSuite),
			BlockSize:   int32(ep.BlockSize),
		},
	}
	return bucket, nil
}
func convertBucketToProto ( ctx context . Context , bucket storj . Bucket ) ( pbBucket * pb . Bucket ) {
rs := bucket . DefaultRedundancyScheme
return & pb . Bucket {
Name : [ ] byte ( bucket . Name ) ,
PathCipher : pb . CipherSuite ( int ( bucket . PathCipher ) ) ,
CreatedAt : bucket . Created ,
DefaultSegmentSize : bucket . DefaultSegmentsSize ,
DefaultRedundancyScheme : & pb . RedundancyScheme {
Type : pb . RedundancyScheme_RS ,
MinReq : int32 ( rs . RequiredShares ) ,
Total : int32 ( rs . TotalShares ) ,
RepairThreshold : int32 ( rs . RepairShares ) ,
SuccessThreshold : int32 ( rs . OptimalShares ) ,
ErasureShareSize : rs . ShareSize ,
} ,
DefaultEncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( int ( bucket . DefaultEncryptionParameters . CipherSuite ) ) ,
BlockSize : int64 ( bucket . DefaultEncryptionParameters . BlockSize ) ,
} ,
}
2019-07-01 23:17:30 +01:00
}
2019-07-16 11:39:23 +01:00
// BeginObject begins object
func ( endpoint * Endpoint ) BeginObject ( ctx context . Context , req * pb . ObjectBeginRequest ) ( resp * pb . ObjectBeginResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
bucket , err := endpoint . metainfo . GetBucket ( ctx , req . Bucket , keyInfo . ProjectID )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
// take bucket RS values if not set in request
pbRS := req . RedundancyScheme
if pbRS . ErasureShareSize == 0 {
pbRS . ErasureShareSize = bucket . DefaultRedundancyScheme . ShareSize
}
if pbRS . MinReq == 0 {
pbRS . MinReq = int32 ( bucket . DefaultRedundancyScheme . RequiredShares )
}
if pbRS . RepairThreshold == 0 {
pbRS . RepairThreshold = int32 ( bucket . DefaultRedundancyScheme . RepairShares )
}
if pbRS . SuccessThreshold == 0 {
pbRS . SuccessThreshold = int32 ( bucket . DefaultRedundancyScheme . OptimalShares )
}
if pbRS . Total == 0 {
pbRS . Total = int32 ( bucket . DefaultRedundancyScheme . TotalShares )
}
pbEP := req . EncryptionParameters
if pbEP . CipherSuite == 0 {
pbEP . CipherSuite = pb . CipherSuite ( bucket . DefaultEncryptionParameters . CipherSuite )
}
if pbEP . BlockSize == 0 {
pbEP . BlockSize = int64 ( bucket . DefaultEncryptionParameters . BlockSize )
}
satStreamID := & pb . SatStreamID {
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Version : req . Version ,
Redundancy : pbRS ,
CreationDate : time . Now ( ) ,
ExpirationDate : req . ExpiresAt ,
}
satStreamID , err = signing . SignStreamID ( ctx , endpoint . satellite , satStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
encodedStreamID , err := proto . Marshal ( satStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
streamID , err := storj . StreamIDFromBytes ( encodedStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
return & pb . ObjectBeginResponse {
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Version : req . Version ,
StreamId : streamID ,
RedundancyScheme : pbRS ,
EncryptionParameters : pbEP ,
} , nil
}
// CommitObject commits object when all segments are also committed
func ( endpoint * Endpoint ) CommitObject ( ctx context . Context , req * pb . ObjectCommitRequest ) ( resp * pb . ObjectCommitResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
streamID := & pb . SatStreamID { }
err = proto . Unmarshal ( req . StreamId , streamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
err = signing . VerifyStreamID ( ctx , endpoint . satellite , streamID )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
if streamID . CreationDate . Before ( time . Now ( ) . Add ( - satIDExpiration ) ) {
return nil , status . Errorf ( codes . InvalidArgument , "stream ID expired" )
}
_ , err = endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
// we don't need to do anything for shim implementation
return & pb . ObjectCommitResponse { } , nil
}
// ListObjects list objects according to specific parameters
func ( endpoint * Endpoint ) ListObjects ( ctx context . Context , req * pb . ObjectListRequest ) ( resp * pb . ObjectListResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
keyInfo , err := endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : [ ] byte { } ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
prefix , err := CreatePath ( ctx , keyInfo . ProjectID , - 1 , req . Bucket , req . EncryptedPrefix )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
metaflags := meta . All
// TODO use flags
// TODO find out how EncryptedCursor -> startAfter/endAfter
segments , more , err := endpoint . metainfo . List ( ctx , prefix , "" , "" , false , req . Limit , metaflags )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
items := make ( [ ] * pb . ObjectListItem , len ( segments ) )
for i , segment := range segments {
items [ i ] = & pb . ObjectListItem {
EncryptedPath : [ ] byte ( segment . Path ) ,
CreatedAt : segment . Pointer . CreationDate ,
ExpiresAt : segment . Pointer . ExpirationDate ,
}
}
return & pb . ObjectListResponse {
Items : items ,
More : more ,
} , nil
}
// BeginDeleteObject begins object deletion process
func ( endpoint * Endpoint ) BeginDeleteObject ( ctx context . Context , req * pb . ObjectBeginDeleteRequest ) ( resp * pb . ObjectBeginDeleteResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
_ , err = endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , status . Errorf ( codes . InvalidArgument , err . Error ( ) )
}
satStreamID := & pb . SatStreamID {
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Version : req . Version ,
CreationDate : time . Now ( ) ,
}
satStreamID , err = signing . SignStreamID ( ctx , endpoint . satellite , satStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
encodedStreamID , err := proto . Marshal ( satStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
streamID , err := storj . StreamIDFromBytes ( encodedStreamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
return & pb . ObjectBeginDeleteResponse {
StreamId : streamID ,
} , nil
}
// FinishDeleteObject finishes object deletion
func ( endpoint * Endpoint ) FinishDeleteObject ( ctx context . Context , req * pb . ObjectFinishDeleteRequest ) ( resp * pb . ObjectFinishDeleteResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
streamID := & pb . SatStreamID { }
err = proto . Unmarshal ( req . StreamId , streamID )
if err != nil {
return nil , status . Errorf ( codes . Internal , err . Error ( ) )
}
err = signing . VerifyStreamID ( ctx , endpoint . satellite , streamID )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
if streamID . CreationDate . Before ( time . Now ( ) . Add ( - satIDExpiration ) ) {
return nil , status . Errorf ( codes . InvalidArgument , "stream ID expired" )
}
_ , err = endpoint . validateAuth ( ctx , macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , status . Errorf ( codes . Unauthenticated , err . Error ( ) )
}
// we don't need to do anything for shim implementation
return & pb . ObjectFinishDeleteResponse { } , nil
}