// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"bytes"
	"context"
	"crypto/sha256"
	"fmt"
	"strconv"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/context2"
	"storj.io/common/encryption"
	"storj.io/common/errs2"
	"storj.io/common/lrucache"
	"storj.io/common/macaroon"
	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/attribution"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metainfo/piecedeletion"
	"storj.io/storj/satellite/metainfo/pointerverification"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/revocation"
	"storj.io/storj/satellite/rewards"
	"storj.io/uplink/private/eestream"
)

const (
	satIDExpiration = 48 * time.Hour

	deleteObjectPiecesSuccessThreshold = 0.75
)

var (
	mon = monkit.Package()

	// Error general metainfo error.
	Error = errs.Class("metainfo")
	// ErrNodeAlreadyExists pointer already has a piece for a node err.
	ErrNodeAlreadyExists = errs.Class("metainfo: node already exists")
	// ErrBucketNotEmpty is returned when bucket is required to be empty for an operation.
	ErrBucketNotEmpty = errs.Class("bucket not empty")
)

// APIKeys defines the api key store methods used by the endpoint.
//
// architecture: Database
type APIKeys interface {
	GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error)
}

// Endpoint metainfo endpoint.
//
// architecture: Endpoint
type Endpoint struct {
	pb.DRPCMetainfoUnimplementedServer

	log                  *zap.Logger
	buckets              *buckets.Service
	metabase             *metabase.DB
	deletePieces         *piecedeletion.Service
	orders               *orders.Service
	overlay              *overlay.Service
	attributions         attribution.DB
	partners             *rewards.PartnersService
	pointerVerification  *pointerverification.Service
	projectUsage         *accounting.Service
	projects             console.Projects
	apiKeys              APIKeys
	satellite            signing.Signer
	limiterCache         *lrucache.ExpiringLRU
	encInlineSegmentSize int64 // max inline segment size + encryption overhead
	revocations          revocation.DB
	defaultRS            *pb.RedundancyScheme
	config               Config
	versionCollector     *versionCollector
}

// NewEndpoint creates a new metainfo endpoint instance.
func NewEndpoint(log *zap.Logger, buckets *buckets.Service, metabaseDB *metabase.DB,
	deletePieces *piecedeletion.Service, orders *orders.Service, cache *overlay.Service,
	attributions attribution.DB, partners *rewards.PartnersService, peerIdentities overlay.PeerIdentities,
	apiKeys APIKeys, projectUsage *accounting.Service, projects console.Projects,
	satellite signing.Signer, revocations revocation.DB, config Config) (*Endpoint, error) {
	// TODO do something with too many params

	encInlineSegmentSize, err := encryption.CalcEncryptedSize(config.MaxInlineSegmentSize.Int64(), storj.EncryptionParameters{
		CipherSuite: storj.EncAESGCM,
		BlockSize:   128, // intentionally low block size to allow maximum possible encryption overhead
	})
	if err != nil {
		return nil, err
	}

	defaultRSScheme := &pb.RedundancyScheme{
		Type:             pb.RedundancyScheme_RS,
		MinReq:           int32(config.RS.Min),
		RepairThreshold:  int32(config.RS.Repair),
		SuccessThreshold: int32(config.RS.Success),
		Total:            int32(config.RS.Total),
		ErasureShareSize: config.RS.ErasureShareSize.Int32(),
	}

	return &Endpoint{
		log:                 log,
		buckets:             buckets,
		metabase:            metabaseDB,
		deletePieces:        deletePieces,
		orders:              orders,
		overlay:             cache,
		attributions:        attributions,
		partners:            partners,
		pointerVerification: pointerverification.NewService(peerIdentities),
		apiKeys:             apiKeys,
		projectUsage:        projectUsage,
		projects:            projects,
		satellite:           satellite,
		limiterCache: lrucache.New(lrucache.Options{
			Capacity:   config.RateLimiter.CacheCapacity,
			Expiration: config.RateLimiter.CacheExpiration,
		}),
		encInlineSegmentSize: encInlineSegmentSize,
		revocations:          revocations,
		defaultRS:            defaultRSScheme,
		config:               config,
		versionCollector:     newVersionCollector(),
	}, nil
}

// Close closes resources.
func (endpoint *Endpoint) Close() error { return nil }

func calculateSpaceUsed(segmentSize int64, numberOfPieces int, rs storj.RedundancyScheme) (totalStored int64) {
	pieceSize := segmentSize / int64(rs.RequiredShares)
	return pieceSize * int64(numberOfPieces)
}

// ProjectInfo returns allowed ProjectInfo for the provided API key.
func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:   macaroon.ActionProjectInfo,
		Time: time.Now(),
	})
	if err != nil {
		return nil, err
	}

	salt := sha256.Sum256(keyInfo.ProjectID[:])

	return &pb.ProjectInfoResponse{
		ProjectSalt: salt[:],
	}, nil
}

// GetBucket returns a bucket.
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:     macaroon.ActionRead,
		Bucket: req.Name,
		Time:   time.Now(),
	})
	if err != nil {
		return nil, err
	}

	bucket, err := endpoint.buckets.GetMinimalBucket(ctx, req.GetName(), keyInfo.ProjectID)
	if err != nil {
		if storj.ErrBucketNotFound.Has(err) {
			return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// override RS to fit satellite settings
	convBucket, err := convertBucketToProto(bucket, endpoint.defaultRS, endpoint.config.MaxSegmentSize)
	if err != nil {
		return resp, err
	}

	return &pb.BucketGetResponse{
		Bucket: convBucket,
	}, nil
}

// CreateBucket creates a new bucket.
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:     macaroon.ActionWrite,
		Bucket: req.Name,
		Time:   time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Name)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// check if the bucket exists before updating it or creating a new entry
	exists, err := endpoint.buckets.HasBucket(ctx, req.GetName(), keyInfo.ProjectID)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	} else if exists {
		// When the bucket exists, try to set the attribution.
		if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName()); err != nil {
			return nil, err
		}
		return nil, rpcstatus.Error(rpcstatus.AlreadyExists, "bucket already exists")
	}

	// check if project has exceeded its allocated bucket limit
	maxBuckets, err := endpoint.projects.GetMaxBuckets(ctx, keyInfo.ProjectID)
	if err != nil {
		return nil, err
	}
	if maxBuckets == nil {
		defaultMaxBuckets := endpoint.config.ProjectLimits.MaxBuckets
		maxBuckets = &defaultMaxBuckets
	}
	bucketCount, err := endpoint.buckets.CountBuckets(ctx, keyInfo.ProjectID)
	if err != nil {
		return nil, err
	}
	if bucketCount >= *maxBuckets {
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, fmt.Sprintf("number of allocated buckets (%d) exceeded", endpoint.config.ProjectLimits.MaxBuckets))
	}

	bucketReq, err := convertProtoToBucket(req, keyInfo.ProjectID)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	bucket, err := endpoint.buckets.CreateBucket(ctx, bucketReq)
	if err != nil {
		endpoint.log.Error("error while creating bucket", zap.String("bucketName", bucketReq.Name), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create bucket")
	}

	// Once we have created the bucket, we can try setting the attribution.
	if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName()); err != nil {
		return nil, err
	}

	// override RS to fit satellite settings
	convBucket, err := convertBucketToProto(buckets.Bucket{
		Name:      []byte(bucket.Name),
		CreatedAt: bucket.Created,
	}, endpoint.defaultRS, endpoint.config.MaxSegmentSize)
	if err != nil {
		endpoint.log.Error("error while converting bucket to proto", zap.String("bucketName", bucket.Name), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create bucket")
	}

	return &pb.BucketCreateResponse{
		Bucket: convBucket,
	}, nil
}

// DeleteBucket deletes a bucket.
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	now := time.Now()

	var canRead, canList bool

	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:     macaroon.ActionDelete,
				Bucket: req.Name,
				Time:   now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:     macaroon.ActionRead,
				Bucket: req.Name,
				Time:   now,
			},
			actionPermitted: &canRead,
			optional:        true,
		},
		verifyPermission{
			action: macaroon.Action{
				Op:     macaroon.ActionList,
				Bucket: req.Name,
				Time:   now,
			},
			actionPermitted: &canList,
			optional:        true,
		},
	)
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Name)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	var (
		bucket     buckets.Bucket
		convBucket *pb.Bucket
	)
	if canRead || canList {
		// Info about deleted bucket is returned only if either Read, or List permission is granted.
		bucket, err = endpoint.buckets.GetMinimalBucket(ctx, req.Name, keyInfo.ProjectID)
		if err != nil {
			if storj.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
			}
			return nil, err
		}

		convBucket, err = convertBucketToProto(bucket, endpoint.defaultRS, endpoint.config.MaxSegmentSize)
		if err != nil {
			return nil, err
		}
	}

	err = endpoint.deleteBucket(ctx, req.Name, keyInfo.ProjectID)
	if err != nil {
		if !canRead && !canList {
			// No error info is returned if neither Read, nor List permission is granted.
			return &pb.BucketDeleteResponse{}, nil
		}
		if ErrBucketNotEmpty.Has(err) {
			// List permission is required to delete all objects in a bucket.
			if !req.GetDeleteAll() || !canList {
				return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, err.Error())
			}

			_, deletedObjCount, err := endpoint.deleteBucketNotEmpty(ctx, keyInfo.ProjectID, req.Name)
			if err != nil {
				return nil, err
			}

			return &pb.BucketDeleteResponse{Bucket: convBucket, DeletedObjectsCount: deletedObjCount}, nil
		}
		if storj.ErrBucketNotFound.Has(err) {
			return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
}

// deleteBucket deletes a bucket from the buckets db.
func (endpoint *Endpoint) deleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)

	empty, err := endpoint.isBucketEmpty(ctx, projectID, bucketName)
	if err != nil {
		return err
	}
	if !empty {
		return ErrBucketNotEmpty.New("")
	}

	return endpoint.buckets.DeleteBucket(ctx, bucketName, projectID)
}

// isBucketEmpty returns whether bucket is empty.
func (endpoint *Endpoint) isBucketEmpty(ctx context.Context, projectID uuid.UUID, bucketName []byte) (bool, error) {
	empty, err := endpoint.metabase.BucketEmpty(ctx, metabase.BucketEmpty{
		ProjectID:  projectID,
		BucketName: string(bucketName),
	})
	return empty, Error.Wrap(err)
}

// deleteBucketNotEmpty deletes all objects from the bucket and then deletes the bucket.
// On success, it returns only the number of deleted objects.
func (endpoint *Endpoint) deleteBucketNotEmpty(ctx context.Context, projectID uuid.UUID, bucketName []byte) ([]byte, int64, error) {
	deletedCount, err := endpoint.deleteBucketObjects(ctx, projectID, bucketName)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, 0, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	err = endpoint.deleteBucket(ctx, bucketName, projectID)
	if err != nil {
		if ErrBucketNotEmpty.Has(err) {
			return nil, deletedCount, rpcstatus.Error(rpcstatus.FailedPrecondition, "cannot delete the bucket because it's being used by another process")
		}
		if storj.ErrBucketNotFound.Has(err) {
			return bucketName, 0, nil
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, deletedCount, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return bucketName, deletedCount, nil
}

// deleteBucketObjects deletes all objects in a bucket.
func (endpoint *Endpoint) deleteBucketObjects(ctx context.Context, projectID uuid.UUID, bucketName []byte) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)

	bucketLocation := metabase.BucketLocation{ProjectID: projectID, BucketName: string(bucketName)}
	deletedObjects, err := endpoint.metabase.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
		Bucket: bucketLocation,
		DeletePieces: func(ctx context.Context, deleted []metabase.DeletedSegmentInfo) error {
			endpoint.deleteSegmentPieces(ctx, deleted)
			return nil
		},
	})

	return deletedObjects, Error.Wrap(err)
}

// ListBuckets returns buckets in a project where the bucket name matches the request cursor.
func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (resp *pb.BucketListResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	action := macaroon.Action{
		// TODO: This has to be ActionList, but it seems to be set to
		// ActionRead as a hacky workaround to make bucket listing possible.
		Op:   macaroon.ActionRead,
		Time: time.Now(),
	}
	keyInfo, err := endpoint.validateAuth(ctx, req.Header, action)
	if err != nil {
		return nil, err
	}

	allowedBuckets, err := getAllowedBuckets(ctx, req.Header, action)
	if err != nil {
		return nil, err
	}

	listOpts := storj.BucketListOptions{
		Cursor:    string(req.Cursor),
		Limit:     int(req.Limit),
		Direction: storj.ListDirection(req.Direction),
	}
	bucketList, err := endpoint.buckets.ListBuckets(ctx, keyInfo.ProjectID, listOpts, allowedBuckets)
	if err != nil {
		return nil, err
	}

	bucketItems := make([]*pb.BucketListItem, len(bucketList.Items))
	for i, item := range bucketList.Items {
		bucketItems[i] = &pb.BucketListItem{
			Name:      []byte(item.Name),
			CreatedAt: item.Created,
		}
	}

	return &pb.BucketListResponse{
		Items: bucketItems,
		More:  bucketList.More,
	}, nil
}

// CountBuckets returns the number of buckets a project currently has.
// TODO: add this to the uplink client side.
func (endpoint *Endpoint) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
	count, err = endpoint.buckets.CountBuckets(ctx, projectID)
	if err != nil {
		return 0, err
	}
	return count, nil
}

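// getAllowedBuckets returns the buckets that the API key's macaroon caveats allow for the
// given action.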
func getAllowedBuckets(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ macaroon.AllowedBuckets, err error) {
	key, err := getAPIKey(ctx, header)
	if err != nil {
		return macaroon.AllowedBuckets{}, rpcstatus.Errorf(rpcstatus.InvalidArgument, "Invalid API credentials: %v", err)
	}
	allowedBuckets, err := key.GetAllowedBuckets(ctx, action)
	if err != nil {
		return macaroon.AllowedBuckets{}, rpcstatus.Errorf(rpcstatus.Internal, "GetAllowedBuckets: %v", err)
	}
	return allowedBuckets, err
}

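// convertProtoToBucket converts a BucketCreateRequest into a storj.Bucket suitable for
// storing in the buckets db.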
func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (bucket storj.Bucket, err error) {
	bucketID, err := uuid.New()
	if err != nil {
		return storj.Bucket{}, err
	}

	// TODO: resolve partner id
	var partnerID uuid.UUID
	err = partnerID.UnmarshalJSON(req.GetPartnerId())

	// bucket's partnerID should never be set
	// it is always read back from buckets DB
	if err != nil && !partnerID.IsZero() {
		return bucket, errs.New("Invalid uuid")
	}

	return storj.Bucket{
		ID:        bucketID,
		Name:      string(req.GetName()),
		ProjectID: projectID,
		PartnerID: partnerID,
	}, nil
}

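// convertBucketToProto converts a bucket into its protobuf representation, filling in the
// satellite's default redundancy scheme and encryption parameters.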
func convertBucketToProto(bucket buckets.Bucket, rs *pb.RedundancyScheme, maxSegmentSize memory.Size) (pbBucket *pb.Bucket, err error) {
	if len(bucket.Name) == 0 {
		return nil, nil
	}

	return &pb.Bucket{
		Name:      bucket.Name,
		CreatedAt: bucket.CreatedAt,

		// default satellite values
		PathCipher:              pb.CipherSuite_ENC_AESGCM,
		DefaultSegmentSize:      maxSegmentSize.Int64(),
		DefaultRedundancyScheme: rs,
		DefaultEncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite_ENC_AESGCM,
			BlockSize:   int64(rs.ErasureShareSize * rs.MinReq),
		},
	}, nil
}

// BeginObject begins an object upload.
func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	now := time.Now()

	var canDelete bool

	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionWrite,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedPath,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionDelete,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedPath,
				Time:          now,
			},
			actionPermitted: &canDelete,
			optional:        true,
		},
	)
	if err != nil {
		return nil, err
	}

	if !req.ExpiresAt.IsZero() && !req.ExpiresAt.After(time.Now()) {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Invalid expiration time")
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if endpoint.config.ProjectLimits.ValidateSegmentLimit {
		if exceeded, limit, err := endpoint.projectUsage.ExceedsSegmentUsage(ctx, keyInfo.ProjectID); err != nil {
			if errs2.IsCanceled(err) {
				return nil, rpcstatus.Wrap(rpcstatus.Canceled, err)
			}

			endpoint.log.Error(
				"Retrieving project segment total failed; segment limit won't be enforced",
				zap.Stringer("Project ID", keyInfo.ProjectID),
				zap.Error(err),
			)
		} else if exceeded {
			endpoint.log.Warn("Segment limit exceeded",
				zap.String("Limit", strconv.Itoa(int(limit))),
				zap.Stringer("Project ID", keyInfo.ProjectID),
			)
			return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Segments Limit")
		}
	}

	// TODO this needs to be optimized to avoid DB call on each request
	placement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
	if err != nil {
		if storj.ErrBucketNotFound.Has(err) {
			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
		}
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	objectKeyLength := len(req.EncryptedPath)
	if objectKeyLength > endpoint.config.MaxEncryptedObjectKeyLength {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("key length is too big, got %v, maximum allowed is %v", objectKeyLength, endpoint.config.MaxEncryptedObjectKeyLength))
	}

	if canDelete {
		_, err = endpoint.DeleteObjectAnyStatus(ctx, metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedPath),
		})
		if err != nil && !storj.ErrObjectNotFound.Has(err) {
			return nil, err
		}
	} else {
		_, err = endpoint.metabase.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
			ObjectLocation: metabase.ObjectLocation{
				ProjectID:  keyInfo.ProjectID,
				BucketName: string(req.Bucket),
				ObjectKey:  metabase.ObjectKey(req.EncryptedPath),
			},
		})
		if err == nil {
			return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
		}
	}

	if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.Bucket); err != nil {
		return nil, err
	}

	streamID, err := uuid.New()
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// TODO this will work only with the newest uplink
	// figure out what to do with this
	encryptionParameters := storj.EncryptionParameters{
		CipherSuite: storj.CipherSuite(req.EncryptionParameters.CipherSuite),
		BlockSize:   int32(req.EncryptionParameters.BlockSize), // TODO check conversion
	}

	var expiresAt *time.Time
	if req.ExpiresAt.IsZero() {
		expiresAt = nil
	} else {
		expiresAt = &req.ExpiresAt
	}

	object, err := endpoint.metabase.BeginObjectExactVersion(ctx, metabase.BeginObjectExactVersion{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedPath),
			StreamID:   streamID,
			Version:    metabase.Version(1),
		},
		ExpiresAt:  expiresAt,
		Encryption: encryptionParameters,
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
		Bucket:               req.Bucket,
		EncryptedObjectKey:   req.EncryptedPath,
		Version:              int32(object.Version),
		CreationDate:         object.CreatedAt,
		ExpirationDate:       req.ExpiresAt,
		StreamId:             streamID[:],
		MultipartObject:      object.FixedSegmentSize <= 0,
		EncryptionParameters: req.EncryptionParameters,
		Placement:            int32(placement),
	})
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Upload", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "put"), zap.String("type", "object"))
	mon.Meter("req_put_object").Mark(1)

	return &pb.ObjectBeginResponse{
		Bucket:           req.Bucket,
		EncryptedPath:    req.EncryptedPath,
		Version:          req.Version,
		StreamId:         satStreamID,
		RedundancyScheme: endpoint.defaultRS,
	}, nil
}

// CommitObject commits an object when all its segments have already been committed.
func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommitRequest) (resp *pb.ObjectCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Bucket:        streamID.Bucket,
		EncryptedPath: streamID.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	metadataSize := memory.Size(len(req.EncryptedMetadata))
	if metadataSize > endpoint.config.MaxMetadataSize {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("Metadata is too large, got %v, maximum allowed is %v", metadataSize, endpoint.config.MaxMetadataSize))
	}

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// for old uplinks get Encryption from StreamMeta
	streamMeta := &pb.StreamMeta{}
	encryption := storj.EncryptionParameters{}
	err = pb.Unmarshal(req.EncryptedMetadata, streamMeta)
	if err == nil {
		encryption.CipherSuite = storj.CipherSuite(streamMeta.EncryptionType)
		encryption.BlockSize = streamMeta.EncryptionBlockSize
	}

	request := metabase.CommitObject{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedObjectKey),
			StreamID:   id,
			Version:    metabase.Version(1),
		},
		Encryption: encryption,
	}

	// uplink can send empty metadata with a non-empty key/nonce;
	// we need to fix it on the uplink side, but this part will be
	// needed for backward compatibility
	if len(req.EncryptedMetadata) != 0 {
		request.EncryptedMetadata = req.EncryptedMetadata
		request.EncryptedMetadataNonce = req.EncryptedMetadataNonce[:]
		request.EncryptedMetadataEncryptedKey = req.EncryptedMetadataEncryptedKey

		// older uplinks might send EncryptedMetadata directly with the request, but
		// the key/nonce will be part of StreamMeta
		if req.EncryptedMetadataNonce.IsZero() && len(req.EncryptedMetadataEncryptedKey) == 0 &&
			streamMeta.LastSegmentMeta != nil {
			request.EncryptedMetadataNonce = streamMeta.LastSegmentMeta.KeyNonce
			request.EncryptedMetadataEncryptedKey = streamMeta.LastSegmentMeta.EncryptedKey
		}
	}

	_, err = endpoint.metabase.CommitObject(ctx, request)
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	return &pb.ObjectCommitResponse{}, nil
}

// GetObject gets a single object's metadata.
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionRead,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedPath,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	mbObject, err := endpoint.metabase.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedPath),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	var segmentRS *pb.RedundancyScheme
	// TODO we may try to avoid additional request for inline objects
	if !req.RedundancySchemePerSegment && mbObject.SegmentCount > 0 {
		segmentRS = endpoint.defaultRS
		segment, err := endpoint.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: mbObject.StreamID,
			Position: metabase.SegmentPosition{
				Index: 0,
			},
		})
		if err != nil {
			// don't fail because it's possible that it's a multipart object
			endpoint.log.Error("internal", zap.Error(err))
		} else {
			segmentRS = &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			}
		}

		// monitor how many uplinks are still using this additional code
		mon.Meter("req_get_object_rs_per_object").Mark(1)
	}

	object, err := endpoint.objectToProto(ctx, mbObject, segmentRS)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "object"))
	mon.Meter("req_get_object").Mark(1)

	return &pb.ObjectGetResponse{Object: object}, nil
}

// DownloadObject gets object information, creates a download for segments and lists the object segments.
func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDownloadRequest) (resp *pb.ObjectDownloadResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if ctx.Err() != nil {
		return nil, rpcstatus.Error(rpcstatus.Canceled, "client has closed the connection")
	}

	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionRead,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID); err != nil {
		if errs2.IsCanceled(err) {
			return nil, rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		endpoint.log.Error(
			"Retrieving project bandwidth total failed; bandwidth limit won't be enforced",
			zap.Stringer("Project ID", keyInfo.ProjectID),
			zap.Error(err),
		)
	} else if exceeded {
		endpoint.log.Warn("Monthly bandwidth limit exceeded",
			zap.Stringer("Limit", limit),
			zap.Stringer("Project ID", keyInfo.ProjectID),
		)
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
	}

	// get the object information
	object, err := endpoint.metabase.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	// get the range segments
	streamRange, err := calculateStreamRange(object, req.Range)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	segments, err := endpoint.metabase.ListStreamPositions(ctx, metabase.ListStreamPositions{
		StreamID: object.StreamID,
		Range:    streamRange,
		Limit:    int(req.Limit),
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	// get the download response for the first segment
	downloadSegments, err := func() ([]*pb.SegmentDownloadResponse, error) {
		if len(segments.Segments) == 0 {
			return nil, nil
		}
		if object.IsMigrated() && streamRange != nil && streamRange.PlainStart > 0 {
			return nil, nil
		}

		segment, err := endpoint.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: object.StreamID,
			Position: segments.Segments[0].Position,
		})
		if err != nil {
			return nil, endpoint.convertMetabaseErr(err)
		}

		downloadSizes := endpoint.calculateDownloadSizes(streamRange, segment, object.Encryption)

		// Update the current bandwidth cache value, incrementing it by the encrypted size being downloaded.
		err = endpoint.projectUsage.UpdateProjectBandwidthUsage(ctx, keyInfo.ProjectID, downloadSizes.encryptedSize)
		if err != nil {
			if errs2.IsCanceled(err) {
				return nil, rpcstatus.Wrap(rpcstatus.Canceled, err)
			}

			// log it and continue. it's most likely our own fault that we couldn't
			// track it, and the only thing that will be affected is our per-project
			// bandwidth limits.
			endpoint.log.Error(
				"Could not track the new project's bandwidth usage when downloading an object",
				zap.Stringer("Project ID", keyInfo.ProjectID),
				zap.Error(err),
			)
		}

		encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
		if err != nil {
			endpoint.log.Error("unable to get encryption key nonce from metadata", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		if segment.Inline() {
			err := endpoint.orders.UpdateGetInlineOrder(ctx, object.Location().Bucket(), downloadSizes.plainSize)
			if err != nil {
				endpoint.log.Error("internal", zap.Error(err))
				return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
			}

			endpoint.log.Info("Inline Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "inline"))
			mon.Meter("req_get_inline").Mark(1)

			return []*pb.SegmentDownloadResponse{{
				PlainOffset:         segment.PlainOffset,
				PlainSize:           int64(segment.PlainSize),
				SegmentSize:         int64(segment.EncryptedSize),
				EncryptedInlineData: segment.InlineData,
				EncryptedKeyNonce:   encryptedKeyNonce,
				EncryptedKey:        segment.EncryptedKey,
				Position: &pb.SegmentPosition{
					PartNumber: int32(segment.Position.Part),
					Index:      int32(segment.Position.Index),
				},
			}}, nil
		}

		limits, privateKey, err := endpoint.orders.CreateGetOrderLimits(ctx, object.Location().Bucket(), segment, downloadSizes.orderLimit)
		if err != nil {
			if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
				endpoint.log.Error("Unable to create order limits.",
					zap.Stringer("Project ID", keyInfo.ProjectID),
					zap.Stringer("API Key ID", keyInfo.ID),
					zap.Error(err),
				)
			}
			endpoint.log.Error("internal", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		endpoint.log.Info("Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "remote"))
		mon.Meter("req_get_remote").Mark(1)

		return []*pb.SegmentDownloadResponse{{
			AddressedLimits: limits,
			PrivateKey:      privateKey,
			PlainOffset:     segment.PlainOffset,
			PlainSize:       int64(segment.PlainSize),
			SegmentSize:     int64(segment.EncryptedSize),

			EncryptedKeyNonce: encryptedKeyNonce,
			EncryptedKey:      segment.EncryptedKey,
			RedundancyScheme: &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			},

			Position: &pb.SegmentPosition{
				PartNumber: int32(segment.Position.Part),
				Index:      int32(segment.Position.Index),
			},
		}}, nil
	}()
	if err != nil {
		return nil, err
	}

	// convert to response
	protoObject, err := endpoint.objectToProto(ctx, object, nil)
	if err != nil {
		endpoint.log.Error("unable to convert object to proto", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	segmentList, err := convertStreamListResults(segments)
	if err != nil {
		endpoint.log.Error("unable to convert stream list", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Download Object", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "download"), zap.String("type", "object"))
	mon.Meter("req_download_object").Mark(1)

	return &pb.ObjectDownloadResponse{
		Object: protoObject,

		// The RPC API allows for multiple segment download responses, but for now
		// we return only one. This can be changed in the future if it seems useful
		// to return more than one on the initial response.
		SegmentDownload: downloadSegments,

		// In the case where the client needs the segment list, it will contain
		// every segment. In the case where the segment list is not needed,
		// segmentListItems will be nil.
		SegmentList: segmentList,
	}, nil
}

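// downloadSizes holds the different size measures needed to serve a single segment download.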
type downloadSizes struct {
	// amount of data that uplink eventually gets
	plainSize int64
	// amount of data that's present after encryption
	encryptedSize int64
	// amount of data that's read from a storage node
	orderLimit int64
}

func (endpoint *Endpoint) calculateDownloadSizes(streamRange *metabase.StreamRange, segment metabase.Segment, encryptionParams storj.EncryptionParameters) downloadSizes {
	if segment.Inline() {
		return downloadSizes{
			plainSize:     int64(len(segment.InlineData)),
			encryptedSize: int64(segment.EncryptedSize),
		}
	}

	// calculate the range inside the given segment
	readStart := segment.PlainOffset
	if streamRange != nil && readStart <= streamRange.PlainStart {
		readStart = streamRange.PlainStart
	}
	readLimit := segment.PlainOffset + int64(segment.PlainSize)
	if streamRange != nil && streamRange.PlainLimit < readLimit {
		readLimit = streamRange.PlainLimit
	}
	plainSize := readLimit - readStart

	// calculate the read range given the segment start
	readStart -= segment.PlainOffset
	readLimit -= segment.PlainOffset

	// align to encryption block size
	enc, err := encryption.NewEncrypter(encryptionParams.CipherSuite, &storj.Key{1}, &storj.Nonce{1}, int(encryptionParams.BlockSize))
	if err != nil {
		// We ignore the error and fall back to the max amount to download.
		// It's unlikely that we fail here, but if we do, we don't want to block downloading.
		endpoint.log.Error("unable to create encrypter", zap.Error(err))
		return downloadSizes{
			plainSize:     int64(segment.PlainSize),
			encryptedSize: int64(segment.EncryptedSize),
			orderLimit:    0,
		}
	}

	encryptedStartBlock, encryptedLimitBlock := calculateBlocks(readStart, readLimit, int64(enc.InBlockSize()))
	encryptedStart, encryptedLimit := encryptedStartBlock*int64(enc.OutBlockSize()), encryptedLimitBlock*int64(enc.OutBlockSize())
	encryptedSize := encryptedLimit - encryptedStart
	if encryptedSize > int64(segment.EncryptedSize) {
		encryptedSize = int64(segment.EncryptedSize)
	}

	// align to blocks
	stripeSize := int64(segment.Redundancy.StripeSize())
	stripeStart, stripeLimit := alignToBlock(encryptedStart, encryptedLimit, stripeSize)

	// calculate how many shares we need to download from a node
	stripeCount := (stripeLimit - stripeStart) / stripeSize
	orderLimit := stripeCount * int64(segment.Redundancy.ShareSize)

	return downloadSizes{
		plainSize:     plainSize,
		encryptedSize: encryptedSize,
		orderLimit:    orderLimit,
	}
}

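// calculateBlocks converts a byte range into encryption-block indices, rounding the start
// down and the limit up. For example, with blockSize = 256, start = 300 and limit = 520 it
// returns blocks 1 and 3.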
func calculateBlocks(start, limit int64, blockSize int64) (startBlock, limitBlock int64) {
	return start / blockSize, (limit + blockSize - 1) / blockSize
}

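// alignToBlock widens a byte range so that both ends fall on blockSize boundaries. With
// blockSize = 256, start = 300 and limit = 520 it returns the byte range 256 to 768.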
func alignToBlock(start, limit int64, blockSize int64) (alignedStart, alignedLimit int64) {
	return (start / blockSize) * blockSize, ((limit + blockSize - 1) / blockSize) * blockSize
}

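// calculateStreamRange converts a protobuf download range into a plain-offset range over the
// object's stream. It returns nil (meaning all segments) when no range is requested, when the
// object was migrated from the old format, or when the range type is unknown.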
func calculateStreamRange(object metabase.Object, req *pb.Range) (*metabase.StreamRange, error) {
	if req == nil || req.Range == nil {
		return nil, nil
	}

	if object.IsMigrated() {
		// The object is in the old format, which does not have plain_offset specified.
		// We need to fall back to returning all segments.
		return nil, nil
	}

	switch r := req.Range.(type) {
	case *pb.Range_Start:
		if r.Start == nil {
			return nil, Error.New("Start missing for Range_Start")
		}

		return &metabase.StreamRange{
			PlainStart: r.Start.PlainStart,
			PlainLimit: object.TotalPlainSize,
		}, nil
	case *pb.Range_StartLimit:
		if r.StartLimit == nil {
			return nil, Error.New("StartEnd missing for Range_StartEnd")
		}

		return &metabase.StreamRange{
			PlainStart: r.StartLimit.PlainStart,
			PlainLimit: r.StartLimit.PlainLimit,
		}, nil
	case *pb.Range_Suffix:
		if r.Suffix == nil {
			return nil, Error.New("Suffix missing for Range_Suffix")
		}

		return &metabase.StreamRange{
			PlainStart: object.TotalPlainSize - r.Suffix.PlainSuffix,
			PlainLimit: object.TotalPlainSize,
		}, nil
	}

	// if it's a new unsupported range type, let's return all data
	return nil, nil
}

// ListObjects list objects according to specific parameters.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) ListObjects ( ctx context . Context , req * pb . ObjectListRequest ) ( resp * pb . ObjectListResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-16 11:39:23 +01:00
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
2019-10-24 22:05:08 +01:00
EncryptedPath : req . EncryptedPrefix ,
2019-07-16 11:39:23 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-16 11:39:23 +01:00
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-16 11:39:23 +01:00
}
2020-03-16 08:55:52 +00:00
// TODO this needs to be optimized to avoid DB call on each request
2021-10-27 09:50:27 +01:00
placement , err := endpoint . buckets . GetBucketPlacement ( ctx , req . Bucket , keyInfo . ProjectID )
2020-03-16 08:55:52 +00:00
if err != nil {
2021-10-27 09:50:27 +01:00
if storj . ErrBucketNotFound . Has ( err ) {
2021-11-24 17:47:36 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . NotFound , "bucket not found: %s" , req . Bucket )
2021-10-27 09:50:27 +01:00
}
2020-03-16 08:55:52 +00:00
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2020-11-06 12:20:54 +00:00
limit := int ( req . Limit )
if limit < 0 {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "limit is negative" )
}
2021-06-25 09:19:32 +01:00
metabase . ListLimit . Ensure ( & limit )
2019-07-16 11:39:23 +01:00
2020-11-17 12:57:53 +00:00
var prefix metabase . ObjectKey
if len ( req . EncryptedPrefix ) != 0 {
prefix = metabase . ObjectKey ( req . EncryptedPrefix )
2020-11-17 13:37:19 +00:00
if prefix [ len ( prefix ) - 1 ] != metabase . Delimiter {
prefix += metabase . ObjectKey ( metabase . Delimiter )
2020-11-17 12:57:53 +00:00
}
}
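// For example, assuming metabase.Delimiter is '/', an EncryptedPrefix of
// "photos" is normalized to "photos/", so the listing matches only keys under
// that prefix and not sibling keys such as "photos2".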
2020-11-18 11:16:00 +00:00
// Default to Committed status for backward compatibility with older uplinks.
status := metabase . Committed
if req . Status != pb . Object_INVALID {
status = metabase . ObjectStatus ( req . Status )
}
2020-12-01 14:01:44 +00:00
cursor := string ( req . EncryptedCursor )
if len ( cursor ) != 0 {
cursor = string ( prefix ) + cursor
}
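// The cursor is interpreted relative to the prefix: continuing the example
// above, an EncryptedCursor of "2021/a" under the prefix "photos/" yields the
// iterator cursor key "photos/2021/a".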
2021-09-28 13:36:10 +01:00
includeCustomMetadata := true
includeSystemMetadata := true
2021-08-02 19:30:02 +01:00
if req . UseObjectIncludes {
2021-09-28 13:36:10 +01:00
includeCustomMetadata = req . ObjectIncludes . Metadata
includeSystemMetadata = ! req . ObjectIncludes . ExcludeSystemMetadata
2021-08-02 19:30:02 +01:00
}
2020-11-06 12:20:54 +00:00
resp = & pb . ObjectListResponse { }
// TODO: Replace with IterateObjectsLatestVersion when ready
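// The iterator below is configured with BatchSize limit+1: at most limit
// entries are copied into the response, and the one extra entry (consumed by
// the final it.Next) is what sets resp.More, typically without another round
// trip to the database.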
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . IterateObjectsAllVersionsWithStatus ( ctx ,
2020-12-21 15:07:00 +00:00
metabase . IterateObjectsWithStatus {
2020-11-06 12:20:54 +00:00
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
2020-11-17 12:57:53 +00:00
Prefix : prefix ,
2020-11-24 09:50:02 +00:00
Cursor : metabase . IterateCursor {
2020-12-01 14:01:44 +00:00
Key : metabase . ObjectKey ( cursor ) ,
2020-11-24 09:50:02 +00:00
Version : 1 , // TODO: set to the version from the protobuf request when it supports this
} ,
2021-09-28 13:36:10 +01:00
Recursive : req . Recursive ,
BatchSize : limit + 1 ,
Status : status ,
IncludeCustomMetadata : includeCustomMetadata ,
IncludeSystemMetadata : includeSystemMetadata ,
2020-11-06 12:20:54 +00:00
} , func ( ctx context . Context , it metabase . ObjectsIterator ) error {
entry := metabase . ObjectEntry { }
for len ( resp . Items ) < limit && it . Next ( ctx , & entry ) {
2021-10-27 09:50:27 +01:00
item , err := endpoint . objectEntryToProtoListItem ( ctx , req . Bucket , entry , prefix , includeCustomMetadata , placement )
2020-12-02 11:34:41 +00:00
if err != nil {
return err
2020-11-19 12:21:51 +00:00
}
2020-11-06 12:20:54 +00:00
resp . Items = append ( resp . Items , item )
}
resp . More = it . Next ( ctx , & entry )
return nil
} ,
)
2019-07-16 11:39:23 +01:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2019-07-16 11:39:23 +01:00
}
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Object List" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "list" ) , zap . String ( "type" , "object" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_list_object" ) . Mark ( 1 )
2019-07-16 11:39:23 +01:00
2020-11-06 12:20:54 +00:00
return resp , nil
2019-07-16 11:39:23 +01:00
}
2021-01-11 12:06:04 +00:00
// ListPendingObjectStreams lists pending objects according to specific parameters.
func ( endpoint * Endpoint ) ListPendingObjectStreams ( ctx context . Context , req * pb . ObjectListPendingStreamsRequest ) ( resp * pb . ObjectListPendingStreamsResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2021-01-11 12:06:04 +00:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2021-10-27 09:50:27 +01:00
placement , err := endpoint . buckets . GetBucketPlacement ( ctx , req . Bucket , keyInfo . ProjectID )
2021-01-11 12:06:04 +00:00
if err != nil {
2021-10-27 09:50:27 +01:00
if storj . ErrBucketNotFound . Has ( err ) {
2021-11-24 17:47:36 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . NotFound , "bucket not found: %s" , req . Bucket )
2021-10-27 09:50:27 +01:00
}
2021-01-11 12:06:04 +00:00
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
cursor := metabase . StreamIDCursor { }
if req . StreamIdCursor != nil {
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamIdCursor )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
cursor . StreamID , err = uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-01-11 12:06:04 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
}
limit := int ( req . Limit )
if limit < 0 {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "limit is negative" )
}
2021-06-25 09:19:32 +01:00
metabase . ListLimit . Ensure ( & limit )
2021-01-11 12:06:04 +00:00
resp = & pb . ObjectListPendingStreamsResponse { }
resp . Items = [ ] * pb . ObjectListItem { }
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . IteratePendingObjectsByKey ( ctx ,
2021-01-11 12:06:04 +00:00
metabase . IteratePendingObjectsByKey {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} ,
BatchSize : limit + 1 ,
Cursor : cursor ,
} , func ( ctx context . Context , it metabase . ObjectsIterator ) error {
entry := metabase . ObjectEntry { }
for len ( resp . Items ) < limit && it . Next ( ctx , & entry ) {
2021-10-27 09:50:27 +01:00
item , err := endpoint . objectEntryToProtoListItem ( ctx , req . Bucket , entry , "" , true , placement )
2021-01-11 12:06:04 +00:00
if err != nil {
return err
}
resp . Items = append ( resp . Items , item )
}
resp . More = it . Next ( ctx , & entry )
return nil
} ,
)
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2021-01-11 12:06:04 +00:00
}
endpoint . log . Info ( "List pending object streams" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "list" ) , zap . String ( "type" , "object" ) )
mon . Meter ( "req_list_pending_object_streams" ) . Mark ( 1 )
return resp , nil
}
2019-12-10 11:15:35 +00:00
// BeginDeleteObject begins object deletion process.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) BeginDeleteObject ( ctx context . Context , req * pb . ObjectBeginDeleteRequest ) ( resp * pb . ObjectBeginDeleteResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-03-11 15:53:16 +00:00
now := time . Now ( )
2021-08-31 17:15:43 +01:00
var canRead , canList bool
keyInfo , err := endpoint . validateAuthN ( ctx , req . Header ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : now ,
} ,
} ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : now ,
} ,
actionPermitted : & canRead ,
optional : true ,
} ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : now ,
} ,
actionPermitted : & canList ,
optional : true ,
} ,
)
2019-07-16 11:39:23 +01:00
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-16 11:39:23 +01:00
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-16 11:39:23 +01:00
}
2020-12-03 18:04:01 +00:00
var deletedObjects [ ] * pb . Object
if req . GetStatus ( ) == int32 ( metabase . Pending ) {
if req . StreamId == nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "StreamID missing" )
}
var pbStreamID * internalpb . StreamID
pbStreamID , err = endpoint . unmarshalSatStreamID ( ctx , * ( req . StreamId ) )
if err == nil {
var streamID uuid . UUID
streamID , err = uuid . FromBytes ( pbStreamID . StreamId )
if err == nil {
2021-05-04 14:51:40 +01:00
deletedObjects , err = endpoint . DeletePendingObject ( ctx ,
metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
Version : metabase . Version ( req . GetVersion ( ) ) ,
StreamID : streamID ,
} )
2020-12-03 18:04:01 +00:00
}
}
} else {
deletedObjects , err = endpoint . DeleteCommittedObject ( ctx , keyInfo . ProjectID , string ( req . Bucket ) , metabase . ObjectKey ( req . EncryptedPath ) )
}
2019-08-01 10:04:31 +01:00
if err != nil {
2020-03-11 15:53:16 +00:00
if ! canRead && ! canList {
// No error info is returned if neither Read nor List permission is granted
2020-04-02 08:45:51 +01:00
return & pb . ObjectBeginDeleteResponse { } , nil
2020-03-11 15:53:16 +00:00
}
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2019-08-01 10:04:31 +01:00
}
2020-07-27 21:12:14 +01:00
var object * pb . Object
if canRead || canList {
// Info about the deleted object is returned only if either Read or List permission is granted
2020-08-11 14:00:57 +01:00
if err != nil {
endpoint . log . Error ( "failed to construct deleted object information" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . String ( "Bucket" , string ( req . Bucket ) ) ,
zap . String ( "Encrypted Path" , string ( req . EncryptedPath ) ) ,
zap . Error ( err ) ,
)
}
2020-07-27 21:12:14 +01:00
if len ( deletedObjects ) > 0 {
object = deletedObjects [ 0 ]
}
}
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Object Delete" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "delete" ) , zap . String ( "type" , "object" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_delete_object" ) . Mark ( 1 )
2020-01-20 18:48:26 +00:00
2019-07-16 11:39:23 +01:00
return & pb . ObjectBeginDeleteResponse {
2020-07-17 10:17:31 +01:00
Object : object ,
2019-07-16 11:39:23 +01:00
} , nil
}
2020-08-11 18:35:23 +01:00
// GetObjectIPs returns the IP addresses of the nodes holding the pieces for
// the provided object. This is useful for knowing the locations of the pieces.
func ( endpoint * Endpoint ) GetObjectIPs ( ctx context . Context , req * pb . ObjectGetIPsRequest ) ( resp * pb . ObjectGetIPsResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-08-13 17:43:21 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2020-12-07 15:43:57 +00:00
// TODO we may need a custom metabase request to avoid two DB calls
2021-09-09 16:21:42 +01:00
object , err := endpoint . metabase . GetObjectLatestVersion ( ctx , metabase . GetObjectLatestVersion {
2020-12-07 15:43:57 +00:00
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} ,
} )
2020-08-13 17:43:21 +01:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2020-08-13 17:43:21 +01:00
}
2021-09-09 16:21:42 +01:00
pieceCountByNodeID , err := endpoint . metabase . GetStreamPieceCountByNodeID ( ctx ,
2021-03-08 13:09:32 +00:00
metabase . GetStreamPieceCountByNodeID {
2020-12-07 15:43:57 +00:00
StreamID : object . StreamID ,
} )
2021-03-08 13:09:32 +00:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2020-08-13 17:43:21 +01:00
}
2021-01-13 13:59:05 +00:00
nodeIDs := make ( [ ] storj . NodeID , 0 , len ( pieceCountByNodeID ) )
for nodeID := range pieceCountByNodeID {
nodeIDs = append ( nodeIDs , nodeID )
}
nodeIPMap , err := endpoint . overlay . GetNodeIPs ( ctx , nodeIDs )
2020-08-13 17:43:21 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-08-13 17:43:21 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-01-13 13:59:05 +00:00
nodeIPs := make ( [ ] [ ] byte , 0 , len ( nodeIPMap ) )
pieceCount := int64 ( 0 )
reliablePieceCount := int64 ( 0 )
for nodeID , count := range pieceCountByNodeID {
pieceCount += count
ip , reliable := nodeIPMap [ nodeID ]
if ! reliable {
continue
2020-08-13 17:43:21 +01:00
}
2021-01-13 13:59:05 +00:00
nodeIPs = append ( nodeIPs , [ ] byte ( ip ) )
reliablePieceCount += count
2020-08-13 17:43:21 +01:00
}
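// Pieces held by nodes that are missing from nodeIPMap (i.e. not currently
// considered reliable) still count toward PieceCount, but are excluded from
// Ips and ReliablePieceCount.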
2021-01-13 13:59:05 +00:00
return & pb . ObjectGetIPsResponse {
Ips : nodeIPs ,
2021-01-14 12:33:00 +00:00
SegmentCount : int64 ( object . SegmentCount ) ,
2021-01-13 13:59:05 +00:00
ReliablePieceCount : reliablePieceCount ,
PieceCount : pieceCount ,
} , nil
2020-08-11 18:35:23 +01:00
}
2021-07-02 13:39:46 +01:00
// UpdateObjectMetadata replaces object metadata.
func ( endpoint * Endpoint ) UpdateObjectMetadata ( ctx context . Context , req * pb . ObjectUpdateMetadataRequest ) ( resp * pb . ObjectUpdateMetadataResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedObjectKey ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2021-07-08 15:50:37 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-07-08 15:50:37 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-10-29 12:04:55 +01:00
var encryptedMetadataNonce [ ] byte
if ! req . EncryptedMetadataNonce . IsZero ( ) {
encryptedMetadataNonce = req . EncryptedMetadataNonce [ : ]
}
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . UpdateObjectMetadata ( ctx , metabase . UpdateObjectMetadata {
2021-07-08 15:50:37 +01:00
ObjectStream : metabase . ObjectStream {
2021-07-02 13:39:46 +01:00
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedObjectKey ) ,
2021-07-08 15:50:37 +01:00
Version : metabase . Version ( req . Version ) ,
StreamID : id ,
2021-07-02 13:39:46 +01:00
} ,
EncryptedMetadata : req . EncryptedMetadata ,
2021-10-29 12:04:55 +01:00
EncryptedMetadataNonce : encryptedMetadataNonce ,
2021-07-02 13:39:46 +01:00
EncryptedMetadataEncryptedKey : req . EncryptedMetadataEncryptedKey ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2021-07-02 13:39:46 +01:00
}
return & pb . ObjectUpdateMetadataResponse { } , nil
}
2020-06-30 22:49:29 +01:00
// BeginSegment begins segment uploading.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) BeginSegment ( ctx context . Context , req * pb . SegmentBeginRequest ) ( resp * pb . SegmentBeginResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
2021-08-25 20:00:55 +01:00
EncryptedPath : streamID . EncryptedObjectKey ,
2019-07-22 15:45:18 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2021-12-14 13:49:33 +00:00
if endpoint . config . ProjectLimits . ValidateSegmentLimit {
if exceeded , limit , err := endpoint . projectUsage . ExceedsSegmentUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
endpoint . log . Error (
"Retrieving project segment total failed; segment limit won't be enforced" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
} else if exceeded {
endpoint . log . Warn ( "Segment limit exceeded" ,
zap . String ( "Limit" , strconv . Itoa ( int ( limit ) ) ) ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
)
return nil , rpcstatus . Error ( rpcstatus . ResourceExhausted , "Exceeded Segments Limit" )
}
}
2019-07-24 12:33:23 +01:00
// no need to validate streamID fields because they were validated during BeginObject
2019-08-01 10:04:31 +01:00
if req . Position . Index < 0 {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "segment index must be non-negative" )
2019-08-01 10:04:31 +01:00
}
2020-12-22 11:12:07 +00:00
if err := endpoint . checkExceedsStorageUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
return nil , err
2019-07-24 12:33:23 +01:00
}
2021-03-26 11:56:40 +00:00
redundancy , err := eestream . NewRedundancyStrategyFromProto ( endpoint . defaultRS )
2019-07-24 12:33:23 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
maxPieceSize := eestream . CalcPieceSize ( req . MaxOrderLimit , redundancy )
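// Rough sketch of the sizing (an assumption, not verified against eestream
// internals): erasure encoding splits a segment into RequiredCount shares, so
// each piece is on the order of MaxOrderLimit / RequiredCount, rounded up to
// whole erasure shares; maxPieceSize then caps the per-node order limits below.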
2019-07-22 15:45:18 +01:00
2019-07-24 12:33:23 +01:00
request := overlay . FindStorageNodesRequest {
RequestedCount : redundancy . TotalCount ( ) ,
2021-10-27 09:50:27 +01:00
Placement : storj . PlacementConstraint ( streamID . Placement ) ,
2019-07-24 12:33:23 +01:00
}
2020-05-06 14:05:31 +01:00
nodes , err := endpoint . overlay . FindStorageNodesForUpload ( ctx , request )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
rootPieceID , addressedLimits , piecePrivateKey , err := endpoint . orders . CreatePutOrderLimits ( ctx , bucket , nodes , streamID . ExpirationDate , maxPieceSize )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
pieces := metabase . Pieces { }
for i , limit := range addressedLimits {
pieces = append ( pieces , metabase . Piece {
Number : uint16 ( i ) ,
StorageNode : limit . Limit . StorageNodeId ,
} )
}
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . BeginSegment ( ctx , metabase . BeginSegment {
2020-11-06 11:54:52 +00:00
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
2021-08-25 20:00:55 +01:00
ObjectKey : metabase . ObjectKey ( streamID . EncryptedObjectKey ) ,
2020-11-06 11:54:52 +00:00
StreamID : id ,
Version : 1 ,
} ,
Position : metabase . SegmentPosition {
Part : uint32 ( req . Position . PartNumber ) ,
Index : uint32 ( req . Position . Index ) ,
} ,
RootPieceID : rootPieceID ,
Pieces : pieces ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2020-11-06 11:54:52 +00:00
}
2020-10-29 16:16:25 +00:00
segmentID , err := endpoint . packSegmentID ( ctx , & internalpb . SegmentID {
2019-07-24 12:33:23 +01:00
StreamId : streamID ,
2020-11-06 11:54:52 +00:00
PartNumber : req . Position . PartNumber ,
2019-08-01 10:04:31 +01:00
Index : req . Position . Index ,
2019-07-24 12:33:23 +01:00
OriginalOrderLimits : addressedLimits ,
RootPieceId : rootPieceID ,
CreationDate : time . Now ( ) ,
} )
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Segment Upload" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "put" ) , zap . String ( "type" , "remote" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_put_remote" ) . Mark ( 1 )
2019-12-02 14:39:19 +00:00
2019-07-24 12:33:23 +01:00
return & pb . SegmentBeginResponse {
2020-11-10 16:05:27 +00:00
SegmentId : segmentID ,
AddressedLimits : addressedLimits ,
PrivateKey : piecePrivateKey ,
RedundancyScheme : endpoint . defaultRS ,
2019-07-24 12:33:23 +01:00
} , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// CommitSegment commits segment after uploading.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) CommitSegment ( ctx context . Context , req * pb . SegmentCommitRequest ) ( resp * pb . SegmentCommitResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
segmentID , err := endpoint . unmarshalSatSegmentID ( ctx , req . SegmentId )
if err != nil {
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
streamID := segmentID . StreamId
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
2021-08-25 20:00:55 +01:00
EncryptedPath : streamID . EncryptedObjectKey ,
2019-07-22 15:45:18 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2021-08-23 12:17:40 +01:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2021-04-07 16:51:00 +01:00
// cheap basic verification
2021-03-26 11:56:40 +00:00
if numResults := len ( req . UploadResult ) ; numResults < int ( endpoint . defaultRS . GetSuccessThreshold ( ) ) {
2019-10-17 19:01:40 +01:00
endpoint . log . Debug ( "the results of uploaded pieces for the segment is below the redundancy optimal threshold" ,
zap . Int ( "upload pieces results" , numResults ) ,
2021-03-26 11:56:40 +00:00
zap . Int32 ( "redundancy optimal threshold" , endpoint . defaultRS . GetSuccessThreshold ( ) ) ,
2019-11-05 21:04:07 +00:00
zap . Stringer ( "Segment ID" , req . SegmentId ) ,
2019-10-17 19:01:40 +01:00
)
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Errorf ( rpcstatus . InvalidArgument ,
2019-10-17 19:01:40 +01:00
"the number of results of uploaded pieces (%d) is below the optimal threshold (%d)" ,
2021-03-26 11:56:40 +00:00
numResults , endpoint . defaultRS . GetSuccessThreshold ( ) ,
2019-10-17 19:01:40 +01:00
)
}
2020-11-06 11:54:52 +00:00
rs := storj . RedundancyScheme {
2020-12-08 11:51:48 +00:00
Algorithm : storj . RedundancyAlgorithm ( endpoint . defaultRS . Type ) ,
RequiredShares : int16 ( endpoint . defaultRS . MinReq ) ,
RepairShares : int16 ( endpoint . defaultRS . RepairThreshold ) ,
OptimalShares : int16 ( endpoint . defaultRS . SuccessThreshold ) ,
TotalShares : int16 ( endpoint . defaultRS . Total ) ,
ShareSize : endpoint . defaultRS . ErasureShareSize ,
2020-11-06 11:54:52 +00:00
}
2021-04-07 16:51:00 +01:00
err = endpoint . pointerVerification . VerifySizes ( ctx , rs , req . SizeEncryptedData , req . UploadResult )
if err != nil {
endpoint . log . Debug ( "piece sizes are invalid" , zap . Error ( err ) )
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Errorf ( rpcstatus . InvalidArgument , "piece sizes are invalid: %v" , err )
2021-04-07 16:51:00 +01:00
}
// extract the original order limits
originalLimits := make ( [ ] * pb . OrderLimit , len ( segmentID . OriginalOrderLimits ) )
for i , orderLimit := range segmentID . OriginalOrderLimits {
originalLimits [ i ] = orderLimit . Limit
}
// verify the piece upload results
validPieces , invalidPieces , err := endpoint . pointerVerification . SelectValidPieces ( ctx , req . UploadResult , originalLimits )
if err != nil {
endpoint . log . Debug ( "pointer verification failed" , zap . Error ( err ) )
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Errorf ( rpcstatus . InvalidArgument , "pointer verification failed: %s" , err )
2021-04-07 16:51:00 +01:00
}
if len ( validPieces ) < int ( rs . OptimalShares ) {
endpoint . log . Debug ( "Number of valid pieces is less than the success threshold" ,
zap . Int ( "totalReceivedPieces" , len ( req . UploadResult ) ) ,
zap . Int ( "validPieces" , len ( validPieces ) ) ,
zap . Int ( "invalidPieces" , len ( invalidPieces ) ) ,
zap . Int ( "successThreshold" , int ( rs . OptimalShares ) ) ,
)
errMsg := fmt . Sprintf ( "Number of valid pieces (%d) is less than the success threshold (%d). Found %d invalid pieces" ,
len ( validPieces ) ,
rs . OptimalShares ,
len ( invalidPieces ) ,
)
if len ( invalidPieces ) > 0 {
errMsg = fmt . Sprintf ( "%s. Invalid Pieces:" , errMsg )
for _ , p := range invalidPieces {
errMsg = fmt . Sprintf ( "%s\nNodeID: %v, PieceNum: %d, Reason: %s" ,
errMsg , p . NodeID , p . PieceNum , p . Reason ,
)
}
}
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , errMsg )
2021-04-07 16:51:00 +01:00
}
pieces := metabase . Pieces { }
for _ , result := range validPieces {
pieces = append ( pieces , metabase . Piece {
Number : uint16 ( result . PieceNum ) ,
StorageNode : result . NodeId ,
} )
}
2020-11-06 11:54:52 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2020-11-06 11:54:52 +00:00
}
2020-01-27 20:25:52 +00:00
2021-06-10 11:08:21 +01:00
var expiresAt * time . Time
if ! streamID . ExpirationDate . IsZero ( ) {
expiresAt = & streamID . ExpirationDate
}
2021-04-07 15:20:05 +01:00
mbCommitSegment := metabase . CommitSegment {
2020-11-06 11:54:52 +00:00
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
2021-08-25 20:00:55 +01:00
ObjectKey : metabase . ObjectKey ( streamID . EncryptedObjectKey ) ,
2020-11-06 11:54:52 +00:00
StreamID : id ,
Version : 1 ,
} ,
2021-06-10 11:08:21 +01:00
ExpiresAt : expiresAt ,
2020-11-06 11:54:52 +00:00
EncryptedKey : req . EncryptedKey ,
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : req . EncryptedKeyNonce [ : ] ,
2020-11-06 11:54:52 +00:00
2020-11-20 12:37:54 +00:00
EncryptedSize : int32 ( req . SizeEncryptedData ) , // TODO incompatible types int32 vs int64
PlainSize : int32 ( req . PlainSize ) , // TODO incompatible types int32 vs int64
2021-03-25 07:56:13 +00:00
EncryptedETag : req . EncryptedETag ,
2020-11-06 11:54:52 +00:00
Position : metabase . SegmentPosition {
Part : uint32 ( segmentID . PartNumber ) ,
Index : uint32 ( segmentID . Index ) ,
} ,
RootPieceID : segmentID . RootPieceId ,
Redundancy : rs ,
Pieces : pieces ,
2021-10-27 09:50:27 +01:00
Placement : storj . PlacementConstraint ( streamID . Placement ) ,
2021-04-07 15:20:05 +01:00
}
2021-04-07 16:51:00 +01:00
err = endpoint . validateRemoteSegment ( ctx , mbCommitSegment , originalLimits )
2021-04-07 15:20:05 +01:00
if err != nil {
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2021-04-07 15:20:05 +01:00
}
if err := endpoint . checkExceedsStorageUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
2021-08-23 12:17:40 +01:00
return nil , err
2021-04-07 15:20:05 +01:00
}
segmentSize := req . SizeEncryptedData
totalStored := calculateSpaceUsed ( segmentSize , len ( pieces ) , rs )
// TODO: Replace with hash & signature validation
// Ensure neither the uplink nor the storage nodes are cheating on us.
// We cannot have more redundancy than total/min.
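// Illustrative numbers (an assumed scheme, not the configured defaults): with
// RequiredShares k=29 and TotalShares n=80, a 64 MiB segment may account for
// at most 64 MiB / 29 * 80 ≈ 176.6 MiB of stored piece data before this check
// rejects the commit as a size mismatch.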
if float64 ( totalStored ) > ( float64 ( segmentSize ) / float64 ( rs . RequiredShares ) ) * float64 ( rs . TotalShares ) {
endpoint . log . Debug ( "data size mismatch" ,
zap . Int64 ( "segment" , segmentSize ) ,
zap . Int64 ( "pieces" , totalStored ) ,
zap . Int16 ( "redundancy minimum requested" , rs . RequiredShares ) ,
zap . Int16 ( "redundancy total" , rs . TotalShares ) ,
)
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "mismatched segment size and piece usage" )
2021-04-07 15:20:05 +01:00
}
if err := endpoint . projectUsage . AddProjectStorageUsage ( ctx , keyInfo . ProjectID , segmentSize ) ; err != nil {
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// storage limits.
endpoint . log . Error ( "Could not track new project's storage usage" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
}
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . CommitSegment ( ctx , mbCommitSegment )
2020-11-06 11:54:52 +00:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2019-07-24 12:33:23 +01:00
}
2019-07-22 15:45:18 +01:00
2021-12-14 13:49:33 +00:00
// Update the current segment cache value, incrementing it by 1 as we commit a single segment.
err = endpoint . projectUsage . UpdateProjectSegmentUsage ( ctx , keyInfo . ProjectID , 1 )
if err != nil {
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// segment limits.
endpoint . log . Error (
"Could not track the new project's segment usage when committing segment" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
}
2021-08-23 12:17:40 +01:00
return & pb . SegmentCommitResponse {
2020-11-06 11:54:52 +00:00
SuccessfulPieces : int32 ( len ( pieces ) ) ,
2019-09-18 14:50:33 +01:00
} , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// MakeInlineSegment makes an inline segment on the satellite.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) MakeInlineSegment ( ctx context . Context , req * pb . SegmentMakeInlineRequest ) ( resp * pb . SegmentMakeInlineResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
2021-08-25 20:00:55 +01:00
EncryptedPath : streamID . EncryptedObjectKey ,
2019-07-22 15:45:18 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2021-08-23 12:17:40 +01:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2019-08-01 10:04:31 +01:00
if req . Position . Index < 0 {
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "segment index must be non-negative" )
2019-07-24 12:33:23 +01:00
}
2020-04-01 10:15:24 +01:00
inlineUsed := int64 ( len ( req . EncryptedInlineData ) )
2020-04-09 09:19:16 +01:00
if inlineUsed > endpoint . encInlineSegmentSize {
2021-11-24 17:47:36 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . InvalidArgument , "inline segment size cannot be larger than %s" , endpoint . config . MaxInlineSegmentSize )
2020-04-01 10:15:24 +01:00
}
2020-12-22 11:12:07 +00:00
if err := endpoint . checkExceedsStorageUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
2021-08-23 12:17:40 +01:00
return nil , err
2019-07-24 12:33:23 +01:00
}
2021-12-14 13:49:33 +00:00
if endpoint . config . ProjectLimits . ValidateSegmentLimit {
if exceeded , limit , err := endpoint . projectUsage . ExceedsSegmentUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
endpoint . log . Error (
"Retrieving project segment total failed; segment limit won't be enforced" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
} else if exceeded {
endpoint . log . Warn ( "Segment limit exceeded" ,
zap . String ( "Limit" , strconv . Itoa ( int ( limit ) ) ) ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
)
return nil , rpcstatus . Error ( rpcstatus . ResourceExhausted , "Exceeded Segments Limit" )
}
}
2019-10-31 17:27:38 +00:00
if err := endpoint . projectUsage . AddProjectStorageUsage ( ctx , keyInfo . ProjectID , inlineUsed ) ; err != nil {
2020-12-22 11:12:07 +00:00
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// storage limits.
endpoint . log . Error ( "Could not track new project's storage usage" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
2019-07-24 12:33:23 +01:00
}
2020-11-11 10:54:10 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2021-06-10 11:08:21 +01:00
var expiresAt * time . Time
if ! streamID . ExpirationDate . IsZero ( ) {
expiresAt = & streamID . ExpirationDate
}
2021-09-09 16:21:42 +01:00
err = endpoint . metabase . CommitInlineSegment ( ctx , metabase . CommitInlineSegment {
2020-11-11 10:54:10 +00:00
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
2021-08-25 20:00:55 +01:00
ObjectKey : metabase . ObjectKey ( streamID . EncryptedObjectKey ) ,
2020-11-11 10:54:10 +00:00
StreamID : id ,
Version : 1 ,
} ,
2021-06-10 11:08:21 +01:00
ExpiresAt : expiresAt ,
2020-11-11 10:54:10 +00:00
EncryptedKey : req . EncryptedKey ,
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : req . EncryptedKeyNonce . Bytes ( ) ,
2020-01-27 20:25:52 +00:00
2020-11-11 10:54:10 +00:00
Position : metabase . SegmentPosition {
Part : uint32 ( req . Position . PartNumber ) ,
Index : uint32 ( req . Position . Index ) ,
} ,
2021-03-25 07:56:13 +00:00
PlainSize : int32 ( req . PlainSize ) , // TODO incompatible types int32 vs int64
EncryptedETag : req . EncryptedETag ,
2020-11-11 10:54:10 +00:00
InlineData : req . EncryptedInlineData ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2019-07-24 12:33:23 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
err = endpoint . orders . UpdatePutInlineOrder ( ctx , bucket , inlineUsed )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-08-23 12:17:40 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2019-07-22 15:45:18 +01:00
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Inline Segment Upload" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "put" ) , zap . String ( "type" , "inline" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_put_inline" ) . Mark ( 1 )
2019-12-02 14:39:19 +00:00
2021-12-14 13:49:33 +00:00
// Update the current segment cache value, incrementing it by 1 as we commit a single segment.
err = endpoint . projectUsage . UpdateProjectSegmentUsage ( ctx , keyInfo . ProjectID , 1 )
if err != nil {
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// segment limits.
endpoint . log . Error (
"Could not track the new project's segment usage when committing segment" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
}
2021-08-23 12:17:40 +01:00
return & pb . SegmentMakeInlineResponse { } , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// ListSegments lists object segments.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) ListSegments ( ctx context . Context , req * pb . SegmentListRequest ) ( resp * pb . SegmentListResponse , err error ) {
2020-11-10 15:51:37 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
_ , err = endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2020-11-24 16:23:47 +00:00
Op : macaroon . ActionRead ,
2020-11-10 15:51:37 +00:00
Bucket : streamID . Bucket ,
2021-08-25 20:00:55 +01:00
EncryptedPath : streamID . EncryptedObjectKey ,
2020-11-10 15:51:37 +00:00
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
cursor := req . CursorPosition
if cursor == nil {
cursor = & pb . SegmentPosition { }
}
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-09-09 16:21:42 +01:00
result , err := endpoint . metabase . ListStreamPositions ( ctx , metabase . ListStreamPositions {
2020-11-10 15:51:37 +00:00
StreamID : id ,
Cursor : metabase . SegmentPosition {
Part : uint32 ( cursor . PartNumber ) ,
Index : uint32 ( cursor . Index ) ,
} ,
Limit : int ( req . Limit ) ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2020-11-10 15:51:37 +00:00
}
2021-03-31 12:08:22 +01:00
response , err := convertStreamListResults ( result )
if err != nil {
endpoint . log . Error ( "unable to convert stream list" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
response . EncryptionParameters = streamID . EncryptionParameters
return response , nil
}
func convertStreamListResults ( result metabase . ListStreamPositionsResult ) ( * pb . SegmentListResponse , error ) {
2020-11-10 15:51:37 +00:00
items := make ( [ ] * pb . SegmentListItem , len ( result . Segments ) )
for i , item := range result . Segments {
items [ i ] = & pb . SegmentListItem {
Position : & pb . SegmentPosition {
PartNumber : int32 ( item . Position . Part ) ,
Index : int32 ( item . Position . Index ) ,
} ,
2021-03-29 16:31:17 +01:00
PlainSize : int64 ( item . PlainSize ) ,
PlainOffset : item . PlainOffset ,
2020-11-10 15:51:37 +00:00
}
2021-03-12 18:21:36 +00:00
if item . CreatedAt != nil {
items [ i ] . CreatedAt = * item . CreatedAt
}
2021-03-25 07:56:13 +00:00
items [ i ] . EncryptedETag = item . EncryptedETag
2021-10-12 14:37:12 +01:00
var err error
items [ i ] . EncryptedKeyNonce , err = storj . NonceFromBytes ( item . EncryptedKeyNonce )
if err != nil {
return nil , err
}
2021-03-25 07:56:13 +00:00
items [ i ] . EncryptedKey = item . EncryptedKey
2020-11-10 15:51:37 +00:00
}
return & pb . SegmentListResponse {
2021-03-31 12:08:22 +01:00
Items : items ,
More : result . More ,
2020-11-10 15:51:37 +00:00
} , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// DownloadSegment returns the data necessary to download a segment.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) DownloadSegment ( ctx context . Context , req * pb . SegmentDownloadRequest ) ( resp * pb . SegmentDownloadResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-11-23 17:50:29 +00:00
if ctx . Err ( ) != nil {
return nil , rpcstatus . Error ( rpcstatus . Canceled , "client has closed the connection" )
}
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionRead ,
Bucket : streamID . Bucket ,
2021-08-25 20:00:55 +01:00
EncryptedPath : streamID . EncryptedObjectKey ,
2019-07-22 15:45:18 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
2019-07-24 12:33:23 +01:00
2020-12-22 11:12:07 +00:00
if exceeded , limit , err := endpoint . projectUsage . ExceedsBandwidthUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
2021-11-23 17:50:29 +00:00
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
2021-11-11 20:04:48 +00:00
endpoint . log . Error (
"Retrieving project bandwidth total failed; bandwidth limit won't be enforced" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
2020-12-22 11:12:07 +00:00
} else if exceeded {
2021-11-11 12:50:14 +00:00
endpoint . log . Warn ( "Monthly bandwidth limit exceeded" ,
2020-04-13 10:31:17 +01:00
zap . Stringer ( "Limit" , limit ) ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
2019-07-24 12:33:23 +01:00
)
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . ResourceExhausted , "Exceeded Usage Limit" )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
var segment metabase . Segment
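// Cursor convention handled below: PartNumber 0 together with Index -1 is how
// older, non-multipart uplinks request the last segment of the latest object
// version; any other cursor addresses a specific segment by part and index.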
if req . CursorPosition . PartNumber == 0 && req . CursorPosition . Index == - 1 {
2021-01-07 08:46:49 +00:00
if streamID . MultipartObject {
return nil , rpcstatus . Error ( rpcstatus . Unimplemented , "Used uplink version cannot download multipart objects." )
}
2021-09-09 16:21:42 +01:00
segment , err = endpoint . metabase . GetLatestObjectLastSegment ( ctx , metabase . GetLatestObjectLastSegment {
2020-11-06 11:54:52 +00:00
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
2021-08-25 20:00:55 +01:00
ObjectKey : metabase . ObjectKey ( streamID . EncryptedObjectKey ) ,
2020-11-06 11:54:52 +00:00
} ,
} )
} else {
2021-09-09 16:21:42 +01:00
segment , err = endpoint . metabase . GetSegmentByPosition ( ctx , metabase . GetSegmentByPosition {
2020-11-06 11:54:52 +00:00
StreamID : id ,
Position : metabase . SegmentPosition {
Part : uint32 ( req . CursorPosition . PartNumber ) ,
Index : uint32 ( req . CursorPosition . Index ) ,
} ,
} )
}
2020-06-02 00:19:10 +01:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2020-06-02 00:19:10 +01:00
}
2020-11-06 11:54:52 +00:00
// Update the current bandwidth cache value, incrementing it by the segment's encrypted size.
err = endpoint . projectUsage . UpdateProjectBandwidthUsage ( ctx , keyInfo . ProjectID , int64 ( segment . EncryptedSize ) )
2020-06-02 09:39:41 +01:00
if err != nil {
2021-11-23 17:50:29 +00:00
if errs2 . IsCanceled ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
2020-12-22 11:12:07 +00:00
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// bandwidth limits.
2021-11-23 17:50:29 +00:00
endpoint . log . Error ( "Could not track the new project's bandwidth usage when downloading a segment" ,
2020-12-22 11:12:07 +00:00
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
2020-06-02 09:39:41 +01:00
}
2019-08-01 10:04:31 +01:00
2021-10-12 14:37:12 +01:00
encryptedKeyNonce , err := storj . NonceFromBytes ( segment . EncryptedKeyNonce )
if err != nil {
endpoint . log . Error ( "unable to get encryption key nonce from metadata" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-01-07 15:03:16 +00:00
if segment . Inline ( ) {
2020-11-06 11:54:52 +00:00
err := endpoint . orders . UpdateGetInlineOrder ( ctx , bucket , int64 ( len ( segment . InlineData ) ) )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-01-29 15:03:30 +00:00
endpoint . log . Info ( "Inline Segment Download" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "get" ) , zap . String ( "type" , "inline" ) )
mon . Meter ( "req_get_inline" ) . Mark ( 1 )
2019-07-24 12:33:23 +01:00
return & pb . SegmentDownloadResponse {
2021-04-06 12:52:59 +01:00
PlainOffset : segment . PlainOffset ,
2021-03-31 12:08:22 +01:00
PlainSize : int64 ( segment . PlainSize ) ,
2020-11-06 11:54:52 +00:00
SegmentSize : int64 ( segment . EncryptedSize ) ,
EncryptedInlineData : segment . InlineData ,
2019-08-01 10:04:31 +01:00
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : encryptedKeyNonce ,
2020-11-06 11:54:52 +00:00
EncryptedKey : segment . EncryptedKey ,
2021-04-05 17:26:07 +01:00
Position : & pb . SegmentPosition {
PartNumber : int32 ( segment . Position . Part ) ,
Index : int32 ( segment . Position . Index ) ,
} ,
2019-07-24 12:33:23 +01:00
} , nil
2020-11-06 11:54:52 +00:00
}
// Remote segment
2021-05-13 14:31:55 +01:00
limits , privateKey , err := endpoint . orders . CreateGetOrderLimits ( ctx , bucket , segment , 0 )
2020-11-06 11:54:52 +00:00
if err != nil {
if orders . ErrDownloadFailedNotEnoughPieces . Has ( err ) {
endpoint . log . Error ( "Unable to create order limits." ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Stringer ( "API Key ID" , keyInfo . ID ) ,
zap . Error ( err ) ,
)
2019-07-24 12:33:23 +01:00
}
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2019-07-24 12:33:23 +01:00
2020-11-06 11:54:52 +00:00
endpoint . log . Info ( "Segment Download" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "get" ) , zap . String ( "type" , "remote" ) )
mon . Meter ( "req_get_remote" ) . Mark ( 1 )
2019-08-01 10:04:31 +01:00
2020-11-06 11:54:52 +00:00
return & pb . SegmentDownloadResponse {
AddressedLimits : limits ,
PrivateKey : privateKey ,
2021-04-06 12:52:59 +01:00
PlainOffset : segment . PlainOffset ,
2021-03-31 12:08:22 +01:00
PlainSize : int64 ( segment . PlainSize ) ,
2020-11-06 11:54:52 +00:00
SegmentSize : int64 ( segment . EncryptedSize ) ,
2019-07-24 12:33:23 +01:00
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : encryptedKeyNonce ,
2020-11-06 11:54:52 +00:00
EncryptedKey : segment . EncryptedKey ,
2021-02-17 09:54:04 +00:00
RedundancyScheme : & pb . RedundancyScheme {
Type : pb . RedundancyScheme_SchemeType ( segment . Redundancy . Algorithm ) ,
ErasureShareSize : segment . Redundancy . ShareSize ,
MinReq : int32 ( segment . Redundancy . RequiredShares ) ,
RepairThreshold : int32 ( segment . Redundancy . RepairShares ) ,
SuccessThreshold : int32 ( segment . Redundancy . OptimalShares ) ,
Total : int32 ( segment . Redundancy . TotalShares ) ,
} ,
2021-04-05 17:26:07 +01:00
Position : & pb . SegmentPosition {
PartNumber : int32 ( segment . Position . Part ) ,
Index : int32 ( segment . Position . Index ) ,
} ,
2020-11-06 11:54:52 +00:00
} , nil
2019-07-24 12:33:23 +01:00
}
2021-10-14 10:23:15 +01:00
// DeletePart is a no-op.
//
// It was used to delete a single part from the satellite DB and from the
// storage nodes. The method became a no-op because segments of pending
// objects can now be overwritten. It returns no error to avoid failing
// uplinks that still call it.
2021-08-05 08:36:32 +01:00
func ( endpoint * Endpoint ) DeletePart ( ctx context . Context , req * pb . PartDeleteRequest ) ( resp * pb . PartDeleteResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
return & pb . PartDeleteResponse { } , nil
}
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) packStreamID ( ctx context . Context , satStreamID * internalpb . StreamID ) ( streamID storj . StreamID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-22 15:45:18 +01:00
2020-10-29 16:16:25 +00:00
signedStreamID , err := SignStreamID ( ctx , endpoint . satellite , satStreamID )
2019-07-24 12:33:23 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-04-08 13:08:57 +01:00
encodedStreamID , err := pb . Marshal ( signedStreamID )
2019-07-24 12:33:23 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
streamID , err = storj . StreamIDFromBytes ( encodedStreamID )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
return streamID , nil
2019-07-22 15:45:18 +01:00
}
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) packSegmentID ( ctx context . Context , satSegmentID * internalpb . SegmentID ) ( segmentID storj . SegmentID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-29 16:16:25 +00:00
signedSegmentID , err := SignSegmentID ( ctx , endpoint . satellite , satSegmentID )
2019-07-24 12:33:23 +01:00
if err != nil {
return nil , err
}
2020-04-08 13:08:57 +01:00
encodedSegmentID , err := pb . Marshal ( signedSegmentID )
2019-07-24 12:33:23 +01:00
if err != nil {
return nil , err
}
segmentID , err = storj . SegmentIDFromBytes ( encodedSegmentID )
if err != nil {
return nil , err
}
return segmentID , nil
}
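// Both pack helpers follow the same pattern: sign the internal ID with the
// satellite identity, marshal it, and hand the uplink an opaque value; the
// unmarshal helpers below reverse the encoding and verify the signature
// before any field is trusted.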
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) unmarshalSatStreamID ( ctx context . Context , streamID storj . StreamID ) ( _ * internalpb . StreamID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-29 16:16:25 +00:00
satStreamID := & internalpb . StreamID { }
2020-04-08 13:08:57 +01:00
err = pb . Unmarshal ( streamID , satStreamID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
2020-10-29 16:16:25 +00:00
err = VerifyStreamID ( ctx , endpoint . satellite , satStreamID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
return satStreamID , nil
}
2020-10-30 11:22:16 +00:00
func ( endpoint * Endpoint ) unmarshalSatSegmentID ( ctx context . Context , segmentID storj . SegmentID ) ( _ * internalpb . SegmentID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-30 11:22:16 +00:00
satSegmentID := & internalpb . SegmentID { }
2020-04-08 13:08:57 +01:00
err = pb . Unmarshal ( segmentID , satSegmentID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
2019-11-15 15:44:23 +00:00
if satSegmentID . StreamId == nil {
return nil , errs . New ( "stream ID missing" )
}
2019-07-22 15:45:18 +01:00
2020-10-30 11:22:16 +00:00
err = VerifySegmentID ( ctx , endpoint . satellite , satSegmentID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
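// Segment IDs are only accepted for a limited window after they were issued
// (satIDExpiration); anything older is rejected as expired below.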
if satSegmentID . CreationDate . Before ( time . Now ( ) . Add ( - satIDExpiration ) ) {
return nil , errs . New ( "segment ID expired" )
}
return satSegmentID , nil
}
2019-12-11 17:44:13 +00:00
2020-12-03 18:04:01 +00:00
// DeleteCommittedObject deletes all the pieces of the storage nodes that belong
2019-12-11 17:44:13 +00:00
// to the specified object.
//
// NOTE: this method is exported so it can be tested individually without
// creating import cycles.
2020-12-03 18:04:01 +00:00
func ( endpoint * Endpoint ) DeleteCommittedObject (
2020-12-08 08:08:02 +00:00
ctx context . Context , projectID uuid . UUID , bucket string , object metabase . ObjectKey ,
2020-11-03 12:58:27 +00:00
) ( deletedObjects [ ] * pb . Object , err error ) {
2020-12-08 08:08:02 +00:00
defer mon . Task ( ) ( & ctx , projectID . String ( ) , bucket , object ) ( & err )
2019-12-11 17:44:13 +00:00
2020-11-03 12:58:27 +00:00
req := metabase . ObjectLocation {
2020-09-04 08:46:53 +01:00
ProjectID : projectID ,
2020-12-08 08:08:02 +00:00
BucketName : bucket ,
ObjectKey : object ,
2020-07-27 21:12:14 +01:00
}
2020-08-06 02:23:45 +01:00
2021-09-09 16:21:42 +01:00
result , err := endpoint . metabase . DeleteObjectsAllVersions ( ctx , metabase . DeleteObjectsAllVersions { Locations : [ ] metabase . ObjectLocation { req } } )
2020-12-03 18:04:01 +00:00
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , Error . Wrap ( err )
2020-12-03 18:04:01 +00:00
}
deletedObjects , err = endpoint . deleteObjectsPieces ( ctx , result )
2020-07-27 21:12:14 +01:00
if err != nil {
endpoint . log . Error ( "failed to delete pointers" ,
2020-12-08 08:08:02 +00:00
zap . Stringer ( "project" , projectID ) ,
zap . String ( "bucket" , bucket ) ,
zap . Binary ( "object" , [ ] byte ( object ) ) ,
2020-07-27 21:12:14 +01:00
zap . Error ( err ) ,
)
2021-10-22 09:42:52 +01:00
return deletedObjects , Error . Wrap ( err )
2019-12-11 17:44:13 +00:00
}
2020-11-03 12:58:27 +00:00
return deletedObjects , nil
2020-08-06 02:23:45 +01:00
}
2021-01-11 10:08:18 +00:00
// DeleteObjectAnyStatus deletes all the pieces of the storage nodes that belong
// to the specified object.
//
// NOTE: this method is exported so it can be tested individually without
// creating import cycles.
func ( endpoint * Endpoint ) DeleteObjectAnyStatus ( ctx context . Context , location metabase . ObjectLocation ,
) ( deletedObjects [ ] * pb . Object , err error ) {
defer mon . Task ( ) ( & ctx , location . ProjectID . String ( ) , location . BucketName , location . ObjectKey ) ( & err )
2021-09-09 16:21:42 +01:00
result , err := endpoint . metabase . DeleteObjectAnyStatusAllVersions ( ctx , metabase . DeleteObjectAnyStatusAllVersions {
2021-01-11 10:08:18 +00:00
ObjectLocation : location ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , Error . Wrap ( err )
2021-01-11 10:08:18 +00:00
}
deletedObjects , err = endpoint . deleteObjectsPieces ( ctx , result )
if err != nil {
endpoint . log . Error ( "failed to delete pointers" ,
zap . Stringer ( "project" , location . ProjectID ) ,
zap . String ( "bucket" , location . BucketName ) ,
zap . Binary ( "object" , [ ] byte ( location . ObjectKey ) ) ,
zap . Error ( err ) ,
)
return deletedObjects , err
}
return deletedObjects , nil
}
2020-12-03 18:04:01 +00:00
// DeletePendingObject deletes all the pieces of the storage nodes that belong
// to the specified pending object.
//
// NOTE: this method is exported so that it can be tested individually without
// creating import cycles.
2021-05-04 14:51:40 +01:00
func ( endpoint * Endpoint ) DeletePendingObject ( ctx context . Context , stream metabase . ObjectStream ) ( deletedObjects [ ] * pb . Object , err error ) {
2020-12-03 18:04:01 +00:00
req := metabase . DeletePendingObject {
2021-05-04 14:51:40 +01:00
ObjectStream : stream ,
2020-12-03 18:04:01 +00:00
}
2021-09-09 16:21:42 +01:00
result , err := endpoint . metabase . DeletePendingObject ( ctx , req )
2020-08-06 02:23:45 +01:00
if err != nil {
2020-11-03 12:58:27 +00:00
return nil , err
2020-08-06 02:23:45 +01:00
}
2020-12-03 18:04:01 +00:00
return endpoint . deleteObjectsPieces ( ctx , result )
}
func ( endpoint * Endpoint ) deleteObjectsPieces ( ctx context . Context , result metabase . DeleteObjectResult ) ( deletedObjects [ ] * pb . Object , err error ) {
2021-10-21 07:47:45 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-12-03 18:04:01 +00:00
// We should ignore client cancellation and always try to delete segments.
ctx = context2 . WithoutCancellation ( ctx )
2020-11-03 12:58:27 +00:00
deletedObjects = make ( [ ] * pb . Object , len ( result . Objects ) )
for i , object := range result . Objects {
2021-02-16 15:36:09 +00:00
deletedObject , err := endpoint . objectToProto ( ctx , object , endpoint . defaultRS )
2020-12-02 11:34:41 +00:00
if err != nil {
return nil , err
}
deletedObjects [ i ] = deletedObject
2020-11-03 12:58:27 +00:00
}
2019-12-16 19:03:20 +00:00
2020-12-09 12:24:37 +00:00
endpoint . deleteSegmentPieces ( ctx , result . Segments )
return deletedObjects , nil
}
func ( endpoint * Endpoint ) deleteSegmentPieces ( ctx context . Context , segments [ ] metabase . DeletedSegmentInfo ) {
2021-10-21 07:47:45 +01:00
var err error
defer mon . Task ( ) ( & ctx ) ( & err )
2020-12-09 12:24:37 +00:00
nodesPieces := groupPiecesByNodeID ( segments )
2019-12-16 19:03:20 +00:00
2020-11-03 12:58:27 +00:00
var requests [ ] piecedeletion . Request
for node , pieces := range nodesPieces {
requests = append ( requests , piecedeletion . Request {
Node : storj . NodeURL {
ID : node ,
} ,
Pieces : pieces ,
} )
2020-08-06 02:23:45 +01:00
}
2019-12-16 19:03:20 +00:00
2020-11-03 12:58:27 +00:00
// Only return an error if we fail to delete the objects. If we fail to
// delete pieces, let the garbage collector take care of them.
2021-10-21 07:47:45 +01:00
err = endpoint . deletePieces . Delete ( ctx , requests , deleteObjectPiecesSuccessThreshold )
if err != nil {
2020-08-06 02:23:45 +01:00
endpoint . log . Error ( "failed to delete pieces" , zap . Error ( err ) )
2019-12-16 19:03:20 +00:00
}
2020-11-03 12:58:27 +00:00
}
2021-02-16 15:36:09 +00:00
func ( endpoint * Endpoint ) objectToProto ( ctx context . Context , object metabase . Object , rs * pb . RedundancyScheme ) ( * pb . Object , error ) {
2020-12-02 11:34:41 +00:00
expires := time . Time { }
if object . ExpiresAt != nil {
expires = * object . ExpiresAt
}
2021-01-14 11:47:29 +00:00
// TotalPlainSize != 0 means the object was uploaded with a newer uplink.
multipartObject := object . TotalPlainSize != 0 && object . FixedSegmentSize <= 0
2020-12-02 11:34:41 +00:00
streamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-08-25 20:00:55 +01:00
Bucket : [ ] byte ( object . BucketName ) ,
EncryptedObjectKey : [ ] byte ( object . ObjectKey ) ,
Version : int32 ( object . Version ) , // TODO incompatible types
CreationDate : object . CreatedAt ,
ExpirationDate : expires ,
StreamId : object . StreamID [ : ] ,
MultipartObject : multipartObject ,
2021-03-24 09:33:56 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( object . Encryption . CipherSuite ) ,
BlockSize : int64 ( object . Encryption . BlockSize ) ,
} ,
2021-11-23 17:50:29 +00:00
// TODO: this is the only place where placement is not added to the StreamID
2021-10-27 09:50:27 +01:00
// bucket info would be required to add placement here
2020-12-02 11:34:41 +00:00
} )
if err != nil {
return nil , err
}
2021-10-12 14:37:12 +01:00
var nonce storj . Nonce
if len ( object . EncryptedMetadataNonce ) > 0 {
nonce , err = storj . NonceFromBytes ( object . EncryptedMetadataNonce )
if err != nil {
return nil , err
}
}
2020-12-02 11:34:41 +00:00
streamMeta := & pb . StreamMeta { }
err = pb . Unmarshal ( object . EncryptedMetadata , streamMeta )
if err != nil {
return nil , err
}
// TODO is this enough to handle old uplinks
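// Backfill any zero-valued StreamMeta fields from the object record below,
// presumably so that older uplinks, which still read these values from
// StreamMeta, keep working.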
if streamMeta . EncryptionBlockSize == 0 {
streamMeta . EncryptionBlockSize = object . Encryption . BlockSize
}
if streamMeta . EncryptionType == 0 {
streamMeta . EncryptionType = int32 ( object . Encryption . CipherSuite )
}
if streamMeta . NumberOfSegments == 0 {
streamMeta . NumberOfSegments = int64 ( object . SegmentCount )
}
if streamMeta . LastSegmentMeta == nil {
streamMeta . LastSegmentMeta = & pb . SegmentMeta {
EncryptedKey : object . EncryptedMetadataEncryptedKey ,
2021-10-12 14:37:12 +01:00
KeyNonce : object . EncryptedMetadataNonce ,
2020-12-02 11:34:41 +00:00
}
}
metadataBytes , err := pb . Marshal ( streamMeta )
if err != nil {
return nil , err
}
2020-11-03 12:58:27 +00:00
result := & pb . Object {
2020-12-02 11:34:41 +00:00
Bucket : [ ] byte ( object . BucketName ) ,
EncryptedPath : [ ] byte ( object . ObjectKey ) ,
Version : int32 ( object . Version ) , // TODO incompatible types
StreamId : streamID ,
ExpiresAt : expires ,
CreatedAt : object . CreatedAt ,
TotalSize : object . TotalEncryptedSize ,
PlainSize : object . TotalPlainSize ,
EncryptedMetadata : metadataBytes ,
2021-10-12 14:37:12 +01:00
EncryptedMetadataNonce : nonce ,
2020-12-02 11:34:41 +00:00
EncryptedMetadataEncryptedKey : object . EncryptedMetadataEncryptedKey ,
2020-11-03 12:58:27 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( object . Encryption . CipherSuite ) ,
BlockSize : int64 ( object . Encryption . BlockSize ) ,
} ,
2020-12-02 11:34:41 +00:00
2021-02-16 15:36:09 +00:00
RedundancyScheme : rs ,
2020-11-03 12:58:27 +00:00
}
2020-12-02 11:34:41 +00:00
return result , nil
}
2021-10-27 09:50:27 +01:00
func ( endpoint * Endpoint ) objectEntryToProtoListItem ( ctx context . Context , bucket [ ] byte ,
entry metabase . ObjectEntry , prefixToPrependInSatStreamID metabase . ObjectKey ,
includeMetadata bool , placement storj . PlacementConstraint ) ( item * pb . ObjectListItem , err error ) {
2020-12-02 11:34:41 +00:00
expires := time . Time { }
if entry . ExpiresAt != nil {
expires = * entry . ExpiresAt
}
2021-08-02 19:30:02 +01:00
item = & pb . ObjectListItem {
EncryptedPath : [ ] byte ( entry . ObjectKey ) ,
Version : int32 ( entry . Version ) , // TODO incompatible types
Status : pb . Object_Status ( entry . Status ) ,
ExpiresAt : expires ,
CreatedAt : entry . CreatedAt ,
PlainSize : entry . TotalPlainSize ,
}
if includeMetadata {
2021-10-12 14:37:12 +01:00
var nonce storj . Nonce
if len ( entry . EncryptedMetadataNonce ) > 0 {
nonce , err = storj . NonceFromBytes ( entry . EncryptedMetadataNonce )
if err != nil {
return nil , err
}
}
2021-08-02 19:30:02 +01:00
streamMeta := & pb . StreamMeta { }
err = pb . Unmarshal ( entry . EncryptedMetadata , streamMeta )
2020-12-02 11:34:41 +00:00
if err != nil {
return nil , err
}
2021-12-06 16:41:36 +00:00
if entry . Encryption != ( storj . EncryptionParameters { } ) {
2021-08-02 19:30:02 +01:00
streamMeta . EncryptionType = int32 ( entry . Encryption . CipherSuite )
2021-12-06 16:41:36 +00:00
streamMeta . EncryptionBlockSize = entry . Encryption . BlockSize
2021-08-02 19:30:02 +01:00
}
2021-12-06 16:41:36 +00:00
if entry . SegmentCount != 0 {
2021-08-02 19:30:02 +01:00
streamMeta . NumberOfSegments = int64 ( entry . SegmentCount )
}
2021-12-06 16:41:36 +00:00
if entry . EncryptedMetadataEncryptedKey != nil {
2021-08-02 19:30:02 +01:00
streamMeta . LastSegmentMeta = & pb . SegmentMeta {
EncryptedKey : entry . EncryptedMetadataEncryptedKey ,
2021-10-12 14:37:12 +01:00
KeyNonce : entry . EncryptedMetadataNonce ,
2021-08-02 19:30:02 +01:00
}
2020-12-02 11:34:41 +00:00
}
2020-11-03 12:58:27 +00:00
2021-08-02 19:30:02 +01:00
metadataBytes , err := pb . Marshal ( streamMeta )
if err != nil {
return nil , err
}
2020-12-02 11:34:41 +00:00
2021-08-02 19:30:02 +01:00
item . EncryptedMetadata = metadataBytes
2021-10-12 14:37:12 +01:00
item . EncryptedMetadataNonce = nonce
2021-12-17 13:31:01 +00:00
item . EncryptedMetadataEncryptedKey = entry . EncryptedMetadataEncryptedKey
2020-12-02 11:34:41 +00:00
}
// Add Stream ID to list items if listing is for pending objects.
// The client requires the Stream ID to use in the MultipartInfo.
if entry . Status == metabase . Pending {
satStreamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-08-25 20:00:55 +01:00
Bucket : bucket ,
EncryptedObjectKey : append ( [ ] byte ( prefixToPrependInSatStreamID ) , item . EncryptedPath ... ) ,
Version : item . Version ,
CreationDate : item . CreatedAt ,
ExpirationDate : item . ExpiresAt ,
StreamId : entry . StreamID [ : ] ,
MultipartObject : entry . FixedSegmentSize <= 0 ,
2021-03-24 09:33:56 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( entry . Encryption . CipherSuite ) ,
BlockSize : int64 ( entry . Encryption . BlockSize ) ,
} ,
2021-10-27 09:50:27 +01:00
Placement : int32 ( placement ) ,
2020-12-02 11:34:41 +00:00
} )
if err != nil {
return nil , err
}
item . StreamId = & satStreamID
}
2020-11-03 12:58:27 +00:00
2020-12-02 11:34:41 +00:00
return item , nil
2020-11-03 12:58:27 +00:00
}
// groupPiecesByNodeID returns a map of piece IDs to delete, keyed by node ID.
func groupPiecesByNodeID ( segments [ ] metabase . DeletedSegmentInfo ) map [ storj . NodeID ] [ ] storj . PieceID {
piecesToDelete := map [ storj . NodeID ] [ ] storj . PieceID { }
for _ , segment := range segments {
for _ , piece := range segment . Pieces {
pieceID := segment . RootPieceID . Derive ( piece . StorageNode , int32 ( piece . Number ) )
piecesToDelete [ piece . StorageNode ] = append ( piecesToDelete [ piece . StorageNode ] , pieceID )
}
}
return piecesToDelete
2020-01-17 18:47:37 +00:00
}
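
// For illustration (hypothetical node and piece IDs): if two deleted segments
// each have a piece stored on the same node N, the map contains a single entry
//
//	N -> []storj.PieceID{seg1.RootPieceID.Derive(N, num1), seg2.RootPieceID.Derive(N, num2)}
//
// so the caller issues only one piece-deletion request per node.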
2020-01-28 13:44:47 +00:00
2020-06-10 15:10:44 +01:00
// RevokeAPIKey handles requests to revoke an API key.
func ( endpoint * Endpoint ) RevokeAPIKey ( ctx context . Context , req * pb . RevokeAPIKeyRequest ) ( resp * pb . RevokeAPIKeyResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-06-10 15:10:44 +01:00
macToRevoke , err := macaroon . ParseMacaroon ( req . GetApiKey ( ) )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "API key to revoke is not a macaroon" )
}
keyInfo , err := endpoint . validateRevoke ( ctx , req . Header , macToRevoke )
if err != nil {
return nil , err
}
err = endpoint . revocations . Revoke ( ctx , macToRevoke . Tail ( ) , keyInfo . ID [ : ] )
if err != nil {
endpoint . log . Error ( "Failed to revoke API key" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , "Failed to revoke API key" )
}
return & pb . RevokeAPIKeyResponse { } , nil
2020-06-16 14:03:02 +01:00
}
2020-09-03 14:54:56 +01:00
2020-12-22 11:12:07 +00:00
func ( endpoint * Endpoint ) checkExceedsStorageUsage ( ctx context . Context , projectID uuid . UUID ) ( err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
exceeded , limit , err := endpoint . projectUsage . ExceedsStorageUsage ( ctx , projectID )
if err != nil {
2021-11-23 17:50:29 +00:00
if errs2 . IsCanceled ( err ) {
return rpcstatus . Wrap ( rpcstatus . Canceled , err )
}
2020-12-22 11:12:07 +00:00
endpoint . log . Error (
"Retrieving project storage totals failed; storage usage limit won't be enforced" ,
2021-11-11 20:04:48 +00:00
zap . Stringer ( "Project ID" , projectID ) ,
2020-12-22 11:12:07 +00:00
zap . Error ( err ) ,
)
} else if exceeded {
2021-11-11 12:50:14 +00:00
endpoint . log . Warn ( "Monthly storage limit exceeded" ,
2020-12-22 11:12:07 +00:00
zap . Stringer ( "Limit" , limit ) ,
zap . Stringer ( "Project ID" , projectID ) ,
)
return rpcstatus . Error ( rpcstatus . ResourceExhausted , "Exceeded Usage Limit" )
}
return nil
}
2021-08-16 13:04:33 +01:00
// Server-side move.
// BeginMoveObject begins moving an object to a different key.
func ( endpoint * Endpoint ) BeginMoveObject ( ctx context . Context , req * pb . ObjectBeginMoveRequest ) ( resp * pb . ObjectBeginMoveResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
now := time . Now ( )
keyInfo , err := endpoint . validateAuthN ( ctx , req . Header ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedObjectKey ,
Time : now ,
} ,
} ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedObjectKey ,
Time : now ,
} ,
} ,
verifyPermission {
action : macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . NewBucket ,
EncryptedPath : req . NewEncryptedObjectKey ,
Time : now ,
} ,
} ,
)
if err != nil {
return nil , err
}
for _ , bucket := range [ ] [ ] byte { req . Bucket , req . NewBucket } {
err = endpoint . validateBucket ( ctx , bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
}
// we verify the existence of the target bucket only, because the source bucket
// will be checked while querying the source object
// TODO this needs to be optimized to avoid a DB call on each request
2021-10-27 09:50:27 +01:00
newBucketPlacement , err := endpoint . buckets . GetBucketPlacement ( ctx , req . NewBucket , keyInfo . ProjectID )
2021-08-16 13:04:33 +01:00
if err != nil {
2021-10-27 09:50:27 +01:00
if storj . ErrBucketNotFound . Has ( err ) {
2021-12-07 11:11:03 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . NotFound , "bucket not found: %s" , req . NewBucket )
2021-10-27 09:50:27 +01:00
}
2021-08-16 13:04:33 +01:00
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-10-27 09:50:27 +01:00
// if source and target buckets are different, we need to check their geofencing configs
if ! bytes . Equal ( req . Bucket , req . NewBucket ) {
oldBucketPlacement , err := endpoint . buckets . GetBucketPlacement ( ctx , req . Bucket , keyInfo . ProjectID )
if err != nil {
if storj . ErrBucketNotFound . Has ( err ) {
2021-12-07 11:11:03 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . NotFound , "bucket not found: %s" , req . Bucket )
2021-10-27 09:50:27 +01:00
}
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
if oldBucketPlacement != newBucketPlacement {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "moving object to bucket with different placement policy is not (yet) supported" )
}
}
2021-08-16 13:04:33 +01:00
result , err := endpoint . metabase . BeginMoveObject ( ctx , metabase . BeginMoveObject {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedObjectKey ) ,
} ,
Version : metabase . DefaultVersion ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2021-08-16 13:04:33 +01:00
}
response , err := convertBeginMoveObjectResults ( result )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
satStreamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-08-25 20:00:55 +01:00
Bucket : req . Bucket ,
EncryptedObjectKey : req . EncryptedObjectKey ,
Version : int32 ( metabase . DefaultVersion ) ,
StreamId : result . StreamID [ : ] ,
2021-08-16 13:04:33 +01:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( result . EncryptionParameters . CipherSuite ) ,
BlockSize : int64 ( result . EncryptionParameters . BlockSize ) ,
} ,
2021-10-27 09:50:27 +01:00
Placement : int32 ( newBucketPlacement ) ,
2021-08-16 13:04:33 +01:00
} )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
response . StreamId = satStreamID
return response , nil
}
func convertBeginMoveObjectResults ( result metabase . BeginMoveObjectResult ) ( * pb . ObjectBeginMoveResponse , error ) {
keys := make ( [ ] * pb . EncryptedKeyAndNonce , len ( result . EncryptedKeysNonces ) )
for i , key := range result . EncryptedKeysNonces {
2021-10-29 12:04:55 +01:00
var nonce storj . Nonce
var err error
if len ( key . EncryptedKeyNonce ) != 0 {
nonce , err = storj . NonceFromBytes ( key . EncryptedKeyNonce )
if err != nil {
return nil , err
}
2021-10-12 14:37:12 +01:00
}
2021-08-16 13:04:33 +01:00
keys [ i ] = & pb . EncryptedKeyAndNonce {
2021-09-20 09:06:36 +01:00
Position : & pb . SegmentPosition {
PartNumber : int32 ( key . Position . Part ) ,
Index : int32 ( key . Position . Index ) ,
} ,
2021-08-16 13:04:33 +01:00
EncryptedKey : key . EncryptedKey ,
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : nonce ,
2021-08-16 13:04:33 +01:00
}
}
// TODO we need this because of an uplink issue with how we are storing the key and nonce
if result . EncryptedMetadataKey == nil {
streamMeta := & pb . StreamMeta { }
err := pb . Unmarshal ( result . EncryptedMetadata , streamMeta )
if err != nil {
return nil , err
}
if streamMeta . LastSegmentMeta != nil {
result . EncryptedMetadataKey = streamMeta . LastSegmentMeta . EncryptedKey
2021-10-12 14:37:12 +01:00
result . EncryptedMetadataKeyNonce = streamMeta . LastSegmentMeta . KeyNonce
2021-08-16 13:04:33 +01:00
}
}
2021-10-29 12:04:55 +01:00
var metadataNonce storj . Nonce
var err error
if len ( result . EncryptedMetadataKeyNonce ) != 0 {
metadataNonce , err = storj . NonceFromBytes ( result . EncryptedMetadataKeyNonce )
if err != nil {
return nil , err
}
2021-10-12 14:37:12 +01:00
}
2021-10-29 12:04:55 +01:00
2021-08-16 13:04:33 +01:00
return & pb . ObjectBeginMoveResponse {
EncryptedMetadataKey : result . EncryptedMetadataKey ,
2021-10-12 14:37:12 +01:00
EncryptedMetadataKeyNonce : metadataNonce ,
2021-08-16 13:04:33 +01:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( result . EncryptionParameters . CipherSuite ) ,
BlockSize : int64 ( result . EncryptionParameters . BlockSize ) ,
} ,
SegmentKeys : keys ,
} , nil
}
2021-08-31 12:44:18 +01:00
// FinishMoveObject accepts new encryption keys for the moved object and updates the corresponding object's ObjectKey and the segments' EncryptedKeys.
func ( endpoint * Endpoint ) FinishMoveObject ( ctx context . Context , req * pb . ObjectFinishMoveRequest ) ( resp * pb . ObjectFinishMoveResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionWrite ,
Time : time . Now ( ) ,
Bucket : req . NewBucket ,
EncryptedPath : req . NewEncryptedObjectKey ,
} )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . Unauthenticated , err . Error ( ) )
}
2021-09-22 08:50:24 +01:00
err = endpoint . validateBucket ( ctx , req . NewBucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
exists , err := endpoint . buckets . HasBucket ( ctx , req . NewBucket , keyInfo . ProjectID )
if err != nil {
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
} else if ! exists {
2021-11-24 17:47:36 +00:00
return nil , rpcstatus . Errorf ( rpcstatus . NotFound , "target bucket not found: %s" , req . NewBucket )
2021-09-22 08:50:24 +01:00
}
2021-08-31 12:44:18 +01:00
streamUUID , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
err = endpoint . metabase . FinishMoveObject ( ctx , metabase . FinishMoveObject {
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
2021-08-25 20:00:55 +01:00
ObjectKey : metabase . ObjectKey ( streamID . EncryptedObjectKey ) ,
2021-08-31 12:44:18 +01:00
Version : metabase . DefaultVersion ,
StreamID : streamUUID ,
} ,
NewSegmentKeys : protobufkeysToMetabase ( req . NewSegmentKeys ) ,
2021-09-27 09:41:13 +01:00
NewBucket : string ( req . NewBucket ) ,
2021-08-31 12:44:18 +01:00
NewEncryptedObjectKey : req . NewEncryptedObjectKey ,
2021-10-12 14:37:12 +01:00
NewEncryptedMetadataKeyNonce : req . NewEncryptedMetadataKeyNonce [ : ] ,
2021-08-31 12:44:18 +01:00
NewEncryptedMetadataKey : req . NewEncryptedMetadataKey ,
} )
if err != nil {
2021-10-22 09:42:52 +01:00
return nil , endpoint . convertMetabaseErr ( err )
2021-08-31 12:44:18 +01:00
}
return & pb . ObjectFinishMoveResponse { } , nil
}
2021-10-22 09:42:52 +01:00
// convertMetabaseErr converts metabase domain errors to the appropriate rpcstatus errors.
func ( endpoint * Endpoint ) convertMetabaseErr ( err error ) error {
switch {
case storj . ErrObjectNotFound . Has ( err ) :
return rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
case metabase . ErrSegmentNotFound . Has ( err ) :
return rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
case metabase . ErrInvalidRequest . Has ( err ) :
return rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
default :
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
}
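
// A sketch of the intended call-site pattern (SomeOperation and opts are
// placeholders; the handlers above use it the same way):
//
//	result, err := endpoint.metabase.SomeOperation(ctx, opts)
//	if err != nil {
//		return nil, endpoint.convertMetabaseErr(err)
//	}
//
// NotFound and InvalidArgument are surfaced to the client as-is, while
// everything else is logged and returned as an Internal error.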
2021-08-31 12:44:18 +01:00
// protobufkeysToMetabase converts []*pb.EncryptedKeyAndNonce to []metabase.EncryptedKeyAndNonce.
func protobufkeysToMetabase ( protoKeys [ ] * pb . EncryptedKeyAndNonce ) [ ] metabase . EncryptedKeyAndNonce {
keys := make ( [ ] metabase . EncryptedKeyAndNonce , len ( protoKeys ) )
for i , key := range protoKeys {
position := metabase . SegmentPosition { }
if key . Position != nil {
position = metabase . SegmentPosition {
Part : uint32 ( key . Position . PartNumber ) ,
Index : uint32 ( key . Position . Index ) ,
}
}
keys [ i ] = metabase . EncryptedKeyAndNonce {
2021-10-12 14:37:12 +01:00
EncryptedKeyNonce : key . EncryptedKeyNonce . Bytes ( ) ,
2021-08-31 12:44:18 +01:00
EncryptedKey : key . EncryptedKey ,
Position : position ,
}
}
return keys
}
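
// As a rough sketch, a single protobuf key for part 1, index 2 converts like
// this (encryptedKey and keyNonce are placeholders):
//
//	in := &pb.EncryptedKeyAndNonce{
//		Position:          &pb.SegmentPosition{PartNumber: 1, Index: 2},
//		EncryptedKey:      encryptedKey,
//		EncryptedKeyNonce: keyNonce,
//	}
//	out := protobufkeysToMetabase([]*pb.EncryptedKeyAndNonce{in})
//	// out[0].Position == metabase.SegmentPosition{Part: 1, Index: 2}
//	// out[0].EncryptedKeyNonce == keyNonce.Bytes()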