2019-03-18 10:55:06 +00:00
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"context"
2019-06-24 18:15:45 +01:00
"crypto/sha256"
2019-10-17 19:01:40 +01:00
"fmt"
2019-04-02 19:21:18 +01:00
"time"
2019-03-18 10:55:06 +00:00
2019-11-08 20:40:39 +00:00
"github.com/spacemonkeygo/monkit/v3"
2019-03-18 10:55:06 +00:00
"github.com/zeebo/errs"
"go.uber.org/zap"
2020-08-11 14:00:57 +01:00
"storj.io/common/context2"
2020-04-09 09:19:16 +01:00
"storj.io/common/encryption"
2021-08-12 17:26:43 +01:00
"storj.io/common/lrucache"
2020-05-29 14:31:26 +01:00
"storj.io/common/macaroon"
2020-06-01 21:07:31 +01:00
"storj.io/common/memory"
2019-12-27 11:48:47 +00:00
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/signing"
"storj.io/common/storj"
2020-03-30 10:08:50 +01:00
"storj.io/common/uuid"
2019-07-28 06:55:36 +01:00
"storj.io/storj/satellite/accounting"
2019-06-21 20:14:34 +01:00
"storj.io/storj/satellite/attribution"
2019-03-18 10:55:06 +00:00
"storj.io/storj/satellite/console"
2020-10-29 16:16:25 +00:00
"storj.io/storj/satellite/internalpb"
2021-04-21 13:42:57 +01:00
"storj.io/storj/satellite/metabase"
2020-03-12 07:03:46 +00:00
"storj.io/storj/satellite/metainfo/piecedeletion"
2020-03-18 13:24:31 +00:00
"storj.io/storj/satellite/metainfo/pointerverification"
2019-03-27 10:24:35 +00:00
"storj.io/storj/satellite/orders"
2019-07-28 06:55:36 +01:00
"storj.io/storj/satellite/overlay"
2020-06-03 14:51:02 +01:00
"storj.io/storj/satellite/revocation"
2019-11-26 11:12:37 +00:00
"storj.io/storj/satellite/rewards"
2020-02-21 14:07:29 +00:00
"storj.io/uplink/private/eestream"
2019-03-18 10:55:06 +00:00
)
2019-07-16 11:39:23 +01:00
// Tunables for the metainfo endpoint.
const (
	// satIDExpiration bounds how long a satellite-signed ID remains valid
	// (presumably checked where stream/segment IDs are unpacked — confirm at usage site).
	satIDExpiration = 48 * time.Hour

	// deleteObjectPiecesSuccessThreshold is the fraction of piece deletions
	// that must succeed for object deletion to be treated as successful
	// (used by the delete path — confirm at usage site).
	deleteObjectPiecesSuccessThreshold = 0.75
)
2019-07-03 17:14:37 +01:00
2019-03-18 10:55:06 +00:00
var (
	// mon collects monkit metrics for this package.
	mon = monkit.Package()

	// Error is the general metainfo error class.
	Error = errs.Class("metainfo")
	// ErrNodeAlreadyExists is returned when a pointer already has a piece for a node.
	ErrNodeAlreadyExists = errs.Class("metainfo: node already exists")
)
2020-06-30 22:49:29 +01:00
// APIKeys is api keys store methods used by endpoint.
//
// architecture: Database
type APIKeys interface {
	// GetByHead looks up an API key record by its head bytes.
	GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error)
}
2019-12-11 18:46:41 +00:00
// Endpoint metainfo endpoint.
//
// architecture: Endpoint
type Endpoint struct {
	pb.DRPCMetainfoUnimplementedServer

	log                 *zap.Logger
	metainfo            *Service
	deletePieces        *piecedeletion.Service
	orders              *orders.Service
	overlay             *overlay.Service
	attributions        attribution.DB
	partners            *rewards.PartnersService
	pointerVerification *pointerverification.Service
	projectUsage        *accounting.Service
	projects            console.Projects
	apiKeys             APIKeys
	satellite           signing.Signer
	// limiterCache caches rate limiters; capacity/expiration come from
	// config.RateLimiter (see NewEndpoint).
	limiterCache         *lrucache.ExpiringLRU
	encInlineSegmentSize int64 // max inline segment size + encryption overhead
	revocations          revocation.DB
	// defaultRS is the satellite-configured redundancy scheme, used to
	// override whatever uplinks send.
	defaultRS        *pb.RedundancyScheme
	config           Config
	versionCollector *versionCollector
}
2019-12-11 17:44:13 +00:00
// NewEndpoint creates new metainfo endpoint instance.
2020-03-12 07:03:46 +00:00
func NewEndpoint ( log * zap . Logger , metainfo * Service , deletePieces * piecedeletion . Service ,
2020-01-09 17:09:22 +00:00
orders * orders . Service , cache * overlay . Service , attributions attribution . DB ,
partners * rewards . PartnersService , peerIdentities overlay . PeerIdentities ,
2020-01-17 15:01:36 +00:00
apiKeys APIKeys , projectUsage * accounting . Service , projects console . Projects ,
2020-06-03 14:51:02 +01:00
satellite signing . Signer , revocations revocation . DB , config Config ) ( * Endpoint , error ) {
2019-03-18 10:55:06 +00:00
// TODO do something with too many params
2020-04-09 09:19:16 +01:00
encInlineSegmentSize , err := encryption . CalcEncryptedSize ( config . MaxInlineSegmentSize . Int64 ( ) , storj . EncryptionParameters {
CipherSuite : storj . EncAESGCM ,
BlockSize : 128 , // intentionally low block size to allow maximum possible encryption overhead
} )
if err != nil {
return nil , err
}
2020-11-10 11:56:30 +00:00
defaultRSScheme := & pb . RedundancyScheme {
Type : pb . RedundancyScheme_RS ,
MinReq : int32 ( config . RS . Min ) ,
RepairThreshold : int32 ( config . RS . Repair ) ,
SuccessThreshold : int32 ( config . RS . Success ) ,
Total : int32 ( config . RS . Total ) ,
ErasureShareSize : config . RS . ErasureShareSize . Int32 ( ) ,
}
2019-03-18 10:55:06 +00:00
return & Endpoint {
2020-03-18 13:24:31 +00:00
log : log ,
metainfo : metainfo ,
deletePieces : deletePieces ,
orders : orders ,
overlay : cache ,
attributions : attributions ,
partners : partners ,
pointerVerification : pointerverification . NewService ( peerIdentities ) ,
apiKeys : apiKeys ,
projectUsage : projectUsage ,
projects : projects ,
satellite : satellite ,
2020-01-29 15:22:22 +00:00
limiterCache : lrucache . New ( lrucache . Options {
2020-04-01 10:15:24 +01:00
Capacity : config . RateLimiter . CacheCapacity ,
Expiration : config . RateLimiter . CacheExpiration ,
2020-01-29 15:22:22 +00:00
} ) ,
2020-04-09 09:19:16 +01:00
encInlineSegmentSize : encInlineSegmentSize ,
2020-06-03 14:51:02 +01:00
revocations : revocations ,
2020-11-10 11:56:30 +00:00
defaultRS : defaultRSScheme ,
2020-04-09 09:19:16 +01:00
config : config ,
2021-02-09 22:40:23 +00:00
versionCollector : newVersionCollector ( ) ,
2020-04-09 09:19:16 +01:00
} , nil
2019-03-18 10:55:06 +00:00
}
2020-06-30 22:49:29 +01:00
// Close closes resources. The endpoint currently holds nothing that needs
// explicit release, so this is a no-op and always returns nil.
func (endpoint *Endpoint) Close() error { return nil }
2020-11-06 11:54:52 +00:00
func calculateSpaceUsed ( segmentSize int64 , numberOfPieces int , rs storj . RedundancyScheme ) ( totalStored int64 ) {
pieceSize := segmentSize / int64 ( rs . RequiredShares )
return pieceSize * int64 ( numberOfPieces )
2019-05-10 02:39:21 +01:00
}
2020-06-30 22:49:29 +01:00
// ProjectInfo returns allowed ProjectInfo for the provided API key.
2019-06-24 18:15:45 +01:00
func ( endpoint * Endpoint ) ProjectInfo ( ctx context . Context , req * pb . ProjectInfoRequest ) ( _ * pb . ProjectInfoResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-06-24 18:15:45 +01:00
Op : macaroon . ActionProjectInfo ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-06-24 18:15:45 +01:00
}
salt := sha256 . Sum256 ( keyInfo . ProjectID [ : ] )
return & pb . ProjectInfoResponse {
ProjectSalt : salt [ : ] ,
} , nil
}
2019-07-01 23:17:30 +01:00
2020-06-30 22:49:29 +01:00
// GetBucket returns a bucket.
2019-07-08 23:32:18 +01:00
func ( endpoint * Endpoint ) GetBucket ( ctx context . Context , req * pb . BucketGetRequest ) ( resp * pb . BucketGetResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-08 23:32:18 +01:00
Op : macaroon . ActionRead ,
Bucket : req . Name ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-08 23:32:18 +01:00
}
bucket , err := endpoint . metainfo . GetBucket ( ctx , req . GetName ( ) , keyInfo . ProjectID )
if err != nil {
2019-07-12 13:57:02 +01:00
if storj . ErrBucketNotFound . Has ( err ) {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
2019-07-12 13:57:02 +01:00
}
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-08 23:32:18 +01:00
}
2020-01-28 13:44:47 +00:00
// override RS to fit satellite settings
2020-11-10 11:56:30 +00:00
convBucket , err := convertBucketToProto ( bucket , endpoint . defaultRS )
2019-07-19 16:17:34 +01:00
if err != nil {
return resp , err
}
2019-07-08 23:32:18 +01:00
return & pb . BucketGetResponse {
2019-07-19 16:17:34 +01:00
Bucket : convBucket ,
2019-07-08 23:32:18 +01:00
} , nil
2019-07-01 23:17:30 +01:00
}
2020-06-30 22:49:29 +01:00
// CreateBucket creates a new bucket.
2019-07-08 23:32:18 +01:00
func ( endpoint * Endpoint ) CreateBucket ( ctx context . Context , req * pb . BucketCreateRequest ) ( resp * pb . BucketCreateResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-08 23:32:18 +01:00
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-08 23:32:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : req . Name ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-08 23:32:18 +01:00
}
err = endpoint . validateBucket ( ctx , req . Name )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-08 23:32:18 +01:00
}
2019-07-19 16:17:34 +01:00
// checks if bucket exists before updates it or makes a new entry
2021-04-02 17:19:17 +01:00
exists , err := endpoint . metainfo . HasBucket ( ctx , req . GetName ( ) , keyInfo . ProjectID )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-04-02 17:19:17 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
} else if exists {
2020-04-14 12:50:50 +01:00
// When the bucket exists, try to set the attribution.
2020-07-24 10:40:17 +01:00
if err := endpoint . ensureAttribution ( ctx , req . Header , keyInfo , req . GetName ( ) ) ; err != nil {
2020-04-14 12:50:50 +01:00
return nil , err
}
2019-11-26 11:12:37 +00:00
return nil , rpcstatus . Error ( rpcstatus . AlreadyExists , "bucket already exists" )
}
2019-07-08 23:32:18 +01:00
2020-06-30 22:49:29 +01:00
// check if project has exceeded its allocated bucket limit
maxBuckets , err := endpoint . projects . GetMaxBuckets ( ctx , keyInfo . ProjectID )
if err != nil {
return nil , err
}
2020-09-06 00:02:12 +01:00
if maxBuckets == nil {
defaultMaxBuckets := endpoint . config . ProjectLimits . MaxBuckets
maxBuckets = & defaultMaxBuckets
2020-06-30 22:49:29 +01:00
}
bucketCount , err := endpoint . metainfo . CountBuckets ( ctx , keyInfo . ProjectID )
if err != nil {
return nil , err
}
2020-09-06 00:02:12 +01:00
if bucketCount >= * maxBuckets {
2020-06-30 22:49:29 +01:00
return nil , rpcstatus . Error ( rpcstatus . ResourceExhausted , fmt . Sprintf ( "number of allocated buckets (%d) exceeded" , endpoint . config . ProjectLimits . MaxBuckets ) )
}
bucketReq , err := convertProtoToBucket ( req , keyInfo . ProjectID )
2019-11-26 11:12:37 +00:00
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2019-07-19 16:17:34 +01:00
2020-06-30 22:49:29 +01:00
bucket , err := endpoint . metainfo . CreateBucket ( ctx , bucketReq )
2019-11-26 11:12:37 +00:00
if err != nil {
2020-06-30 22:49:29 +01:00
endpoint . log . Error ( "error while creating bucket" , zap . String ( "bucketName" , bucketReq . Name ) , zap . Error ( err ) )
2019-11-26 11:12:37 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , "unable to create bucket" )
}
2019-07-19 16:17:34 +01:00
2020-04-14 12:50:50 +01:00
// Once we have created the bucket, we can try setting the attribution.
2020-07-24 10:40:17 +01:00
if err := endpoint . ensureAttribution ( ctx , req . Header , keyInfo , req . GetName ( ) ) ; err != nil {
2020-04-14 12:50:50 +01:00
return nil , err
}
2020-01-28 13:44:47 +00:00
// override RS to fit satellite settings
2020-11-10 11:56:30 +00:00
convBucket , err := convertBucketToProto ( bucket , endpoint . defaultRS )
2019-11-26 11:12:37 +00:00
if err != nil {
endpoint . log . Error ( "error while converting bucket to proto" , zap . String ( "bucketName" , bucket . Name ) , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , "unable to create bucket" )
2019-07-19 16:17:34 +01:00
}
2019-11-26 11:12:37 +00:00
return & pb . BucketCreateResponse {
Bucket : convBucket ,
} , nil
2019-07-01 23:17:30 +01:00
}
2020-06-30 22:49:29 +01:00
// DeleteBucket deletes a bucket.
//
// Delete permission is required. Read/List permissions are probed separately:
// they control whether info about the deleted bucket is returned, and List
// additionally gates the delete-all-objects path.
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// Version collection is best-effort: a failure is logged, not returned.
	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	// One timestamp for all three permission checks so they agree.
	now := time.Now()

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:     macaroon.ActionDelete,
		Bucket: req.Name,
		Time:   now,
	})
	if err != nil {
		return nil, err
	}
	err = endpoint.validateBucket(ctx, req.Name)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// Probe for Read and List permissions; errors here only clear the flags.
	_, err = endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:     macaroon.ActionRead,
		Bucket: req.Name,
		Time:   now,
	})
	canRead := err == nil
	_, err = endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:     macaroon.ActionList,
		Bucket: req.Name,
		Time:   now,
	})
	canList := err == nil

	var (
		bucket     storj.Bucket
		convBucket *pb.Bucket
	)
	if canRead || canList {
		// Info about deleted bucket is returned only if either Read, or List permission is granted.
		bucket, err = endpoint.metainfo.GetBucket(ctx, req.Name, keyInfo.ProjectID)
		if err != nil {
			if storj.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
			}
			return nil, err
		}

		convBucket, err = convertBucketToProto(bucket, endpoint.defaultRS)
		if err != nil {
			return nil, err
		}
	}

	err = endpoint.metainfo.DeleteBucket(ctx, req.Name, keyInfo.ProjectID)
	if err != nil {
		if !canRead && !canList {
			// No error info is returned if neither Read, nor List permission is granted.
			return &pb.BucketDeleteResponse{}, nil
		}
		if ErrBucketNotEmpty.Has(err) {
			// List permission is required to delete all objects in a bucket.
			if !req.GetDeleteAll() || !canList {
				return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, err.Error())
			}

			// Purge all objects, then delete the (now empty) bucket.
			_, deletedObjCount, err := endpoint.deleteBucketNotEmpty(ctx, keyInfo.ProjectID, req.Name)
			if err != nil {
				return nil, err
			}

			return &pb.BucketDeleteResponse{Bucket: convBucket, DeletedObjectsCount: deletedObjCount}, nil
		}
		if storj.ErrBucketNotFound.Has(err) {
			// Bucket already gone: treat as success (idempotent delete).
			return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
}
2020-11-17 17:53:24 +00:00
// deleteBucketNotEmpty deletes all objects from bucket and deletes this bucket.
// On success, it returns only the number of deleted objects.
2020-12-18 11:33:28 +00:00
func ( endpoint * Endpoint ) deleteBucketNotEmpty ( ctx context . Context , projectID uuid . UUID , bucketName [ ] byte ) ( [ ] byte , int64 , error ) {
2020-11-17 17:53:24 +00:00
deletedCount , err := endpoint . deleteBucketObjects ( ctx , projectID , bucketName )
2020-03-11 15:53:16 +00:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-08-06 02:23:45 +01:00
return nil , 0 , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2020-03-11 15:53:16 +00:00
2020-08-06 02:23:45 +01:00
err = endpoint . metainfo . DeleteBucket ( ctx , bucketName , projectID )
if err != nil {
if ErrBucketNotEmpty . Has ( err ) {
2020-08-28 19:27:58 +01:00
return nil , deletedCount , rpcstatus . Error ( rpcstatus . FailedPrecondition , "cannot delete the bucket because it's being used by another process" )
2020-08-06 02:23:45 +01:00
}
if storj . ErrBucketNotFound . Has ( err ) {
return bucketName , 0 , nil
}
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-08-28 19:27:58 +01:00
return nil , deletedCount , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2020-08-06 02:23:45 +01:00
}
return bucketName , deletedCount , nil
}
2020-11-17 17:53:24 +00:00
// deleteBucketObjects deletes all objects in a bucket.
2020-12-18 11:33:28 +00:00
func ( endpoint * Endpoint ) deleteBucketObjects ( ctx context . Context , projectID uuid . UUID , bucketName [ ] byte ) ( _ int64 , err error ) {
2020-08-06 02:23:45 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-12-09 12:24:37 +00:00
bucketLocation := metabase . BucketLocation { ProjectID : projectID , BucketName : string ( bucketName ) }
2020-12-18 11:33:28 +00:00
deletedObjects , err := endpoint . metainfo . metabaseDB . DeleteBucketObjects ( ctx , metabase . DeleteBucketObjects {
2020-12-09 12:24:37 +00:00
Bucket : bucketLocation ,
DeletePieces : func ( ctx context . Context , deleted [ ] metabase . DeletedSegmentInfo ) error {
endpoint . deleteSegmentPieces ( ctx , deleted )
return nil
} ,
2020-11-17 17:53:24 +00:00
} )
2020-12-09 12:24:37 +00:00
2020-12-18 11:33:28 +00:00
return deletedObjects , Error . Wrap ( err )
2019-07-01 23:17:30 +01:00
}
2020-06-30 22:49:29 +01:00
// ListBuckets returns buckets in a project where the bucket name matches the request cursor.
2019-07-08 23:32:18 +01:00
func ( endpoint * Endpoint ) ListBuckets ( ctx context . Context , req * pb . BucketListRequest ) ( resp * pb . BucketListResponse , err error ) {
2019-07-01 23:17:30 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-08 23:32:18 +01:00
action := macaroon . Action {
2020-03-11 15:53:16 +00:00
// TODO: This has to be ActionList, but it seems to be set to
// ActionRead as a hacky workaround to make bucket listing possible.
2019-07-08 23:32:18 +01:00
Op : macaroon . ActionRead ,
Time : time . Now ( ) ,
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , action )
2019-07-08 23:32:18 +01:00
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-08 23:32:18 +01:00
}
2019-09-19 17:19:29 +01:00
allowedBuckets , err := getAllowedBuckets ( ctx , req . Header , action )
2019-07-08 23:32:18 +01:00
if err != nil {
return nil , err
}
listOpts := storj . BucketListOptions {
2019-07-12 13:57:02 +01:00
Cursor : string ( req . Cursor ) ,
Limit : int ( req . Limit ) ,
Direction : storj . ListDirection ( req . Direction ) ,
2019-07-08 23:32:18 +01:00
}
bucketList , err := endpoint . metainfo . ListBuckets ( ctx , keyInfo . ProjectID , listOpts , allowedBuckets )
if err != nil {
return nil , err
}
bucketItems := make ( [ ] * pb . BucketListItem , len ( bucketList . Items ) )
for i , item := range bucketList . Items {
bucketItems [ i ] = & pb . BucketListItem {
Name : [ ] byte ( item . Name ) ,
CreatedAt : item . Created ,
}
}
return & pb . BucketListResponse {
Items : bucketItems ,
More : bucketList . More ,
} , nil
2019-07-01 23:17:30 +01:00
}
2020-06-30 22:49:29 +01:00
// CountBuckets returns the number of buckets a project currently has.
// TODO: add this to the uplink client side.
func (endpoint *Endpoint) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
	if count, err = endpoint.metainfo.CountBuckets(ctx, projectID); err != nil {
		// Normalize the count to zero on failure.
		return 0, err
	}
	return count, nil
}
2019-09-19 17:19:29 +01:00
func getAllowedBuckets ( ctx context . Context , header * pb . RequestHeader , action macaroon . Action ) ( _ macaroon . AllowedBuckets , err error ) {
key , err := getAPIKey ( ctx , header )
2019-07-08 23:32:18 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return macaroon . AllowedBuckets { } , rpcstatus . Errorf ( rpcstatus . InvalidArgument , "Invalid API credentials: %v" , err )
2019-07-08 23:32:18 +01:00
}
2019-07-12 13:57:02 +01:00
allowedBuckets , err := key . GetAllowedBuckets ( ctx , action )
2019-07-08 23:32:18 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return macaroon . AllowedBuckets { } , rpcstatus . Errorf ( rpcstatus . Internal , "GetAllowedBuckets: %v" , err )
2019-07-08 23:32:18 +01:00
}
return allowedBuckets , err
}
2019-07-19 16:17:34 +01:00
func convertProtoToBucket ( req * pb . BucketCreateRequest , projectID uuid . UUID ) ( bucket storj . Bucket , err error ) {
2019-07-08 23:32:18 +01:00
bucketID , err := uuid . New ( )
if err != nil {
return storj . Bucket { } , err
}
defaultRS := req . GetDefaultRedundancyScheme ( )
defaultEP := req . GetDefaultEncryptionParameters ( )
2019-07-19 16:17:34 +01:00
2019-11-26 11:12:37 +00:00
// TODO: resolve partner id
2019-07-19 16:17:34 +01:00
var partnerID uuid . UUID
err = partnerID . UnmarshalJSON ( req . GetPartnerId ( ) )
// bucket's partnerID should never be set
// it is always read back from buckets DB
if err != nil && ! partnerID . IsZero ( ) {
return bucket , errs . New ( "Invalid uuid" )
}
2019-07-08 23:32:18 +01:00
return storj . Bucket {
2020-04-02 15:18:08 +01:00
ID : bucketID ,
2019-07-08 23:32:18 +01:00
Name : string ( req . GetName ( ) ) ,
2020-04-02 15:18:08 +01:00
ProjectID : projectID ,
PartnerID : partnerID ,
2019-07-08 23:32:18 +01:00
PathCipher : storj . CipherSuite ( req . GetPathCipher ( ) ) ,
DefaultSegmentsSize : req . GetDefaultSegmentSize ( ) ,
DefaultRedundancyScheme : storj . RedundancyScheme {
Algorithm : storj . RedundancyAlgorithm ( defaultRS . GetType ( ) ) ,
ShareSize : defaultRS . GetErasureShareSize ( ) ,
RequiredShares : int16 ( defaultRS . GetMinReq ( ) ) ,
RepairShares : int16 ( defaultRS . GetRepairThreshold ( ) ) ,
OptimalShares : int16 ( defaultRS . GetSuccessThreshold ( ) ) ,
TotalShares : int16 ( defaultRS . GetTotal ( ) ) ,
} ,
DefaultEncryptionParameters : storj . EncryptionParameters {
CipherSuite : storj . CipherSuite ( defaultEP . CipherSuite ) ,
BlockSize : int32 ( defaultEP . BlockSize ) ,
} ,
} , nil
}
2020-06-30 22:49:29 +01:00
func convertBucketToProto ( bucket storj . Bucket , rs * pb . RedundancyScheme ) ( pbBucket * pb . Bucket , err error ) {
2020-03-11 15:53:16 +00:00
if bucket == ( storj . Bucket { } ) {
return nil , nil
}
2019-07-19 16:17:34 +01:00
partnerID , err := bucket . PartnerID . MarshalJSON ( )
if err != nil {
2019-09-19 05:46:39 +01:00
return pbBucket , rpcstatus . Error ( rpcstatus . Internal , "UUID marshal error" )
2019-07-19 16:17:34 +01:00
}
2020-03-04 17:38:52 +00:00
pbBucket = & pb . Bucket {
2020-01-28 13:44:47 +00:00
Name : [ ] byte ( bucket . Name ) ,
2020-03-04 17:38:52 +00:00
PathCipher : pb . CipherSuite ( bucket . PathCipher ) ,
2020-01-28 13:44:47 +00:00
PartnerId : partnerID ,
CreatedAt : bucket . Created ,
DefaultSegmentSize : bucket . DefaultSegmentsSize ,
DefaultRedundancyScheme : rs ,
2019-07-08 23:32:18 +01:00
DefaultEncryptionParameters : & pb . EncryptionParameters {
2020-03-04 17:38:52 +00:00
CipherSuite : pb . CipherSuite ( bucket . DefaultEncryptionParameters . CipherSuite ) ,
2019-07-08 23:32:18 +01:00
BlockSize : int64 ( bucket . DefaultEncryptionParameters . BlockSize ) ,
} ,
2020-03-04 17:38:52 +00:00
}
// this part is to provide default ciphers (path and encryption) for old uplinks
// new uplinks are using ciphers from encryption access
if pbBucket . PathCipher == pb . CipherSuite_ENC_UNSPECIFIED {
pbBucket . PathCipher = pb . CipherSuite_ENC_AESGCM
}
if pbBucket . DefaultEncryptionParameters . CipherSuite == pb . CipherSuite_ENC_UNSPECIFIED {
pbBucket . DefaultEncryptionParameters . CipherSuite = pb . CipherSuite_ENC_AESGCM
pbBucket . DefaultEncryptionParameters . BlockSize = int64 ( rs . ErasureShareSize * rs . MinReq )
}
return pbBucket , nil
2019-07-01 23:17:30 +01:00
}
2019-07-16 11:39:23 +01:00
2020-06-30 22:49:29 +01:00
// BeginObject begins object.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) BeginObject ( ctx context . Context , req * pb . ObjectBeginRequest ) ( resp * pb . ObjectBeginResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-16 11:39:23 +01:00
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-16 11:39:23 +01:00
}
2020-01-03 09:27:10 +00:00
if ! req . ExpiresAt . IsZero ( ) && ! req . ExpiresAt . After ( time . Now ( ) ) {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "Invalid expiration time" )
}
2020-06-15 12:49:09 +01:00
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2020-03-16 08:55:52 +00:00
}
// TODO this needs to be optimized to avoid DB call on each request
2021-04-02 17:19:17 +01:00
exists , err := endpoint . metainfo . HasBucket ( ctx , req . Bucket , keyInfo . ProjectID )
2020-03-16 08:55:52 +00:00
if err != nil {
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2021-04-02 17:19:17 +01:00
} else if ! exists {
return nil , rpcstatus . Error ( rpcstatus . NotFound , "bucket not found: non-existing-bucket" )
2020-03-16 08:55:52 +00:00
}
2020-07-31 12:24:40 +01:00
_ , err = endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
canDelete := err == nil
if canDelete {
2021-01-11 10:08:18 +00:00
_ , err = endpoint . DeleteObjectAnyStatus ( ctx , metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} )
if err != nil && ! storj . ErrObjectNotFound . Has ( err ) {
2020-07-31 12:24:40 +01:00
return nil , err
}
} else {
2020-12-22 10:44:28 +00:00
_ , err = endpoint . metainfo . metabaseDB . GetObjectLatestVersion ( ctx , metabase . GetObjectLatestVersion {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} ,
} )
2020-07-31 12:24:40 +01:00
if err == nil {
return nil , rpcstatus . Error ( rpcstatus . PermissionDenied , "Unauthorized API credentials" )
}
2020-01-24 13:25:38 +00:00
}
2020-11-06 11:54:52 +00:00
if err := endpoint . ensureAttribution ( ctx , req . Header , keyInfo , req . Bucket ) ; err != nil {
return nil , err
}
// use only satellite values for Redundancy Scheme
pbRS := endpoint . defaultRS
streamID , err := uuid . New ( )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
// TODO this will work only with newsest uplink
// figue out what to do with this
encryptionParameters := storj . EncryptionParameters {
CipherSuite : storj . CipherSuite ( req . EncryptionParameters . CipherSuite ) ,
BlockSize : int32 ( req . EncryptionParameters . BlockSize ) , // TODO check conversion
}
2020-11-30 12:33:06 +00:00
var expiresAt * time . Time
if req . ExpiresAt . IsZero ( ) {
expiresAt = nil
} else {
expiresAt = & req . ExpiresAt
}
2021-01-12 11:29:13 +00:00
object , err := endpoint . metainfo . metabaseDB . BeginObjectExactVersion ( ctx , metabase . BeginObjectExactVersion {
2020-11-06 11:54:52 +00:00
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
StreamID : streamID ,
Version : metabase . Version ( 1 ) ,
} ,
2020-11-30 12:33:06 +00:00
ExpiresAt : expiresAt ,
2020-11-06 11:54:52 +00:00
Encryption : encryptionParameters ,
} )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
satStreamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-03-24 09:33:56 +00:00
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Version : int32 ( object . Version ) ,
Redundancy : pbRS ,
CreationDate : object . CreatedAt ,
ExpirationDate : req . ExpiresAt ,
StreamId : streamID [ : ] ,
MultipartObject : object . FixedSegmentSize <= 0 ,
EncryptionParameters : req . EncryptionParameters ,
2020-11-06 11:54:52 +00:00
} )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Object Upload" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "put" ) , zap . String ( "type" , "object" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_put_object" ) . Mark ( 1 )
2020-01-20 18:48:26 +00:00
2019-07-16 11:39:23 +01:00
return & pb . ObjectBeginResponse {
2020-04-06 12:36:34 +01:00
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Version : req . Version ,
2020-11-06 11:54:52 +00:00
StreamId : satStreamID ,
2020-04-06 12:36:34 +01:00
RedundancyScheme : pbRS ,
2019-07-16 11:39:23 +01:00
} , nil
}
2020-06-30 22:49:29 +01:00
// CommitObject commits an object when all its segments have already been committed.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) CommitObject ( ctx context . Context , req * pb . ObjectCommitRequest ) ( resp * pb . ObjectCommitResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-01-27 20:25:52 +00:00
return endpoint . commitObject ( ctx , req , nil )
}
// commitObject marks an object as committed in the metabase once all of its
// segments have been committed. The stream identity (bucket, key, stream id)
// is taken from the satellite-signed stream id in the request, not from
// client-supplied fields.
//
// NOTE(review): the pointer parameter is unused in this body; it may exist for
// callers outside this view — confirm before removing.
func (endpoint *Endpoint) commitObject(ctx context.Context, req *pb.ObjectCommitRequest, pointer *pb.Pointer) (resp *pb.ObjectCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// Decode and verify the satellite stream id issued at BeginObject time.
	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// The macaroon must grant write access to the exact bucket/path being committed.
	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Bucket:        streamID.Bucket,
		EncryptedPath: streamID.EncryptedPath,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	// Reject oversized metadata before touching the database.
	metadataSize := memory.Size(len(req.EncryptedMetadata))
	if metadataSize > endpoint.config.MaxMetadataSize {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("Metadata is too large, got %v, maximum allowed is %v", metadataSize, endpoint.config.MaxMetadataSize))
	}

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// for old uplinks get Encryption from StreamMeta
	// The unmarshal error is deliberately ignored: newer uplinks send metadata
	// that is not a StreamMeta, in which case encryption stays zero-valued.
	streamMeta := &pb.StreamMeta{}
	encryption := storj.EncryptionParameters{}
	err = pb.Unmarshal(req.EncryptedMetadata, streamMeta)
	if err == nil {
		encryption.CipherSuite = storj.CipherSuite(streamMeta.EncryptionType)
		encryption.BlockSize = streamMeta.EncryptionBlockSize
	}

	// Commit the object at version 1 (versioning is not yet driven by the request).
	_, err = endpoint.metainfo.metabaseDB.CommitObject(ctx, metabase.CommitObject{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedPath),
			StreamID:   id,
			Version:    metabase.Version(1),
		},
		EncryptedMetadata:             req.EncryptedMetadata,
		EncryptedMetadataNonce:        req.EncryptedMetadataNonce[:],
		EncryptedMetadataEncryptedKey: req.EncryptedMetadataEncryptedKey,
		Encryption:                    encryption,
	})
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return &pb.ObjectCommitResponse{}, nil
}
2021-02-17 09:54:04 +00:00
// GetObject gets single object metadata.
//
// For older uplinks that do not request per-segment redundancy
// (req.RedundancySchemePerSegment == false), the redundancy scheme of the
// first segment is fetched and attached to the returned object, falling back
// to the endpoint default when the segment lookup fails.
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// Best-effort uplink version telemetry; never fails the request.
	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionRead,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedPath,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// Fetch the latest committed version of the object.
	mbObject, err := endpoint.metainfo.metabaseDB.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedPath),
		},
	})
	if err != nil {
		if storj.ErrObjectNotFound.Has(err) {
			return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	var segmentRS *pb.RedundancyScheme
	// TODO we may try to avoid additional request for inline objects
	if !req.RedundancySchemePerSegment && mbObject.SegmentCount > 0 {
		// Start from the endpoint default so a failed segment lookup still
		// yields a usable scheme.
		segmentRS = endpoint.defaultRS
		segment, err := endpoint.metainfo.metabaseDB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: mbObject.StreamID,
			Position: metabase.SegmentPosition{
				Index: 0,
			},
		})
		if err != nil {
			// don't fail because its possible that its multipart object
			endpoint.log.Error("internal", zap.Error(err))
		} else {
			// Use the actual redundancy recorded on the first segment.
			segmentRS = &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			}
		}

		// monitor how many uplinks is still using this additional code
		mon.Meter("req_get_object_rs_per_object").Mark(1)
	}

	object, err := endpoint.objectToProto(ctx, mbObject, segmentRS)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "object"))
	mon.Meter("req_get_object").Mark(1)

	return &pb.ObjectGetResponse{Object: object}, nil
}
2021-03-31 12:08:22 +01:00
// DownloadObject gets object information, creates a download for segments and lists the object segments.
//
// The response bundles the object metadata, a download response for the first
// segment in the requested range (inline data or order limits for remote
// pieces), and the positions of all segments in the range.
func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDownloadRequest) (resp *pb.ObjectDownloadResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// Best-effort uplink version telemetry; never fails the request.
	err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
	if err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionRead,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// Enforce the monthly bandwidth limit. If the check itself errors, the
	// download proceeds without enforcement (fail-open by design).
	if exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID); err != nil {
		endpoint.log.Error("Retrieving project bandwidth total failed; bandwidth limit won't be enforced", zap.Error(err))
	} else if exceeded {
		endpoint.log.Error("Monthly bandwidth limit exceeded",
			zap.Stringer("Limit", limit),
			zap.Stringer("Project ID", keyInfo.ProjectID),
		)
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
	}

	// get the object information
	object, err := endpoint.metainfo.metabaseDB.GetObjectLatestVersion(ctx, metabase.GetObjectLatestVersion{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		if storj.ErrObjectNotFound.Has(err) {
			return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// get the range segments
	streamRange, err := calculateStreamRange(object, req.Range)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	segments, err := endpoint.metainfo.metabaseDB.ListStreamPositions(ctx, metabase.ListStreamPositions{
		StreamID: object.StreamID,
		Range:    streamRange,
		Limit:    int(req.Limit),
	})
	if err != nil {
		if metabase.ErrInvalidRequest.Has(err) {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// get the download response for the first segment
	downloadSegments, err := func() ([]*pb.SegmentDownloadResponse, error) {
		if len(segments.Segments) == 0 {
			return nil, nil
		}
		// Migrated (old-format) objects have no plain offsets, so a request
		// starting past offset 0 cannot pre-build the first segment download.
		if object.IsMigrated() && streamRange != nil && streamRange.PlainStart > 0 {
			return nil, nil
		}

		segment, err := endpoint.metainfo.metabaseDB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: object.StreamID,
			Position: segments.Segments[0].Position,
		})
		if err != nil {
			// object was deleted between the steps
			if storj.ErrObjectNotFound.Has(err) {
				return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
			}
			if metabase.ErrInvalidRequest.Has(err) {
				return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
			}
			endpoint.log.Error("internal", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		downloadSizes := endpoint.calculateDownloadSizes(streamRange, segment, object.Encryption)

		// Update the current bandwidth cache value incrementing the SegmentSize.
		err = endpoint.projectUsage.UpdateProjectBandwidthUsage(ctx, keyInfo.ProjectID, downloadSizes.encryptedSize)
		if err != nil {
			// log it and continue. it's most likely our own fault that we couldn't
			// track it, and the only thing that will be affected is our per-project
			// bandwidth limits.
			endpoint.log.Error("Could not track the new project's bandwidth usage", zap.Stringer("Project ID", keyInfo.ProjectID), zap.Error(err))
		}

		encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
		if err != nil {
			endpoint.log.Error("unable to get encryption key nonce from metadata", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		// Inline segments carry their data directly; no order limits needed.
		if segment.Inline() {
			err := endpoint.orders.UpdateGetInlineOrder(ctx, object.Location().Bucket(), downloadSizes.plainSize)
			if err != nil {
				endpoint.log.Error("internal", zap.Error(err))
				return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
			}
			endpoint.log.Info("Inline Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "inline"))
			mon.Meter("req_get_inline").Mark(1)

			return []*pb.SegmentDownloadResponse{{
				PlainOffset:         segment.PlainOffset,
				PlainSize:           int64(segment.PlainSize),
				SegmentSize:         int64(segment.EncryptedSize),
				EncryptedInlineData: segment.InlineData,
				EncryptedKeyNonce:   encryptedKeyNonce,
				EncryptedKey:        segment.EncryptedKey,
				Position: &pb.SegmentPosition{
					PartNumber: int32(segment.Position.Part),
					Index:      int32(segment.Position.Index),
				},
			}}, nil
		}

		// Remote segment: build signed order limits for the storage nodes.
		limits, privateKey, err := endpoint.orders.CreateGetOrderLimits(ctx, object.Location().Bucket(), segment, downloadSizes.orderLimit)
		if err != nil {
			if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
				endpoint.log.Error("Unable to create order limits.",
					zap.Stringer("Project ID", keyInfo.ProjectID),
					zap.Stringer("API Key ID", keyInfo.ID),
					zap.Error(err),
				)
			}
			endpoint.log.Error("internal", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		limits = sortLimits(limits, segment)

		// workaround to avoid sending nil values on top level
		for i := range limits {
			if limits[i] == nil {
				limits[i] = &pb.AddressedOrderLimit{}
			}
		}

		endpoint.log.Info("Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "remote"))
		mon.Meter("req_get_remote").Mark(1)

		return []*pb.SegmentDownloadResponse{{
			AddressedLimits: limits,
			PrivateKey:      privateKey,
			PlainOffset:     segment.PlainOffset,
			PlainSize:       int64(segment.PlainSize),
			SegmentSize:     int64(segment.EncryptedSize),

			EncryptedKeyNonce: encryptedKeyNonce,
			EncryptedKey:      segment.EncryptedKey,
			RedundancyScheme: &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			},

			Position: &pb.SegmentPosition{
				PartNumber: int32(segment.Position.Part),
				Index:      int32(segment.Position.Index),
			},
		}}, nil
	}()
	if err != nil {
		return nil, err
	}

	// convert to response
	protoObject, err := endpoint.objectToProto(ctx, object, nil)
	if err != nil {
		endpoint.log.Error("unable to convert object to proto", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	segmentList, err := convertStreamListResults(segments)
	if err != nil {
		endpoint.log.Error("unable to convert stream list", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Download Object", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "download"), zap.String("type", "object"))
	mon.Meter("req_download_object").Mark(1)

	return &pb.ObjectDownloadResponse{
		Object: protoObject,

		// The RPC API allows for multiple segment download responses, but for now
		// we return only one. This can be changed in the future if it seems useful
		// to return more than one on the initial response.
		SegmentDownload: downloadSegments,

		// In the case where the client needs the segment list, it will contain
		// every segment. In the case where the segment list is not needed,
		// segmentListItems will be nil.
		SegmentList: segmentList,
	}, nil
}
2021-05-13 14:31:55 +01:00
// downloadSizes holds the byte counts computed for a single segment download:
// what the client receives, what traverses the wire, and what may be read
// from storage nodes under the issued order limits.
type downloadSizes struct {
	// amount of data that uplink eventually gets
	plainSize int64
	// amount of data that's present after encryption
	encryptedSize int64
	// amount of data that's read from a storage node
	orderLimit int64
}
// calculateDownloadSizes computes the plain, encrypted, and order-limit byte
// counts for downloading (part of) a segment. For inline segments the whole
// segment is always returned. For remote segments the requested plain range is
// clipped to the segment, aligned up to encryption block boundaries, and then
// to erasure stripe boundaries to size the per-node order limit.
func (endpoint *Endpoint) calculateDownloadSizes(streamRange *metabase.StreamRange, segment metabase.Segment, encryptionParams storj.EncryptionParameters) downloadSizes {
	if segment.Inline() {
		// Inline data is served as-is; no order limit is required.
		return downloadSizes{
			plainSize:     int64(len(segment.InlineData)),
			encryptedSize: int64(segment.EncryptedSize),
		}
	}

	// calculate the range inside the given segment
	readStart := segment.PlainOffset
	if streamRange != nil && readStart <= streamRange.PlainStart {
		readStart = streamRange.PlainStart
	}
	readLimit := segment.PlainOffset + int64(segment.PlainSize)
	if streamRange != nil && streamRange.PlainLimit < readLimit {
		readLimit = streamRange.PlainLimit
	}

	plainSize := readLimit - readStart

	// calculate the read range given the segment start
	readStart -= segment.PlainOffset
	readLimit -= segment.PlainOffset

	// align to encryption block size
	// The encrypter is created only to learn the in/out block sizes; the key
	// and nonce values are placeholders and never used for actual encryption.
	enc, err := encryption.NewEncrypter(encryptionParams.CipherSuite, &storj.Key{1}, &storj.Nonce{1}, int(encryptionParams.BlockSize))
	if err != nil {
		// We ignore the error and fallback to the max amount to download.
		// It's unlikely that we fail here, but if we do, we don't want to block downloading.
		endpoint.log.Error("unable to create encrypter", zap.Error(err))
		return downloadSizes{
			plainSize:     int64(segment.PlainSize),
			encryptedSize: int64(segment.EncryptedSize),
			orderLimit:    0,
		}
	}

	encryptedStartBlock, encryptedLimitBlock := calculateBlocks(readStart, readLimit, int64(enc.InBlockSize()))
	encryptedStart, encryptedLimit := encryptedStartBlock*int64(enc.OutBlockSize()), encryptedLimitBlock*int64(enc.OutBlockSize())
	encryptedSize := encryptedLimit - encryptedStart
	// Block-size rounding can overshoot the segment's actual encrypted size.
	if encryptedSize > int64(segment.EncryptedSize) {
		encryptedSize = int64(segment.EncryptedSize)
	}

	// align to blocks
	stripeSize := int64(segment.Redundancy.StripeSize())
	stripeStart, stripeLimit := alignToBlock(encryptedStart, encryptedLimit, stripeSize)

	// calculate how much shares we need to download from a node
	stripeCount := (stripeLimit - stripeStart) / stripeSize
	orderLimit := stripeCount * int64(segment.Redundancy.ShareSize)

	return downloadSizes{
		plainSize:     plainSize,
		encryptedSize: encryptedSize,
		orderLimit:    orderLimit,
	}
}
// calculateBlocks converts a [start, limit) byte range into a block range:
// the block containing start (rounded down) and the first block index at or
// past limit (rounded up).
func calculateBlocks(start, limit int64, blockSize int64) (startBlock, limitBlock int64) {
	startBlock = start / blockSize
	limitBlock = (limit + blockSize - 1) / blockSize
	return startBlock, limitBlock
}
// alignToBlock widens [start, limit) to enclosing block boundaries: start is
// rounded down and limit rounded up to multiples of blockSize.
func alignToBlock(start, limit int64, blockSize int64) (alignedStart, alignedLimit int64) {
	alignedStart = (start / blockSize) * blockSize
	alignedLimit = ((limit + blockSize - 1) / blockSize) * blockSize
	return alignedStart, alignedLimit
}
2021-03-31 12:08:22 +01:00
func calculateStreamRange ( object metabase . Object , req * pb . Range ) ( * metabase . StreamRange , error ) {
if req == nil || req . Range == nil {
return nil , nil
}
2021-04-09 09:24:18 +01:00
if object . IsMigrated ( ) {
// The object is in old format, which does not have plain_offset specified.
// We need to fallback to returning all segments.
2021-03-31 12:08:22 +01:00
return nil , nil
}
switch r := req . Range . ( type ) {
case * pb . Range_Start :
if r . Start == nil {
return nil , Error . New ( "Start missing for Range_Start" )
}
return & metabase . StreamRange {
PlainStart : r . Start . PlainStart ,
PlainLimit : object . TotalPlainSize ,
} , nil
case * pb . Range_StartLimit :
if r . StartLimit == nil {
return nil , Error . New ( "StartEnd missing for Range_StartEnd" )
}
return & metabase . StreamRange {
PlainStart : r . StartLimit . PlainStart ,
PlainLimit : r . StartLimit . PlainLimit ,
} , nil
case * pb . Range_Suffix :
if r . Suffix == nil {
return nil , Error . New ( "Suffix missing for Range_Suffix" )
}
return & metabase . StreamRange {
PlainStart : object . TotalPlainSize - r . Suffix . PlainSuffix ,
PlainLimit : object . TotalPlainSize ,
} , nil
}
// if it's a new unsupported range type, let's return all data
return nil , nil
}
2020-06-30 22:49:29 +01:00
// ListObjects list objects according to specific parameters.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) ListObjects ( ctx context . Context , req * pb . ObjectListRequest ) ( resp * pb . ObjectListResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-16 11:39:23 +01:00
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
2019-10-24 22:05:08 +01:00
EncryptedPath : req . EncryptedPrefix ,
2019-07-16 11:39:23 +01:00
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-16 11:39:23 +01:00
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-16 11:39:23 +01:00
}
2020-03-16 08:55:52 +00:00
// TODO this needs to be optimized to avoid DB call on each request
2021-04-02 17:19:17 +01:00
exists , err := endpoint . metainfo . HasBucket ( ctx , req . Bucket , keyInfo . ProjectID )
2020-03-16 08:55:52 +00:00
if err != nil {
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2021-04-02 17:19:17 +01:00
} else if ! exists {
return nil , rpcstatus . Error ( rpcstatus . NotFound , "bucket not found: non-existing-bucket" )
2020-03-16 08:55:52 +00:00
}
2020-11-06 12:20:54 +00:00
limit := int ( req . Limit )
if limit < 0 {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "limit is negative" )
}
2021-06-25 09:19:32 +01:00
metabase . ListLimit . Ensure ( & limit )
2019-07-16 11:39:23 +01:00
2020-11-17 12:57:53 +00:00
var prefix metabase . ObjectKey
if len ( req . EncryptedPrefix ) != 0 {
prefix = metabase . ObjectKey ( req . EncryptedPrefix )
2020-11-17 13:37:19 +00:00
if prefix [ len ( prefix ) - 1 ] != metabase . Delimiter {
prefix += metabase . ObjectKey ( metabase . Delimiter )
2020-11-17 12:57:53 +00:00
}
}
2020-11-18 11:16:00 +00:00
// Default to Commmitted status for backward-compatibility with older uplinks.
status := metabase . Committed
if req . Status != pb . Object_INVALID {
status = metabase . ObjectStatus ( req . Status )
}
2020-12-01 14:01:44 +00:00
cursor := string ( req . EncryptedCursor )
if len ( cursor ) != 0 {
cursor = string ( prefix ) + cursor
}
2021-08-02 19:30:02 +01:00
includeMetadata := true
if req . UseObjectIncludes {
includeMetadata = req . ObjectIncludes . Metadata
}
2020-11-06 12:20:54 +00:00
resp = & pb . ObjectListResponse { }
// TODO: Replace with IterateObjectsLatestVersion when ready
2020-12-21 15:07:00 +00:00
err = endpoint . metainfo . metabaseDB . IterateObjectsAllVersionsWithStatus ( ctx ,
metabase . IterateObjectsWithStatus {
2020-11-06 12:20:54 +00:00
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
2020-11-17 12:57:53 +00:00
Prefix : prefix ,
2020-11-24 09:50:02 +00:00
Cursor : metabase . IterateCursor {
2020-12-01 14:01:44 +00:00
Key : metabase . ObjectKey ( cursor ) ,
2020-11-24 09:50:02 +00:00
Version : 1 , // TODO: set to a the version from the protobuf request when it supports this
} ,
2021-08-03 02:26:19 +01:00
Recursive : req . Recursive ,
BatchSize : limit + 1 ,
Status : status ,
IncludeMetadata : includeMetadata ,
2020-11-06 12:20:54 +00:00
} , func ( ctx context . Context , it metabase . ObjectsIterator ) error {
entry := metabase . ObjectEntry { }
for len ( resp . Items ) < limit && it . Next ( ctx , & entry ) {
2021-08-02 19:30:02 +01:00
item , err := endpoint . objectEntryToProtoListItem ( ctx , req . Bucket , entry , prefix , includeMetadata )
2020-12-02 11:34:41 +00:00
if err != nil {
return err
2020-11-19 12:21:51 +00:00
}
2020-11-06 12:20:54 +00:00
resp . Items = append ( resp . Items , item )
}
resp . More = it . Next ( ctx , & entry )
return nil
} ,
)
2019-07-16 11:39:23 +01:00
if err != nil {
2020-11-06 12:20:54 +00:00
if metabase . ErrInvalidRequest . Has ( err ) {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
endpoint . log . Error ( "unable to list objects" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-16 11:39:23 +01:00
}
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Object List" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "list" ) , zap . String ( "type" , "object" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_list_object" ) . Mark ( 1 )
2019-07-16 11:39:23 +01:00
2020-11-06 12:20:54 +00:00
return resp , nil
2019-07-16 11:39:23 +01:00
}
2021-01-11 12:06:04 +00:00
// ListPendingObjectStreams list pending objects according to specific parameters.
func ( endpoint * Endpoint ) ListPendingObjectStreams ( ctx context . Context , req * pb . ObjectListPendingStreamsRequest ) ( resp * pb . ObjectListPendingStreamsResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2021-01-11 12:06:04 +00:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
// TODO this needs to be optimized to avoid DB call on each request
2021-04-02 17:19:17 +01:00
exists , err := endpoint . metainfo . HasBucket ( ctx , req . Bucket , keyInfo . ProjectID )
2021-01-11 12:06:04 +00:00
if err != nil {
endpoint . log . Error ( "unable to check bucket" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2021-04-02 17:19:17 +01:00
} else if ! exists {
return nil , rpcstatus . Error ( rpcstatus . NotFound , "bucket not found: non-existing-bucket" )
2021-01-11 12:06:04 +00:00
}
cursor := metabase . StreamIDCursor { }
if req . StreamIdCursor != nil {
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamIdCursor )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
cursor . StreamID , err = uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-01-11 12:06:04 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
}
limit := int ( req . Limit )
if limit < 0 {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "limit is negative" )
}
2021-06-25 09:19:32 +01:00
metabase . ListLimit . Ensure ( & limit )
2021-01-11 12:06:04 +00:00
resp = & pb . ObjectListPendingStreamsResponse { }
resp . Items = [ ] * pb . ObjectListItem { }
err = endpoint . metainfo . metabaseDB . IteratePendingObjectsByKey ( ctx ,
metabase . IteratePendingObjectsByKey {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} ,
BatchSize : limit + 1 ,
Cursor : cursor ,
} , func ( ctx context . Context , it metabase . ObjectsIterator ) error {
entry := metabase . ObjectEntry { }
for len ( resp . Items ) < limit && it . Next ( ctx , & entry ) {
2021-08-02 19:30:02 +01:00
item , err := endpoint . objectEntryToProtoListItem ( ctx , req . Bucket , entry , "" , true )
2021-01-11 12:06:04 +00:00
if err != nil {
return err
}
resp . Items = append ( resp . Items , item )
}
resp . More = it . Next ( ctx , & entry )
return nil
} ,
)
if err != nil {
if metabase . ErrInvalidRequest . Has ( err ) {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
endpoint . log . Error ( "unable to list pending object streams" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
endpoint . log . Info ( "List pending object streams" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "list" ) , zap . String ( "type" , "object" ) )
mon . Meter ( "req_list_pending_object_streams" ) . Mark ( 1 )
return resp , nil
}
2019-12-10 11:15:35 +00:00
// BeginDeleteObject begins object deletion process.
2019-07-16 11:39:23 +01:00
func ( endpoint * Endpoint ) BeginDeleteObject ( ctx context . Context , req * pb . ObjectBeginDeleteRequest ) ( resp * pb . ObjectBeginDeleteResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-03-11 15:53:16 +00:00
now := time . Now ( )
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-16 11:39:23 +01:00
Op : macaroon . ActionDelete ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
2020-03-11 15:53:16 +00:00
Time : now ,
2019-07-16 11:39:23 +01:00
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-16 11:39:23 +01:00
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-16 11:39:23 +01:00
}
2020-03-11 15:53:16 +00:00
_ , err = endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : now ,
} )
canRead := err == nil
_ , err = endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionList ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : now ,
} )
canList := err == nil
2020-12-03 18:04:01 +00:00
var deletedObjects [ ] * pb . Object
if req . GetStatus ( ) == int32 ( metabase . Pending ) {
if req . StreamId == nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "StreamID missing" )
}
var pbStreamID * internalpb . StreamID
pbStreamID , err = endpoint . unmarshalSatStreamID ( ctx , * ( req . StreamId ) )
if err == nil {
var streamID uuid . UUID
streamID , err = uuid . FromBytes ( pbStreamID . StreamId )
if err == nil {
2021-05-04 14:51:40 +01:00
deletedObjects , err = endpoint . DeletePendingObject ( ctx ,
metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
Version : metabase . Version ( req . GetVersion ( ) ) ,
StreamID : streamID ,
} )
2020-12-03 18:04:01 +00:00
}
}
} else {
deletedObjects , err = endpoint . DeleteCommittedObject ( ctx , keyInfo . ProjectID , string ( req . Bucket ) , metabase . ObjectKey ( req . EncryptedPath ) )
}
2019-08-01 10:04:31 +01:00
if err != nil {
2020-03-11 15:53:16 +00:00
if ! canRead && ! canList {
// No error info is returned if neither Read, nor List permission is granted
2020-04-02 08:45:51 +01:00
return & pb . ObjectBeginDeleteResponse { } , nil
2020-03-11 15:53:16 +00:00
}
2021-08-06 21:55:34 +01:00
if storj . ErrObjectNotFound . Has ( err ) {
return nil , rpcstatus . Wrap ( rpcstatus . NotFound , err )
}
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Wrap ( rpcstatus . Internal , err )
2019-08-01 10:04:31 +01:00
}
2020-07-27 21:12:14 +01:00
var object * pb . Object
if canRead || canList {
// Info about deleted object is returned only if either Read, or List permission is granted
2020-08-11 14:00:57 +01:00
if err != nil {
endpoint . log . Error ( "failed to construct deleted object information" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . String ( "Bucket" , string ( req . Bucket ) ) ,
zap . String ( "Encrypted Path" , string ( req . EncryptedPath ) ) ,
zap . Error ( err ) ,
)
}
2020-07-27 21:12:14 +01:00
if len ( deletedObjects ) > 0 {
object = deletedObjects [ 0 ]
}
}
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Object Delete" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "delete" ) , zap . String ( "type" , "object" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_delete_object" ) . Mark ( 1 )
2020-01-20 18:48:26 +00:00
2019-07-16 11:39:23 +01:00
return & pb . ObjectBeginDeleteResponse {
2020-07-17 10:17:31 +01:00
Object : object ,
2019-07-16 11:39:23 +01:00
} , nil
}
2020-08-11 18:35:23 +01:00
// GetObjectIPs returns the IP addresses of the nodes holding the pieces for
// the provided object. This is useful for knowing the locations of the pieces.
func ( endpoint * Endpoint ) GetObjectIPs ( ctx context . Context , req * pb . ObjectGetIPsRequest ) ( resp * pb . ObjectGetIPsResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-08-13 17:43:21 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionRead ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2020-12-07 15:43:57 +00:00
// TODO we may need custom metabase request to avoid two DB calls
object , err := endpoint . metainfo . metabaseDB . GetObjectLatestVersion ( ctx , metabase . GetObjectLatestVersion {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedPath ) ,
} ,
} )
2020-08-13 17:43:21 +01:00
if err != nil {
2020-12-07 15:43:57 +00:00
if storj . ErrObjectNotFound . Has ( err ) {
return nil , rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
2020-08-13 17:43:21 +01:00
}
2020-12-07 15:43:57 +00:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-08-13 17:43:21 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-03-08 13:09:32 +00:00
pieceCountByNodeID , err := endpoint . metainfo . metabaseDB . GetStreamPieceCountByNodeID ( ctx ,
metabase . GetStreamPieceCountByNodeID {
2020-12-07 15:43:57 +00:00
StreamID : object . StreamID ,
} )
2021-03-08 13:09:32 +00:00
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2020-08-13 17:43:21 +01:00
}
2021-01-13 13:59:05 +00:00
nodeIDs := make ( [ ] storj . NodeID , 0 , len ( pieceCountByNodeID ) )
for nodeID := range pieceCountByNodeID {
nodeIDs = append ( nodeIDs , nodeID )
}
nodeIPMap , err := endpoint . overlay . GetNodeIPs ( ctx , nodeIDs )
2020-08-13 17:43:21 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-08-13 17:43:21 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-01-13 13:59:05 +00:00
nodeIPs := make ( [ ] [ ] byte , 0 , len ( nodeIPMap ) )
pieceCount := int64 ( 0 )
reliablePieceCount := int64 ( 0 )
for nodeID , count := range pieceCountByNodeID {
pieceCount += count
ip , reliable := nodeIPMap [ nodeID ]
if ! reliable {
continue
2020-08-13 17:43:21 +01:00
}
2021-01-13 13:59:05 +00:00
nodeIPs = append ( nodeIPs , [ ] byte ( ip ) )
reliablePieceCount += count
2020-08-13 17:43:21 +01:00
}
2021-01-13 13:59:05 +00:00
return & pb . ObjectGetIPsResponse {
Ips : nodeIPs ,
2021-01-14 12:33:00 +00:00
SegmentCount : int64 ( object . SegmentCount ) ,
2021-01-13 13:59:05 +00:00
ReliablePieceCount : reliablePieceCount ,
PieceCount : pieceCount ,
} , nil
2020-08-11 18:35:23 +01:00
}
2021-07-02 13:39:46 +01:00
// UpdateObjectMetadata replaces object metadata.
func ( endpoint * Endpoint ) UpdateObjectMetadata ( ctx context . Context , req * pb . ObjectUpdateMetadataRequest ) ( resp * pb . ObjectUpdateMetadataResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
Op : macaroon . ActionWrite ,
Bucket : req . Bucket ,
EncryptedPath : req . EncryptedObjectKey ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
err = endpoint . validateBucket ( ctx , req . Bucket )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2021-07-08 15:50:37 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2021-07-08 15:50:37 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
err = endpoint . metainfo . metabaseDB . UpdateObjectMetadata ( ctx , metabase . UpdateObjectMetadata {
ObjectStream : metabase . ObjectStream {
2021-07-02 13:39:46 +01:00
ProjectID : keyInfo . ProjectID ,
BucketName : string ( req . Bucket ) ,
ObjectKey : metabase . ObjectKey ( req . EncryptedObjectKey ) ,
2021-07-08 15:50:37 +01:00
Version : metabase . Version ( req . Version ) ,
StreamID : id ,
2021-07-02 13:39:46 +01:00
} ,
EncryptedMetadata : req . EncryptedMetadata ,
EncryptedMetadataNonce : req . EncryptedMetadataNonce [ : ] ,
EncryptedMetadataEncryptedKey : req . EncryptedMetadataEncryptedKey ,
} )
if err != nil {
if storj . ErrObjectNotFound . Has ( err ) {
return nil , rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
}
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
return & pb . ObjectUpdateMetadataResponse { } , nil
}
2020-06-30 22:49:29 +01:00
// BeginSegment begins segment uploading.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) BeginSegment ( ctx context . Context , req * pb . SegmentBeginRequest ) ( resp * pb . SegmentBeginResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2019-07-24 12:33:23 +01:00
// no need to validate streamID fields because it was validated during BeginObject
2019-08-01 10:04:31 +01:00
if req . Position . Index < 0 {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "segment index must be greater then 0" )
2019-08-01 10:04:31 +01:00
}
2020-12-22 11:12:07 +00:00
if err := endpoint . checkExceedsStorageUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
return nil , err
2019-07-24 12:33:23 +01:00
}
redundancy , err := eestream . NewRedundancyStrategyFromProto ( streamID . Redundancy )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
maxPieceSize := eestream . CalcPieceSize ( req . MaxOrderLimit , redundancy )
2019-07-22 15:45:18 +01:00
2019-07-24 12:33:23 +01:00
request := overlay . FindStorageNodesRequest {
RequestedCount : redundancy . TotalCount ( ) ,
}
2020-05-06 14:05:31 +01:00
nodes , err := endpoint . overlay . FindStorageNodesForUpload ( ctx , request )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
rootPieceID , addressedLimits , piecePrivateKey , err := endpoint . orders . CreatePutOrderLimits ( ctx , bucket , nodes , streamID . ExpirationDate , maxPieceSize )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
pieces := metabase . Pieces { }
for i , limit := range addressedLimits {
pieces = append ( pieces , metabase . Piece {
Number : uint16 ( i ) ,
StorageNode : limit . Limit . StorageNodeId ,
} )
}
err = endpoint . metainfo . metabaseDB . BeginSegment ( ctx , metabase . BeginSegment {
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
ObjectKey : metabase . ObjectKey ( streamID . EncryptedPath ) ,
StreamID : id ,
Version : 1 ,
} ,
Position : metabase . SegmentPosition {
Part : uint32 ( req . Position . PartNumber ) ,
Index : uint32 ( req . Position . Index ) ,
} ,
RootPieceID : rootPieceID ,
Pieces : pieces ,
} )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2020-10-29 16:16:25 +00:00
segmentID , err := endpoint . packSegmentID ( ctx , & internalpb . SegmentID {
2019-07-24 12:33:23 +01:00
StreamId : streamID ,
2020-11-06 11:54:52 +00:00
PartNumber : req . Position . PartNumber ,
2019-08-01 10:04:31 +01:00
Index : req . Position . Index ,
2019-07-24 12:33:23 +01:00
OriginalOrderLimits : addressedLimits ,
RootPieceId : rootPieceID ,
CreationDate : time . Now ( ) ,
} )
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Segment Upload" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "put" ) , zap . String ( "type" , "remote" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_put_remote" ) . Mark ( 1 )
2019-12-02 14:39:19 +00:00
2019-07-24 12:33:23 +01:00
return & pb . SegmentBeginResponse {
2020-11-10 16:05:27 +00:00
SegmentId : segmentID ,
AddressedLimits : addressedLimits ,
PrivateKey : piecePrivateKey ,
RedundancyScheme : endpoint . defaultRS ,
2019-07-24 12:33:23 +01:00
} , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// CommitSegment commits segment after uploading.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) CommitSegment ( ctx context . Context , req * pb . SegmentCommitRequest ) ( resp * pb . SegmentCommitResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-01-27 20:25:52 +00:00
_ , resp , err = endpoint . commitSegment ( ctx , req , true )
return resp , err
}
2020-11-06 11:54:52 +00:00
// commitSegment verifies the piece upload results reported by the uplink
// against the original signed order limits and, when enough valid pieces
// remain, commits the segment to the metabase.
//
// The returned *pb.Pointer is always nil here and savePointer is not
// referenced in this body; both appear to be leftovers from the old
// pointerdb-based flow — confirm with callers before removing.
func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentCommitRequest, savePointer bool) (_ *pb.Pointer, resp *pb.SegmentCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	segmentID, err := endpoint.unmarshalSatSegmentID(ctx, req.SegmentId)
	if err != nil {
		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	streamID := segmentID.StreamId

	// Committing a segment requires write access to the object's bucket/path.
	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Bucket:        streamID.Bucket,
		EncryptedPath: streamID.EncryptedPath,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, nil, err
	}

	// cheap basic verification: reject early when fewer upload results than
	// the redundancy success (optimal) threshold were reported at all.
	if numResults := len(req.UploadResult); numResults < int(streamID.Redundancy.GetSuccessThreshold()) {
		endpoint.log.Debug("the results of uploaded pieces for the segment is below the redundancy optimal threshold",
			zap.Int("upload pieces results", numResults),
			zap.Int32("redundancy optimal threshold", streamID.Redundancy.GetSuccessThreshold()),
			zap.Stringer("Segment ID", req.SegmentId),
		)
		return nil, nil, rpcstatus.Errorf(rpcstatus.InvalidArgument,
			"the number of results of uploaded pieces (%d) is below the optimal threshold (%d)",
			numResults, streamID.Redundancy.GetSuccessThreshold(),
		)
	}

	// The satellite's default redundancy scheme is used for verification and
	// for the committed segment, not the scheme carried in the stream ID.
	rs := storj.RedundancyScheme{
		Algorithm:      storj.RedundancyAlgorithm(endpoint.defaultRS.Type),
		RequiredShares: int16(endpoint.defaultRS.MinReq),
		RepairShares:   int16(endpoint.defaultRS.RepairThreshold),
		OptimalShares:  int16(endpoint.defaultRS.SuccessThreshold),
		TotalShares:    int16(endpoint.defaultRS.Total),
		ShareSize:      endpoint.defaultRS.ErasureShareSize,
	}

	err = endpoint.pointerVerification.VerifySizes(ctx, rs, req.SizeEncryptedData, req.UploadResult)
	if err != nil {
		endpoint.log.Debug("piece sizes are invalid", zap.Error(err))
		return nil, nil, rpcstatus.Errorf(rpcstatus.InvalidArgument, "piece sizes are invalid: %v", err)
	}

	// extract the original order limits
	originalLimits := make([]*pb.OrderLimit, len(segmentID.OriginalOrderLimits))
	for i, orderLimit := range segmentID.OriginalOrderLimits {
		originalLimits[i] = orderLimit.Limit
	}

	// verify the piece upload results
	validPieces, invalidPieces, err := endpoint.pointerVerification.SelectValidPieces(ctx, req.UploadResult, originalLimits)
	if err != nil {
		endpoint.log.Debug("pointer verification failed", zap.Error(err))
		return nil, nil, rpcstatus.Errorf(rpcstatus.InvalidArgument, "pointer verification failed: %s", err)
	}

	// After filtering out invalid pieces, the segment still needs at least the
	// success threshold worth of valid pieces to be durable enough to commit.
	if len(validPieces) < int(rs.OptimalShares) {
		endpoint.log.Debug("Number of valid pieces is less than the success threshold",
			zap.Int("totalReceivedPieces", len(req.UploadResult)),
			zap.Int("validPieces", len(validPieces)),
			zap.Int("invalidPieces", len(invalidPieces)),
			zap.Int("successThreshold", int(rs.OptimalShares)),
		)

		errMsg := fmt.Sprintf("Number of valid pieces (%d) is less than the success threshold (%d). Found %d invalid pieces",
			len(validPieces),
			rs.OptimalShares,
			len(invalidPieces),
		)
		if len(invalidPieces) > 0 {
			// Include per-piece failure reasons so the uplink can diagnose
			// which nodes rejected or corrupted the upload.
			errMsg = fmt.Sprintf("%s. Invalid Pieces:", errMsg)
			for _, p := range invalidPieces {
				errMsg = fmt.Sprintf("%s\nNodeID: %v, PieceNum: %d, Reason: %s",
					errMsg, p.NodeID, p.PieceNum, p.Reason,
				)
			}
		}
		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, errMsg)
	}

	pieces := metabase.Pieces{}
	for _, result := range validPieces {
		pieces = append(pieces, metabase.Piece{
			Number:      uint16(result.PieceNum),
			StorageNode: result.NodeId,
		})
	}

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// A zero expiration date means the object never expires.
	var expiresAt *time.Time
	if !streamID.ExpirationDate.IsZero() {
		expiresAt = &streamID.ExpirationDate
	}

	mbCommitSegment := metabase.CommitSegment{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedPath),
			StreamID:   id,
			Version:    1,
		},
		ExpiresAt:         expiresAt,
		EncryptedKey:      req.EncryptedKey,
		EncryptedKeyNonce: req.EncryptedKeyNonce[:],

		EncryptedSize: int32(req.SizeEncryptedData), // TODO incompatible types int32 vs int64
		PlainSize:     int32(req.PlainSize),         // TODO incompatible types int32 vs int64

		EncryptedETag: req.EncryptedETag,

		Position: metabase.SegmentPosition{
			Part:  uint32(segmentID.PartNumber),
			Index: uint32(segmentID.Index),
		},
		RootPieceID: segmentID.RootPieceId,
		Redundancy:  rs,
		Pieces:      pieces,
	}

	err = endpoint.validateRemoteSegment(ctx, mbCommitSegment, originalLimits)
	if err != nil {
		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if err := endpoint.checkExceedsStorageUsage(ctx, keyInfo.ProjectID); err != nil {
		return nil, nil, err
	}

	segmentSize := req.SizeEncryptedData
	totalStored := calculateSpaceUsed(segmentSize, len(pieces), rs)

	// ToDo: Replace with hash & signature validation
	// Ensure neither uplink or storage nodes are cheating on us
	// We cannot have more redundancy than total/min
	if float64(totalStored) > (float64(segmentSize)/float64(rs.RequiredShares))*float64(rs.TotalShares) {
		endpoint.log.Debug("data size mismatch",
			zap.Int64("segment", segmentSize),
			zap.Int64("pieces", totalStored),
			zap.Int16("redundancy minimum requested", rs.RequiredShares),
			zap.Int16("redundancy total", rs.TotalShares),
		)
		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, "mismatched segment size and piece usage")
	}

	if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, segmentSize); err != nil {
		// log it and continue. it's most likely our own fault that we couldn't
		// track it, and the only thing that will be affected is our per-project
		// storage limits.
		endpoint.log.Error("Could not track new project's storage usage",
			zap.Stringer("Project ID", keyInfo.ProjectID),
			zap.Error(err),
		)
	}

	err = endpoint.metainfo.metabaseDB.CommitSegment(ctx, mbCommitSegment)
	if err != nil {
		if metabase.ErrInvalidRequest.Has(err) {
			return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
		}
		endpoint.log.Error("internal", zap.Error(err))
		return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return nil, &pb.SegmentCommitResponse{
		SuccessfulPieces: int32(len(pieces)),
	}, nil
}
2020-06-30 22:49:29 +01:00
// MakeInlineSegment makes inline segment on satellite.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) MakeInlineSegment ( ctx context . Context , req * pb . SegmentMakeInlineRequest ) ( resp * pb . SegmentMakeInlineResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-01-27 20:25:52 +00:00
_ , resp , err = endpoint . makeInlineSegment ( ctx , req , true )
return resp , err
}
2020-11-11 10:54:10 +00:00
// makeInlineSegment makes inline segment on satellite.
2020-01-27 20:25:52 +00:00
func ( endpoint * Endpoint ) makeInlineSegment ( ctx context . Context , req * pb . SegmentMakeInlineRequest , savePointer bool ) ( pointer * pb . Pointer , resp * pb . SegmentMakeInlineResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2020-01-27 20:25:52 +00:00
return nil , nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionWrite ,
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , nil , err
2019-07-22 15:45:18 +01:00
}
2019-08-01 10:04:31 +01:00
if req . Position . Index < 0 {
2020-01-27 20:25:52 +00:00
return nil , nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "segment index must be greater then 0" )
2019-07-24 12:33:23 +01:00
}
2020-04-01 10:15:24 +01:00
inlineUsed := int64 ( len ( req . EncryptedInlineData ) )
2020-04-09 09:19:16 +01:00
if inlineUsed > endpoint . encInlineSegmentSize {
2020-04-01 10:15:24 +01:00
return nil , nil , rpcstatus . Error ( rpcstatus . InvalidArgument , fmt . Sprintf ( "inline segment size cannot be larger than %s" , endpoint . config . MaxInlineSegmentSize ) )
}
2020-12-22 11:12:07 +00:00
if err := endpoint . checkExceedsStorageUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
return nil , nil , err
2019-07-24 12:33:23 +01:00
}
2019-10-31 17:27:38 +00:00
if err := endpoint . projectUsage . AddProjectStorageUsage ( ctx , keyInfo . ProjectID , inlineUsed ) ; err != nil {
2020-12-22 11:12:07 +00:00
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// bandwidth and storage limits.
endpoint . log . Error ( "Could not track new project's storage usage" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
2019-07-24 12:33:23 +01:00
}
2020-11-11 10:54:10 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-11 10:54:10 +00:00
return nil , nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2021-06-10 11:08:21 +01:00
var expiresAt * time . Time
if ! streamID . ExpirationDate . IsZero ( ) {
expiresAt = & streamID . ExpirationDate
}
2020-11-11 10:54:10 +00:00
err = endpoint . metainfo . metabaseDB . CommitInlineSegment ( ctx , metabase . CommitInlineSegment {
ObjectStream : metabase . ObjectStream {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
ObjectKey : metabase . ObjectKey ( streamID . EncryptedPath ) ,
StreamID : id ,
Version : 1 ,
} ,
2021-06-10 11:08:21 +01:00
ExpiresAt : expiresAt ,
2020-11-11 10:54:10 +00:00
EncryptedKey : req . EncryptedKey ,
EncryptedKeyNonce : req . EncryptedKeyNonce . Bytes ( ) ,
2020-01-27 20:25:52 +00:00
2020-11-11 10:54:10 +00:00
Position : metabase . SegmentPosition {
Part : uint32 ( req . Position . PartNumber ) ,
Index : uint32 ( req . Position . Index ) ,
} ,
2021-03-25 07:56:13 +00:00
PlainSize : int32 ( req . PlainSize ) , // TODO incompatible types int32 vs int64
EncryptedETag : req . EncryptedETag ,
2020-11-11 10:54:10 +00:00
InlineData : req . EncryptedInlineData ,
} )
if err != nil {
2020-11-20 12:37:54 +00:00
if metabase . ErrInvalidRequest . Has ( err ) {
return nil , nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
2020-11-11 10:54:10 +00:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
err = endpoint . orders . UpdatePutInlineOrder ( ctx , bucket , inlineUsed )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-01-27 20:25:52 +00:00
return nil , nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2019-07-22 15:45:18 +01:00
2020-01-20 18:48:26 +00:00
endpoint . log . Info ( "Inline Segment Upload" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "put" ) , zap . String ( "type" , "inline" ) )
2020-01-29 15:03:30 +00:00
mon . Meter ( "req_put_inline" ) . Mark ( 1 )
2019-12-02 14:39:19 +00:00
2020-11-11 10:54:10 +00:00
return nil , & pb . SegmentMakeInlineResponse { } , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// ListSegments list object segments.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) ListSegments ( ctx context . Context , req * pb . SegmentListRequest ) ( resp * pb . SegmentListResponse , err error ) {
2020-11-10 15:51:37 +00:00
defer mon . Task ( ) ( & ctx ) ( & err )
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
}
_ , err = endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2020-11-24 16:23:47 +00:00
Op : macaroon . ActionRead ,
2020-11-10 15:51:37 +00:00
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
return nil , err
}
cursor := req . CursorPosition
if cursor == nil {
cursor = & pb . SegmentPosition { }
}
id , err := uuid . FromBytes ( streamID . StreamId )
if err != nil {
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-03-08 12:10:33 +00:00
result , err := endpoint . metainfo . metabaseDB . ListStreamPositions ( ctx , metabase . ListStreamPositions {
2020-11-10 15:51:37 +00:00
StreamID : id ,
Cursor : metabase . SegmentPosition {
Part : uint32 ( cursor . PartNumber ) ,
Index : uint32 ( cursor . Index ) ,
} ,
Limit : int ( req . Limit ) ,
} )
if err != nil {
if storj . ErrObjectNotFound . Has ( err ) {
return nil , rpcstatus . Error ( rpcstatus . NotFound , err . Error ( ) )
}
endpoint . log . Error ( "internal" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2021-03-31 12:08:22 +01:00
response , err := convertStreamListResults ( result )
if err != nil {
endpoint . log . Error ( "unable to convert stream list" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
response . EncryptionParameters = streamID . EncryptionParameters
return response , nil
}
func convertStreamListResults ( result metabase . ListStreamPositionsResult ) ( * pb . SegmentListResponse , error ) {
2020-11-10 15:51:37 +00:00
items := make ( [ ] * pb . SegmentListItem , len ( result . Segments ) )
for i , item := range result . Segments {
items [ i ] = & pb . SegmentListItem {
Position : & pb . SegmentPosition {
PartNumber : int32 ( item . Position . Part ) ,
Index : int32 ( item . Position . Index ) ,
} ,
2021-03-29 16:31:17 +01:00
PlainSize : int64 ( item . PlainSize ) ,
PlainOffset : item . PlainOffset ,
2020-11-10 15:51:37 +00:00
}
2021-03-12 18:21:36 +00:00
if item . CreatedAt != nil {
items [ i ] . CreatedAt = * item . CreatedAt
}
2021-03-25 07:56:13 +00:00
items [ i ] . EncryptedETag = item . EncryptedETag
2021-03-31 12:08:22 +01:00
var err error
2021-03-25 07:56:13 +00:00
items [ i ] . EncryptedKeyNonce , err = storj . NonceFromBytes ( item . EncryptedKeyNonce )
if err != nil {
2021-03-31 12:08:22 +01:00
return nil , err
2021-03-25 07:56:13 +00:00
}
items [ i ] . EncryptedKey = item . EncryptedKey
2020-11-10 15:51:37 +00:00
}
return & pb . SegmentListResponse {
2021-03-31 12:08:22 +01:00
Items : items ,
More : result . More ,
2020-11-10 15:51:37 +00:00
} , nil
2019-07-22 15:45:18 +01:00
}
2020-06-30 22:49:29 +01:00
// DownloadSegment returns data necessary to download segment.
2019-07-22 15:45:18 +01:00
func ( endpoint * Endpoint ) DownloadSegment ( ctx context . Context , req * pb . SegmentDownloadRequest ) ( resp * pb . SegmentDownloadResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2019-07-22 15:45:18 +01:00
streamID , err := endpoint . unmarshalSatStreamID ( ctx , req . StreamId )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , err . Error ( ) )
2019-07-22 15:45:18 +01:00
}
2019-09-19 17:19:29 +01:00
keyInfo , err := endpoint . validateAuth ( ctx , req . Header , macaroon . Action {
2019-07-22 15:45:18 +01:00
Op : macaroon . ActionRead ,
Bucket : streamID . Bucket ,
EncryptedPath : streamID . EncryptedPath ,
Time : time . Now ( ) ,
} )
if err != nil {
2020-03-10 09:58:14 +00:00
return nil , err
2019-07-22 15:45:18 +01:00
}
2020-08-28 12:56:09 +01:00
bucket := metabase . BucketLocation { ProjectID : keyInfo . ProjectID , BucketName : string ( streamID . Bucket ) }
2019-07-24 12:33:23 +01:00
2020-12-22 11:12:07 +00:00
if exceeded , limit , err := endpoint . projectUsage . ExceedsBandwidthUsage ( ctx , keyInfo . ProjectID ) ; err != nil {
endpoint . log . Error ( "Retrieving project bandwidth total failed; bandwidth limit won't be enforced" , zap . Error ( err ) )
} else if exceeded {
endpoint . log . Error ( "Monthly bandwidth limit exceeded" ,
2020-04-13 10:31:17 +01:00
zap . Stringer ( "Limit" , limit ) ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
2019-07-24 12:33:23 +01:00
)
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . ResourceExhausted , "Exceeded Usage Limit" )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
id , err := uuid . FromBytes ( streamID . StreamId )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-11-06 11:54:52 +00:00
var segment metabase . Segment
if req . CursorPosition . PartNumber == 0 && req . CursorPosition . Index == - 1 {
2021-01-07 08:46:49 +00:00
if streamID . MultipartObject {
return nil , rpcstatus . Error ( rpcstatus . Unimplemented , "Used uplink version cannot download multipart objects." )
}
2020-11-06 11:54:52 +00:00
segment , err = endpoint . metainfo . metabaseDB . GetLatestObjectLastSegment ( ctx , metabase . GetLatestObjectLastSegment {
ObjectLocation : metabase . ObjectLocation {
ProjectID : keyInfo . ProjectID ,
BucketName : string ( streamID . Bucket ) ,
ObjectKey : metabase . ObjectKey ( streamID . EncryptedPath ) ,
} ,
} )
} else {
segment , err = endpoint . metainfo . metabaseDB . GetSegmentByPosition ( ctx , metabase . GetSegmentByPosition {
StreamID : id ,
Position : metabase . SegmentPosition {
Part : uint32 ( req . CursorPosition . PartNumber ) ,
Index : uint32 ( req . CursorPosition . Index ) ,
} ,
} )
}
2020-06-02 00:19:10 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-06-02 00:19:10 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2020-11-06 11:54:52 +00:00
// Update the current bandwidth cache value incrementing the SegmentSize.
err = endpoint . projectUsage . UpdateProjectBandwidthUsage ( ctx , keyInfo . ProjectID , int64 ( segment . EncryptedSize ) )
2020-06-02 09:39:41 +01:00
if err != nil {
2020-12-22 11:12:07 +00:00
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// bandwidth limits.
endpoint . log . Error ( "Could not track the new project's bandwidth usage" ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Error ( err ) ,
)
2020-06-02 09:39:41 +01:00
}
2019-08-01 10:04:31 +01:00
2020-11-06 11:54:52 +00:00
encryptedKeyNonce , err := storj . NonceFromBytes ( segment . EncryptedKeyNonce )
if err != nil {
endpoint . log . Error ( "unable to get encryption key nonce from metadata" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-08-01 10:04:31 +01:00
}
2019-07-24 12:33:23 +01:00
2021-01-07 15:03:16 +00:00
if segment . Inline ( ) {
2020-11-06 11:54:52 +00:00
err := endpoint . orders . UpdateGetInlineOrder ( ctx , bucket , int64 ( len ( segment . InlineData ) ) )
2019-07-24 12:33:23 +01:00
if err != nil {
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-01-29 15:03:30 +00:00
endpoint . log . Info ( "Inline Segment Download" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "get" ) , zap . String ( "type" , "inline" ) )
mon . Meter ( "req_get_inline" ) . Mark ( 1 )
2019-07-24 12:33:23 +01:00
return & pb . SegmentDownloadResponse {
2021-04-06 12:52:59 +01:00
PlainOffset : segment . PlainOffset ,
2021-03-31 12:08:22 +01:00
PlainSize : int64 ( segment . PlainSize ) ,
2020-11-06 11:54:52 +00:00
SegmentSize : int64 ( segment . EncryptedSize ) ,
EncryptedInlineData : segment . InlineData ,
2019-08-01 10:04:31 +01:00
EncryptedKeyNonce : encryptedKeyNonce ,
2020-11-06 11:54:52 +00:00
EncryptedKey : segment . EncryptedKey ,
2021-04-05 17:26:07 +01:00
Position : & pb . SegmentPosition {
PartNumber : int32 ( segment . Position . Part ) ,
Index : int32 ( segment . Position . Index ) ,
} ,
2019-07-24 12:33:23 +01:00
} , nil
2020-11-06 11:54:52 +00:00
}
// Remote segment
2021-05-13 14:31:55 +01:00
limits , privateKey , err := endpoint . orders . CreateGetOrderLimits ( ctx , bucket , segment , 0 )
2020-11-06 11:54:52 +00:00
if err != nil {
if orders . ErrDownloadFailedNotEnoughPieces . Has ( err ) {
endpoint . log . Error ( "Unable to create order limits." ,
zap . Stringer ( "Project ID" , keyInfo . ProjectID ) ,
zap . Stringer ( "API Key ID" , keyInfo . ID ) ,
zap . Error ( err ) ,
)
2019-07-24 12:33:23 +01:00
}
2021-07-23 16:16:49 +01:00
endpoint . log . Error ( "internal" , zap . Error ( err ) )
2020-11-06 11:54:52 +00:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
}
2019-07-24 12:33:23 +01:00
2020-11-06 11:54:52 +00:00
limits = sortLimits ( limits , segment )
2019-08-01 10:04:31 +01:00
2020-11-06 11:54:52 +00:00
// workaround to avoid sending nil values on top level
for i := range limits {
if limits [ i ] == nil {
limits [ i ] = & pb . AddressedOrderLimit { }
2019-08-01 10:04:31 +01:00
}
2020-11-06 11:54:52 +00:00
}
2019-08-01 10:04:31 +01:00
2020-11-06 11:54:52 +00:00
endpoint . log . Info ( "Segment Download" , zap . Stringer ( "Project ID" , keyInfo . ProjectID ) , zap . String ( "operation" , "get" ) , zap . String ( "type" , "remote" ) )
mon . Meter ( "req_get_remote" ) . Mark ( 1 )
2019-08-01 10:04:31 +01:00
2020-11-06 11:54:52 +00:00
return & pb . SegmentDownloadResponse {
AddressedLimits : limits ,
PrivateKey : privateKey ,
2021-04-06 12:52:59 +01:00
PlainOffset : segment . PlainOffset ,
2021-03-31 12:08:22 +01:00
PlainSize : int64 ( segment . PlainSize ) ,
2020-11-06 11:54:52 +00:00
SegmentSize : int64 ( segment . EncryptedSize ) ,
2019-07-24 12:33:23 +01:00
2020-11-06 11:54:52 +00:00
EncryptedKeyNonce : encryptedKeyNonce ,
EncryptedKey : segment . EncryptedKey ,
2021-02-17 09:54:04 +00:00
RedundancyScheme : & pb . RedundancyScheme {
Type : pb . RedundancyScheme_SchemeType ( segment . Redundancy . Algorithm ) ,
ErasureShareSize : segment . Redundancy . ShareSize ,
MinReq : int32 ( segment . Redundancy . RequiredShares ) ,
RepairThreshold : int32 ( segment . Redundancy . RepairShares ) ,
SuccessThreshold : int32 ( segment . Redundancy . OptimalShares ) ,
Total : int32 ( segment . Redundancy . TotalShares ) ,
} ,
2021-04-05 17:26:07 +01:00
Position : & pb . SegmentPosition {
PartNumber : int32 ( segment . Position . Part ) ,
Index : int32 ( segment . Position . Index ) ,
} ,
2020-11-06 11:54:52 +00:00
} , nil
2019-07-24 12:33:23 +01:00
}
2021-08-05 08:36:32 +01:00
// DeletePart deletes a single part from satellite db and from storage nodes.
func (endpoint *Endpoint) DeletePart(ctx context.Context, req *pb.PartDeleteRequest) (resp *pb.PartDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// Best effort: failing to record the uplink version is not fatal.
	if err := endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName()); err != nil {
		endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
	}

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// The caller must hold a delete grant for the object's path.
	if _, err = endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionDelete,
		Bucket:        streamID.Bucket,
		EncryptedPath: streamID.EncryptedPath,
		Time:          time.Now(),
	}); err != nil {
		return nil, err
	}

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	deleteReq := metabase.DeletePart{
		StreamID:   id,
		PartNumber: uint32(req.PartNumber),
		DeletePieces: func(ctx context.Context, segment metabase.DeletedSegmentInfo) error {
			// Piece deletion is best effort; leftovers are reclaimed by
			// garbage collection, so this callback never fails the delete.
			endpoint.deleteSegmentPieces(ctx, []metabase.DeletedSegmentInfo{segment})
			return nil
		},
	}
	if err := endpoint.metainfo.metabaseDB.DeletePart(ctx, deleteReq); err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	return &pb.PartDeleteResponse{}, nil
}
2020-06-30 22:49:29 +01:00
// sortLimits sorts order limits and fill missing ones with nil values.
2020-11-06 11:54:52 +00:00
func sortLimits ( limits [ ] * pb . AddressedOrderLimit , segment metabase . Segment ) [ ] * pb . AddressedOrderLimit {
sorted := make ( [ ] * pb . AddressedOrderLimit , segment . Redundancy . TotalShares )
for _ , piece := range segment . Pieces {
sorted [ piece . Number ] = getLimitByStorageNodeID ( limits , piece . StorageNode )
2019-08-01 10:04:31 +01:00
}
return sorted
}
func getLimitByStorageNodeID ( limits [ ] * pb . AddressedOrderLimit , storageNodeID storj . NodeID ) * pb . AddressedOrderLimit {
for _ , limit := range limits {
2020-01-27 20:01:37 +00:00
if limit == nil || limit . GetLimit ( ) == nil {
continue
}
2019-08-01 10:04:31 +01:00
if limit . GetLimit ( ) . StorageNodeId == storageNodeID {
return limit
}
}
return nil
}
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) packStreamID ( ctx context . Context , satStreamID * internalpb . StreamID ) ( streamID storj . StreamID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2019-07-22 15:45:18 +01:00
2020-10-29 16:16:25 +00:00
signedStreamID , err := SignStreamID ( ctx , endpoint . satellite , satStreamID )
2019-07-24 12:33:23 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
2020-04-08 13:08:57 +01:00
encodedStreamID , err := pb . Marshal ( signedStreamID )
2019-07-24 12:33:23 +01:00
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
streamID , err = storj . StreamIDFromBytes ( encodedStreamID )
if err != nil {
2019-09-19 05:46:39 +01:00
return nil , rpcstatus . Error ( rpcstatus . Internal , err . Error ( ) )
2019-07-24 12:33:23 +01:00
}
return streamID , nil
2019-07-22 15:45:18 +01:00
}
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) packSegmentID ( ctx context . Context , satSegmentID * internalpb . SegmentID ) ( segmentID storj . SegmentID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-29 16:16:25 +00:00
signedSegmentID , err := SignSegmentID ( ctx , endpoint . satellite , satSegmentID )
2019-07-24 12:33:23 +01:00
if err != nil {
return nil , err
}
2020-04-08 13:08:57 +01:00
encodedSegmentID , err := pb . Marshal ( signedSegmentID )
2019-07-24 12:33:23 +01:00
if err != nil {
return nil , err
}
segmentID , err = storj . SegmentIDFromBytes ( encodedSegmentID )
if err != nil {
return nil , err
}
return segmentID , nil
}
2020-10-29 16:16:25 +00:00
func ( endpoint * Endpoint ) unmarshalSatStreamID ( ctx context . Context , streamID storj . StreamID ) ( _ * internalpb . StreamID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-29 16:16:25 +00:00
satStreamID := & internalpb . StreamID { }
2020-04-08 13:08:57 +01:00
err = pb . Unmarshal ( streamID , satStreamID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
2020-10-29 16:16:25 +00:00
err = VerifyStreamID ( ctx , endpoint . satellite , satStreamID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
return satStreamID , nil
}
2020-10-30 11:22:16 +00:00
func ( endpoint * Endpoint ) unmarshalSatSegmentID ( ctx context . Context , segmentID storj . SegmentID ) ( _ * internalpb . SegmentID , err error ) {
2019-07-24 12:33:23 +01:00
defer mon . Task ( ) ( & ctx ) ( & err )
2020-10-30 11:22:16 +00:00
satSegmentID := & internalpb . SegmentID { }
2020-04-08 13:08:57 +01:00
err = pb . Unmarshal ( segmentID , satSegmentID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
2019-11-15 15:44:23 +00:00
if satSegmentID . StreamId == nil {
return nil , errs . New ( "stream ID missing" )
}
2019-07-22 15:45:18 +01:00
2020-10-30 11:22:16 +00:00
err = VerifySegmentID ( ctx , endpoint . satellite , satSegmentID )
2019-07-22 15:45:18 +01:00
if err != nil {
return nil , err
}
if satSegmentID . CreationDate . Before ( time . Now ( ) . Add ( - satIDExpiration ) ) {
return nil , errs . New ( "segment ID expired" )
}
return satSegmentID , nil
}
2019-12-11 17:44:13 +00:00
2020-12-03 18:04:01 +00:00
// DeleteCommittedObject deletes all the pieces of the storage nodes that belongs
2019-12-11 17:44:13 +00:00
// to the specified object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
2020-12-03 18:04:01 +00:00
func ( endpoint * Endpoint ) DeleteCommittedObject (
2020-12-08 08:08:02 +00:00
ctx context . Context , projectID uuid . UUID , bucket string , object metabase . ObjectKey ,
2020-11-03 12:58:27 +00:00
) ( deletedObjects [ ] * pb . Object , err error ) {
2020-12-08 08:08:02 +00:00
defer mon . Task ( ) ( & ctx , projectID . String ( ) , bucket , object ) ( & err )
2019-12-11 17:44:13 +00:00
2020-11-03 12:58:27 +00:00
req := metabase . ObjectLocation {
2020-09-04 08:46:53 +01:00
ProjectID : projectID ,
2020-12-08 08:08:02 +00:00
BucketName : bucket ,
ObjectKey : object ,
2020-07-27 21:12:14 +01:00
}
2020-08-06 02:23:45 +01:00
2020-12-03 18:04:01 +00:00
result , err := endpoint . metainfo . metabaseDB . DeleteObjectsAllVersions ( ctx , metabase . DeleteObjectsAllVersions { Locations : [ ] metabase . ObjectLocation { req } } )
if err != nil {
return nil , err
}
deletedObjects , err = endpoint . deleteObjectsPieces ( ctx , result )
2020-07-27 21:12:14 +01:00
if err != nil {
endpoint . log . Error ( "failed to delete pointers" ,
2020-12-08 08:08:02 +00:00
zap . Stringer ( "project" , projectID ) ,
zap . String ( "bucket" , bucket ) ,
zap . Binary ( "object" , [ ] byte ( object ) ) ,
2020-07-27 21:12:14 +01:00
zap . Error ( err ) ,
)
2020-11-03 12:58:27 +00:00
return deletedObjects , err
2019-12-11 17:44:13 +00:00
}
2020-11-03 12:58:27 +00:00
return deletedObjects , nil
2020-08-06 02:23:45 +01:00
}
2021-01-11 10:08:18 +00:00
// DeleteObjectAnyStatus deletes all the pieces of the storage nodes that belongs
// to the specified object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
func (endpoint *Endpoint) DeleteObjectAnyStatus(ctx context.Context, location metabase.ObjectLocation,
) (deletedObjects []*pb.Object, err error) {
	defer mon.Task()(&ctx, location.ProjectID.String(), location.BucketName, location.ObjectKey)(&err)

	result, err := endpoint.metainfo.metabaseDB.DeleteObjectAnyStatusAllVersions(ctx, metabase.DeleteObjectAnyStatusAllVersions{
		ObjectLocation: location,
	})
	if err != nil {
		return nil, err
	}

	deletedObjects, err = endpoint.deleteObjectsPieces(ctx, result)
	if err == nil {
		return deletedObjects, nil
	}

	endpoint.log.Error("failed to delete pointers",
		zap.Stringer("project", location.ProjectID),
		zap.String("bucket", location.BucketName),
		zap.Binary("object", []byte(location.ObjectKey)),
		zap.Error(err),
	)
	// Return whatever was converted so far alongside the error.
	return deletedObjects, err
}
2020-12-03 18:04:01 +00:00
// DeletePendingObject deletes all the pieces of the storage nodes that belongs
// to the specified pending object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
2021-05-04 14:51:40 +01:00
func ( endpoint * Endpoint ) DeletePendingObject ( ctx context . Context , stream metabase . ObjectStream ) ( deletedObjects [ ] * pb . Object , err error ) {
2020-12-03 18:04:01 +00:00
req := metabase . DeletePendingObject {
2021-05-04 14:51:40 +01:00
ObjectStream : stream ,
2020-12-03 18:04:01 +00:00
}
result , err := endpoint . metainfo . metabaseDB . DeletePendingObject ( ctx , req )
2020-08-06 02:23:45 +01:00
if err != nil {
2020-11-03 12:58:27 +00:00
return nil , err
2020-08-06 02:23:45 +01:00
}
2020-12-03 18:04:01 +00:00
return endpoint . deleteObjectsPieces ( ctx , result )
}
func ( endpoint * Endpoint ) deleteObjectsPieces ( ctx context . Context , result metabase . DeleteObjectResult ) ( deletedObjects [ ] * pb . Object , err error ) {
// We should ignore client cancelling and always try to delete segments.
ctx = context2 . WithoutCancellation ( ctx )
2020-11-03 12:58:27 +00:00
deletedObjects = make ( [ ] * pb . Object , len ( result . Objects ) )
for i , object := range result . Objects {
2021-02-16 15:36:09 +00:00
deletedObject , err := endpoint . objectToProto ( ctx , object , endpoint . defaultRS )
2020-12-02 11:34:41 +00:00
if err != nil {
return nil , err
}
deletedObjects [ i ] = deletedObject
2020-11-03 12:58:27 +00:00
}
2019-12-16 19:03:20 +00:00
2020-12-09 12:24:37 +00:00
endpoint . deleteSegmentPieces ( ctx , result . Segments )
return deletedObjects , nil
}
func ( endpoint * Endpoint ) deleteSegmentPieces ( ctx context . Context , segments [ ] metabase . DeletedSegmentInfo ) {
nodesPieces := groupPiecesByNodeID ( segments )
2019-12-16 19:03:20 +00:00
2020-11-03 12:58:27 +00:00
var requests [ ] piecedeletion . Request
for node , pieces := range nodesPieces {
requests = append ( requests , piecedeletion . Request {
Node : storj . NodeURL {
ID : node ,
} ,
Pieces : pieces ,
} )
2020-08-06 02:23:45 +01:00
}
2019-12-16 19:03:20 +00:00
2020-11-03 12:58:27 +00:00
// Only return an error if we failed to delete the objects. If we failed
// to delete pieces, let garbage collector take care of it.
2020-08-06 02:23:45 +01:00
if err := endpoint . deletePieces . Delete ( ctx , requests , deleteObjectPiecesSuccessThreshold ) ; err != nil {
endpoint . log . Error ( "failed to delete pieces" , zap . Error ( err ) )
2019-12-16 19:03:20 +00:00
}
2020-11-03 12:58:27 +00:00
}
2021-02-16 15:36:09 +00:00
func ( endpoint * Endpoint ) objectToProto ( ctx context . Context , object metabase . Object , rs * pb . RedundancyScheme ) ( * pb . Object , error ) {
2020-12-02 11:34:41 +00:00
expires := time . Time { }
if object . ExpiresAt != nil {
expires = * object . ExpiresAt
}
2021-01-14 11:47:29 +00:00
// TotalPlainSize != 0 means object was uploaded with newer uplink
multipartObject := object . TotalPlainSize != 0 && object . FixedSegmentSize <= 0
2020-12-02 11:34:41 +00:00
streamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-01-07 08:46:49 +00:00
Bucket : [ ] byte ( object . BucketName ) ,
EncryptedPath : [ ] byte ( object . ObjectKey ) ,
Version : int32 ( object . Version ) , // TODO incomatible types
2021-01-12 11:29:13 +00:00
CreationDate : object . CreatedAt ,
ExpirationDate : expires ,
2021-01-07 08:46:49 +00:00
StreamId : object . StreamID [ : ] ,
2021-01-14 11:47:29 +00:00
MultipartObject : multipartObject ,
2021-02-16 15:36:09 +00:00
Redundancy : rs ,
2021-03-24 09:33:56 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( object . Encryption . CipherSuite ) ,
BlockSize : int64 ( object . Encryption . BlockSize ) ,
} ,
2020-12-02 11:34:41 +00:00
} )
if err != nil {
return nil , err
}
var nonce storj . Nonce
if len ( object . EncryptedMetadataNonce ) > 0 {
nonce , err = storj . NonceFromBytes ( object . EncryptedMetadataNonce )
if err != nil {
return nil , err
}
}
streamMeta := & pb . StreamMeta { }
err = pb . Unmarshal ( object . EncryptedMetadata , streamMeta )
if err != nil {
return nil , err
}
// TODO is this enough to handle old uplinks
if streamMeta . EncryptionBlockSize == 0 {
streamMeta . EncryptionBlockSize = object . Encryption . BlockSize
}
if streamMeta . EncryptionType == 0 {
streamMeta . EncryptionType = int32 ( object . Encryption . CipherSuite )
}
if streamMeta . NumberOfSegments == 0 {
streamMeta . NumberOfSegments = int64 ( object . SegmentCount )
}
if streamMeta . LastSegmentMeta == nil {
streamMeta . LastSegmentMeta = & pb . SegmentMeta {
EncryptedKey : object . EncryptedMetadataEncryptedKey ,
KeyNonce : object . EncryptedMetadataNonce ,
}
}
metadataBytes , err := pb . Marshal ( streamMeta )
if err != nil {
return nil , err
}
2020-11-03 12:58:27 +00:00
result := & pb . Object {
2020-12-02 11:34:41 +00:00
Bucket : [ ] byte ( object . BucketName ) ,
EncryptedPath : [ ] byte ( object . ObjectKey ) ,
Version : int32 ( object . Version ) , // TODO incomatible types
StreamId : streamID ,
ExpiresAt : expires ,
CreatedAt : object . CreatedAt ,
TotalSize : object . TotalEncryptedSize ,
PlainSize : object . TotalPlainSize ,
EncryptedMetadata : metadataBytes ,
EncryptedMetadataNonce : nonce ,
EncryptedMetadataEncryptedKey : object . EncryptedMetadataEncryptedKey ,
2020-11-03 12:58:27 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( object . Encryption . CipherSuite ) ,
BlockSize : int64 ( object . Encryption . BlockSize ) ,
} ,
2020-12-02 11:34:41 +00:00
2021-02-16 15:36:09 +00:00
RedundancyScheme : rs ,
2020-11-03 12:58:27 +00:00
}
2020-12-02 11:34:41 +00:00
return result , nil
}
2021-08-02 19:30:02 +01:00
func ( endpoint * Endpoint ) objectEntryToProtoListItem ( ctx context . Context , bucket [ ] byte , entry metabase . ObjectEntry , prefixToPrependInSatStreamID metabase . ObjectKey , includeMetadata bool ) ( item * pb . ObjectListItem , err error ) {
2020-12-02 11:34:41 +00:00
expires := time . Time { }
if entry . ExpiresAt != nil {
expires = * entry . ExpiresAt
}
2021-08-02 19:30:02 +01:00
item = & pb . ObjectListItem {
EncryptedPath : [ ] byte ( entry . ObjectKey ) ,
Version : int32 ( entry . Version ) , // TODO incompatible types
Status : pb . Object_Status ( entry . Status ) ,
ExpiresAt : expires ,
CreatedAt : entry . CreatedAt ,
PlainSize : entry . TotalPlainSize ,
}
if includeMetadata {
var nonce storj . Nonce
if len ( entry . EncryptedMetadataNonce ) > 0 {
nonce , err = storj . NonceFromBytes ( entry . EncryptedMetadataNonce )
if err != nil {
return nil , err
}
}
streamMeta := & pb . StreamMeta { }
err = pb . Unmarshal ( entry . EncryptedMetadata , streamMeta )
2020-12-02 11:34:41 +00:00
if err != nil {
return nil , err
}
2021-08-02 19:30:02 +01:00
// TODO is this enough to handle old uplinks
if streamMeta . EncryptionBlockSize == 0 {
streamMeta . EncryptionBlockSize = entry . Encryption . BlockSize
}
if streamMeta . EncryptionType == 0 {
streamMeta . EncryptionType = int32 ( entry . Encryption . CipherSuite )
}
if streamMeta . NumberOfSegments == 0 {
streamMeta . NumberOfSegments = int64 ( entry . SegmentCount )
}
if streamMeta . LastSegmentMeta == nil {
streamMeta . LastSegmentMeta = & pb . SegmentMeta {
EncryptedKey : entry . EncryptedMetadataEncryptedKey ,
KeyNonce : entry . EncryptedMetadataNonce ,
}
2020-12-02 11:34:41 +00:00
}
2020-11-03 12:58:27 +00:00
2021-08-02 19:30:02 +01:00
metadataBytes , err := pb . Marshal ( streamMeta )
if err != nil {
return nil , err
}
2020-12-02 11:34:41 +00:00
2021-08-02 19:30:02 +01:00
item . EncryptedMetadata = metadataBytes
item . EncryptedMetadataNonce = nonce
2020-12-02 11:34:41 +00:00
}
// Add Stream ID to list items if listing is for pending objects.
// The client requires the Stream ID to use in the MultipartInfo.
if entry . Status == metabase . Pending {
satStreamID , err := endpoint . packStreamID ( ctx , & internalpb . StreamID {
2021-01-12 11:29:13 +00:00
Bucket : bucket ,
2021-02-19 14:32:55 +00:00
EncryptedPath : append ( [ ] byte ( prefixToPrependInSatStreamID ) , item . EncryptedPath ... ) ,
2021-01-12 11:29:13 +00:00
Version : item . Version ,
CreationDate : item . CreatedAt ,
ExpirationDate : item . ExpiresAt ,
StreamId : entry . StreamID [ : ] ,
MultipartObject : entry . FixedSegmentSize <= 0 ,
2020-12-02 11:34:41 +00:00
// TODO: defaultRS may change while the upload is pending.
// Ideally, we should remove redundancy from satStreamID.
Redundancy : endpoint . defaultRS ,
2021-03-24 09:33:56 +00:00
EncryptionParameters : & pb . EncryptionParameters {
CipherSuite : pb . CipherSuite ( entry . Encryption . CipherSuite ) ,
BlockSize : int64 ( entry . Encryption . BlockSize ) ,
} ,
2020-12-02 11:34:41 +00:00
} )
if err != nil {
return nil , err
}
item . StreamId = & satStreamID
}
2020-11-03 12:58:27 +00:00
2020-12-02 11:34:41 +00:00
return item , nil
2020-11-03 12:58:27 +00:00
}
// groupPiecesByNodeID returns a map that contains pieces with node id as the key.
func groupPiecesByNodeID ( segments [ ] metabase . DeletedSegmentInfo ) map [ storj . NodeID ] [ ] storj . PieceID {
piecesToDelete := map [ storj . NodeID ] [ ] storj . PieceID { }
for _ , segment := range segments {
for _ , piece := range segment . Pieces {
pieceID := segment . RootPieceID . Derive ( piece . StorageNode , int32 ( piece . Number ) )
piecesToDelete [ piece . StorageNode ] = append ( piecesToDelete [ piece . StorageNode ] , pieceID )
}
}
return piecesToDelete
2020-01-17 18:47:37 +00:00
}
2020-01-28 13:44:47 +00:00
2020-06-10 15:10:44 +01:00
// RevokeAPIKey handles requests to revoke an api key.
func ( endpoint * Endpoint ) RevokeAPIKey ( ctx context . Context , req * pb . RevokeAPIKeyRequest ) ( resp * pb . RevokeAPIKeyResponse , err error ) {
defer mon . Task ( ) ( & ctx ) ( & err )
2021-02-09 22:40:23 +00:00
err = endpoint . versionCollector . collect ( req . Header . UserAgent , mon . Func ( ) . ShortName ( ) )
if err != nil {
endpoint . log . Warn ( "unable to collect uplink version" , zap . Error ( err ) )
}
2020-06-10 15:10:44 +01:00
macToRevoke , err := macaroon . ParseMacaroon ( req . GetApiKey ( ) )
if err != nil {
return nil , rpcstatus . Error ( rpcstatus . InvalidArgument , "API key to revoke is not a macaroon" )
}
keyInfo , err := endpoint . validateRevoke ( ctx , req . Header , macToRevoke )
if err != nil {
return nil , err
}
err = endpoint . revocations . Revoke ( ctx , macToRevoke . Tail ( ) , keyInfo . ID [ : ] )
if err != nil {
endpoint . log . Error ( "Failed to revoke API key" , zap . Error ( err ) )
return nil , rpcstatus . Error ( rpcstatus . Internal , "Failed to revoke API key" )
}
return & pb . RevokeAPIKeyResponse { } , nil
2020-06-16 14:03:02 +01:00
}
2020-09-03 14:54:56 +01:00
2020-12-22 11:12:07 +00:00
// checkExceedsStorageUsage returns a ResourceExhausted error when the project
// is over its storage limit. If the totals cannot be retrieved, the failure is
// logged and the limit is intentionally not enforced.
func (endpoint *Endpoint) checkExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)

	exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, projectID)
	switch {
	case err != nil:
		endpoint.log.Error(
			"Retrieving project storage totals failed; storage usage limit won't be enforced",
			zap.Error(err),
		)
	case exceeded:
		endpoint.log.Error("Monthly storage limit exceeded",
			zap.Stringer("Limit", limit),
			zap.Stringer("Project ID", projectID),
		)
		return rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
	}
	return nil
}
2020-09-03 14:54:56 +01:00
// CreatePath creates a segment key.
2020-12-14 10:11:28 +00:00
func CreatePath ( ctx context . Context , projectID uuid . UUID , segmentIndex uint32 , bucket , path [ ] byte ) ( _ metabase . SegmentLocation , err error ) {
2020-09-03 14:54:56 +01:00
// TODO rename to CreateLocation
defer mon . Task ( ) ( & ctx ) ( & err )
return metabase . SegmentLocation {
ProjectID : projectID ,
BucketName : string ( bucket ) ,
2020-12-14 10:11:28 +00:00
Position : metabase . SegmentPosition { Index : segmentIndex } ,
2020-09-03 14:54:56 +01:00
ObjectKey : metabase . ObjectKey ( path ) ,
} , nil
}