// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/jtolio/eventkit"
	"github.com/spacemonkeygo/monkit/v3"
	"go.uber.org/zap"

	"storj.io/common/context2"
	"storj.io/common/encryption"
	"storj.io/common/errs2"
	"storj.io/common/macaroon"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metainfo/piecedeletion"
	"storj.io/storj/satellite/orders"
)

// BeginObject begins object.
func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()
	var canDelete bool
	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionWrite,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionDelete,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
			actionPermitted: &canDelete,
			optional:        true,
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	if !req.ExpiresAt.IsZero() && !req.ExpiresAt.After(time.Now()) {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Invalid expiration time")
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	objectKeyLength := len(req.EncryptedObjectKey)
	if objectKeyLength > endpoint.config.MaxEncryptedObjectKeyLength {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("key length is too big, got %v, maximum allowed is %v", objectKeyLength, endpoint.config.MaxEncryptedObjectKeyLength))
	}

	err = endpoint.checkUploadLimits(ctx, keyInfo.ProjectID)
	if err != nil {
		return nil, err
	}

	if err := endpoint.checkObjectUploadRate(ctx, keyInfo.ProjectID, req.Bucket, req.EncryptedObjectKey); err != nil {
		return nil, err
	}

	// TODO this needs to be optimized to avoid DB call on each request
	placement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
	if err != nil {
		if buckets.ErrBucketNotFound.Has(err) {
			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
		}
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.Bucket, nil); err != nil {
		return nil, err
	}

	streamID, err := uuid.New()
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// TODO this will work only with newest uplink
	// figure out what to do with this
	encryptionParameters := storj.EncryptionParameters{
		CipherSuite: storj.CipherSuite(req.EncryptionParameters.CipherSuite),
		BlockSize:   int32(req.EncryptionParameters.BlockSize), // TODO check conversion
	}

	var expiresAt *time.Time
	if req.ExpiresAt.IsZero() {
		expiresAt = nil
	} else {
		expiresAt = &req.ExpiresAt
	}

	var nonce []byte
	if !req.EncryptedMetadataNonce.IsZero() {
		nonce = req.EncryptedMetadataNonce[:]
	}

	object, err := endpoint.metabase.BeginObjectNextVersion(ctx, metabase.BeginObjectNextVersion{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
			StreamID:   streamID,
			Version:    metabase.NextVersion,
		},
		ExpiresAt:  expiresAt,
		Encryption: encryptionParameters,

		EncryptedMetadata:             req.EncryptedMetadata,
		EncryptedMetadataEncryptedKey: req.EncryptedMetadataEncryptedKey,
		EncryptedMetadataNonce:        nonce,
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
		Bucket:               []byte(object.BucketName),
		EncryptedObjectKey:   []byte(object.ObjectKey),
		Version:              int64(object.Version),
		CreationDate:         object.CreatedAt,
		ExpirationDate:       req.ExpiresAt, // TODO make ExpirationDate nullable
		StreamId:             object.StreamID[:],
		MultipartObject:      object.FixedSegmentSize <= 0,
		EncryptionParameters: req.EncryptionParameters,
		Placement:            int32(placement),
	})
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Upload", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "put"), zap.String("type", "object"))
	mon.Meter("req_put_object").Mark(1)

	return &pb.ObjectBeginResponse{
		Bucket:             req.Bucket,
		EncryptedObjectKey: req.EncryptedObjectKey,
		Version:            req.Version,
		StreamId:           satStreamID,
		RedundancyScheme:   endpoint.defaultRS,
	}, nil
}
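
// Illustrative upload flow from the client's perspective (a sketch, assuming
// a generated client along the lines of pb.DRPCMetainfoClient; not part of
// the original endpoint code):
//
//	begin, err := client.BeginObject(ctx, &pb.ObjectBeginRequest{
//		Bucket:             bucket,
//		EncryptedObjectKey: key,
//	})
//	// ... upload the segments, referencing begin.StreamId ...
//	_, err = client.CommitObject(ctx, &pb.ObjectCommitRequest{StreamId: begin.StreamId})
//
// The satellite-issued StreamId ties the per-segment calls back to the
// pending object created by BeginObject above.
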
// CommitObject commits an object when all its segments have already been committed.
func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommitRequest) (resp *pb.ObjectCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	now := time.Now()
	var allowDelete bool
	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionWrite,
				Bucket:        streamID.Bucket,
				EncryptedPath: streamID.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionDelete,
				Bucket:        streamID.Bucket,
				EncryptedPath: streamID.EncryptedObjectKey,
				Time:          now,
			},
			actionPermitted: &allowDelete,
			optional:        true,
		},
	)
	if err != nil {
		return nil, err
	}

	var committedObject *metabase.Object
	defer func() {
		var tags []eventkit.Tag
		if committedObject != nil {
			tags = []eventkit.Tag{
				eventkit.Bool("expires", committedObject.ExpiresAt != nil),
				eventkit.Int64("segment_count", int64(committedObject.SegmentCount)),
				eventkit.Int64("total_plain_size", committedObject.TotalPlainSize),
				eventkit.Int64("total_encrypted_size", committedObject.TotalEncryptedSize),
				eventkit.Int64("fixed_segment_size", int64(committedObject.FixedSegmentSize)),
			}
		}
		endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req), tags...)
	}()

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// for old uplinks get Encryption from StreamMeta
	streamMeta := &pb.StreamMeta{}
	encryption := storj.EncryptionParameters{}
	err = pb.Unmarshal(req.EncryptedMetadata, streamMeta)
	if err != nil {
		// TODO: what if this is an error we don't expect?
	} else {
		encryption.CipherSuite = storj.CipherSuite(streamMeta.EncryptionType)
		encryption.BlockSize = streamMeta.EncryptionBlockSize
	}

	request := metabase.CommitObject{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedObjectKey),
			StreamID:   id,
			Version:    metabase.Version(streamID.Version),
		},
		Encryption:     encryption,
		DisallowDelete: !allowDelete,
		OnDelete: func(segments []metabase.DeletedSegmentInfo) {
			endpoint.deleteSegmentPieces(ctx, segments)
		},
	}
	// Uplink can send empty metadata with a non-empty key/nonce. We need to
	// fix this on the uplink side, but this code path will still be needed
	// for backward compatibility.
	if len(req.EncryptedMetadata) != 0 {
		request.OverrideEncryptedMetadata = true
		request.EncryptedMetadata = req.EncryptedMetadata
		request.EncryptedMetadataNonce = req.EncryptedMetadataNonce[:]
		request.EncryptedMetadataEncryptedKey = req.EncryptedMetadataEncryptedKey

		// Older uplinks might send EncryptedMetadata directly with the request,
		// but the key/nonce will be part of StreamMeta.
		if req.EncryptedMetadataNonce.IsZero() && len(req.EncryptedMetadataEncryptedKey) == 0 &&
			streamMeta.LastSegmentMeta != nil {
			request.EncryptedMetadataNonce = streamMeta.LastSegmentMeta.KeyNonce
			request.EncryptedMetadataEncryptedKey = streamMeta.LastSegmentMeta.EncryptedKey
		}
	}
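
	// Illustrative summary of the two metadata layouts handled above (an
	// assumption drawn from this code, not an exhaustive protocol spec):
	//
	//	newer uplinks: req.EncryptedMetadata, with the key/nonce sent in
	//	               req.EncryptedMetadataEncryptedKey / req.EncryptedMetadataNonce
	//	older uplinks: req.EncryptedMetadata is a marshaled pb.StreamMeta whose
	//	               LastSegmentMeta carries EncryptedKey / KeyNonce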

	if err := endpoint.checkEncryptedMetadataSize(request.EncryptedMetadata, request.EncryptedMetadataEncryptedKey); err != nil {
		return nil, err
	}

	object, err := endpoint.metabase.CommitObject(ctx, request)
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	committedObject = &object

	mon.Meter("req_commit_object").Mark(1)

	return &pb.ObjectCommitResponse{}, nil
}

// GetObject gets single object metadata.
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()
	keyInfo, err := endpoint.validateAuthAny(ctx, req.Header,
		macaroon.Action{
			Op:            macaroon.ActionRead,
			Bucket:        req.Bucket,
			EncryptedPath: req.EncryptedObjectKey,
			Time:          now,
		},
		macaroon.Action{
			Op:            macaroon.ActionList,
			Bucket:        req.Bucket,
			EncryptedPath: req.EncryptedObjectKey,
			Time:          now,
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	mbObject, err := endpoint.metabase.GetObjectLastCommitted(ctx, metabase.GetObjectLastCommitted{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	{
		tags := []eventkit.Tag{
			eventkit.Bool("expires", mbObject.ExpiresAt != nil),
			eventkit.Int64("segment_count", int64(mbObject.SegmentCount)),
			eventkit.Int64("total_plain_size", mbObject.TotalPlainSize),
			eventkit.Int64("total_encrypted_size", mbObject.TotalEncryptedSize),
			eventkit.Int64("fixed_segment_size", int64(mbObject.FixedSegmentSize)),
		}
		endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req), tags...)
	}

	var segmentRS *pb.RedundancyScheme
	// TODO: this code is triggered only by a very old uplink library and we will remove it eventually.
	if !req.RedundancySchemePerSegment && mbObject.SegmentCount > 0 {
		segmentRS = endpoint.defaultRS
		segment, err := endpoint.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: mbObject.StreamID,
			Position: metabase.SegmentPosition{
				Index: 0,
			},
		})
		if err != nil {
			// add the user agent to the log entry to figure out which tool is using the old uplink
			userAgent := "unknown"
			if req.Header != nil && len(req.Header.UserAgent) != 0 {
				userAgent = string(req.Header.UserAgent)
			}
			// don't fail, because it's possible that it's a multipart object
			endpoint.log.Warn("unable to get segment metadata to get object redundancy",
				zap.Stringer("StreamID", mbObject.StreamID),
				zap.Stringer("ProjectID", keyInfo.ProjectID),
				zap.String("User Agent", userAgent),
				zap.Error(err),
			)
		} else {
			segmentRS = &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			}
		}

		// monitor how many uplinks are still using this additional code
		mon.Meter("req_get_object_rs_per_object").Mark(1)
	}

	object, err := endpoint.objectToProto(ctx, mbObject, segmentRS)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Get", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "object"))
	mon.Meter("req_get_object").Mark(1)

	return &pb.ObjectGetResponse{Object: object}, nil
}

// DownloadObject gets object information, creates a download for segments and lists the object segments.
func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDownloadRequest) (resp *pb.ObjectDownloadResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if ctx.Err() != nil {
		return nil, rpcstatus.Error(rpcstatus.Canceled, "client has closed the connection")
	}

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionRead,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID); err != nil {
		if errs2.IsCanceled(err) {
			return nil, rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		endpoint.log.Error(
			"Retrieving project bandwidth total failed; bandwidth limit won't be enforced",
			zap.Stringer("Project ID", keyInfo.ProjectID),
			zap.Error(err),
		)
	} else if exceeded {
		endpoint.log.Warn("Monthly bandwidth limit exceeded",
			zap.Stringer("Limit", limit),
			zap.Stringer("Project ID", keyInfo.ProjectID),
		)
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
	}

	// get the object information
	object, err := endpoint.metabase.GetObjectLastCommitted(ctx, metabase.GetObjectLastCommitted{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	// get the range segments
	streamRange, err := calculateStreamRange(object, req.Range)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	{
		tags := []eventkit.Tag{
			eventkit.Bool("expires", object.ExpiresAt != nil),
			eventkit.Int64("segment_count", int64(object.SegmentCount)),
			eventkit.Int64("total_plain_size", object.TotalPlainSize),
			eventkit.Int64("total_encrypted_size", object.TotalEncryptedSize),
			eventkit.Int64("fixed_segment_size", int64(object.FixedSegmentSize)),
		}
		if streamRange != nil {
			tags = append(tags,
				eventkit.Int64("range_start", streamRange.PlainStart),
				eventkit.Int64("range_end", streamRange.PlainLimit))
		}
		endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req), tags...)
	}

	segments, err := endpoint.metabase.ListSegments(ctx, metabase.ListSegments{
		StreamID:                object.StreamID,
		Range:                   streamRange,
		Limit:                   int(req.Limit),
		UpdateFirstWithAncestor: true,
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	// get the download response for the first segment
	downloadSegments, err := func() ([]*pb.SegmentDownloadResponse, error) {
		if len(segments.Segments) == 0 {
			return nil, nil
		}
		if object.IsMigrated() && streamRange != nil && streamRange.PlainStart > 0 {
			return nil, nil
		}

		segment := segments.Segments[0]
		downloadSizes := endpoint.calculateDownloadSizes(streamRange, segment, object.Encryption)

		// Update the current bandwidth cache value incrementing the SegmentSize.
		err = endpoint.projectUsage.UpdateProjectBandwidthUsage(ctx, keyInfo.ProjectID, downloadSizes.encryptedSize)
		if err != nil {
			if errs2.IsCanceled(err) {
				return nil, rpcstatus.Wrap(rpcstatus.Canceled, err)
			}

			// log it and continue. it's most likely our own fault that we couldn't
			// track it, and the only thing that will be affected is our per-project
			// bandwidth limits.
			endpoint.log.Error(
				"Could not track the new project's bandwidth usage when downloading an object",
				zap.Stringer("Project ID", keyInfo.ProjectID),
				zap.Error(err),
			)
		}

		encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
		if err != nil {
			endpoint.log.Error("unable to get encryption key nonce from metadata", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		if segment.Inline() {
			err := endpoint.orders.UpdateGetInlineOrder(ctx, object.Location().Bucket(), downloadSizes.plainSize)
			if err != nil {
				endpoint.log.Error("internal", zap.Error(err))
				return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
			}

			// TODO we may think about a fallback to encrypted size,
			// as plain size may be empty for old objects
			downloaded := segment.PlainSize
			if streamRange != nil {
				downloaded = int32(streamRange.PlainLimit)
			}
			endpoint.versionCollector.collectTransferStats(req.Header.UserAgent, download, int(downloaded))

			endpoint.log.Info("Inline Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "inline"))
			mon.Meter("req_get_inline").Mark(1)

			return []*pb.SegmentDownloadResponse{{
				PlainOffset:         segment.PlainOffset,
				PlainSize:           int64(segment.PlainSize),
				SegmentSize:         int64(segment.EncryptedSize),
				EncryptedInlineData: segment.InlineData,
				EncryptedKeyNonce:   encryptedKeyNonce,
				EncryptedKey:        segment.EncryptedKey,
				Position: &pb.SegmentPosition{
					PartNumber: int32(segment.Position.Part),
					Index:      int32(segment.Position.Index),
				},
			}}, nil
		}

		limits, privateKey, err := endpoint.orders.CreateGetOrderLimits(ctx, object.Location().Bucket(), segment, req.GetDesiredNodes(), downloadSizes.orderLimit)
		if err != nil {
			if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
				endpoint.log.Error("Unable to create order limits.",
					zap.Stringer("Project ID", keyInfo.ProjectID),
					zap.Stringer("API Key ID", keyInfo.ID),
					zap.Error(err),
				)
			}
			endpoint.log.Error("internal", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		// TODO we may think about a fallback to encrypted size,
		// as plain size may be empty for old objects
		downloaded := segment.PlainSize
		if streamRange != nil {
			downloaded = int32(streamRange.PlainLimit)
		}
		endpoint.versionCollector.collectTransferStats(req.Header.UserAgent, download, int(downloaded))

		endpoint.log.Info("Segment Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "remote"))
		mon.Meter("req_get_remote").Mark(1)

		return []*pb.SegmentDownloadResponse{{
			AddressedLimits:   limits,
			PrivateKey:        privateKey,
			PlainOffset:       segment.PlainOffset,
			PlainSize:         int64(segment.PlainSize),
			SegmentSize:       int64(segment.EncryptedSize),
			EncryptedKeyNonce: encryptedKeyNonce,
			EncryptedKey:      segment.EncryptedKey,
			RedundancyScheme: &pb.RedundancyScheme{
				Type:             pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
				ErasureShareSize: segment.Redundancy.ShareSize,
				MinReq:           int32(segment.Redundancy.RequiredShares),
				RepairThreshold:  int32(segment.Redundancy.RepairShares),
				SuccessThreshold: int32(segment.Redundancy.OptimalShares),
				Total:            int32(segment.Redundancy.TotalShares),
			},
			Position: &pb.SegmentPosition{
				PartNumber: int32(segment.Position.Part),
				Index:      int32(segment.Position.Index),
			},
		}}, nil
	}()
	if err != nil {
		return nil, err
	}

	// convert to response
	protoObject, err := endpoint.objectToProto(ctx, object, nil)
	if err != nil {
		endpoint.log.Error("unable to convert object to proto", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	segmentList, err := convertSegmentListResults(segments)
	if err != nil {
		endpoint.log.Error("unable to convert stream list", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "download"), zap.String("type", "object"))
	mon.Meter("req_download_object").Mark(1)

	return &pb.ObjectDownloadResponse{
		Object: protoObject,

		// The RPC API allows for multiple segment download responses, but for now
		// we return only one. This can be changed in the future if it seems useful
		// to return more than one on the initial response.
		SegmentDownload: downloadSegments,

		// In the case where the client needs the segment list, it will contain
		// every segment. In the case where the segment list is not needed,
		// segmentListItems will be nil.
		SegmentList: segmentList,
	}, nil
}

func convertSegmentListResults(segments metabase.ListSegmentsResult) (*pb.SegmentListResponse, error) {
	items := make([]*pb.SegmentListItem, len(segments.Segments))
	for i, item := range segments.Segments {
		items[i] = &pb.SegmentListItem{
			Position: &pb.SegmentPosition{
				PartNumber: int32(item.Position.Part),
				Index:      int32(item.Position.Index),
			},
			PlainSize:     int64(item.PlainSize),
			PlainOffset:   item.PlainOffset,
			CreatedAt:     item.CreatedAt,
			EncryptedETag: item.EncryptedETag,
			EncryptedKey:  item.EncryptedKey,
		}
		var err error
		items[i].EncryptedKeyNonce, err = storj.NonceFromBytes(item.EncryptedKeyNonce)
		if err != nil {
			return nil, err
		}
	}
	return &pb.SegmentListResponse{
		Items: items,
		More:  segments.More,
	}, nil
}

type downloadSizes struct {
	// amount of data that uplink eventually gets
	plainSize int64
	// amount of data that's present after encryption
	encryptedSize int64
	// amount of data that's read from a storage node
	orderLimit int64
}

func (endpoint *Endpoint) calculateDownloadSizes(streamRange *metabase.StreamRange, segment metabase.Segment, encryptionParams storj.EncryptionParameters) downloadSizes {
	if segment.Inline() {
		return downloadSizes{
			plainSize:     int64(len(segment.InlineData)),
			encryptedSize: int64(segment.EncryptedSize),
		}
	}

	// calculate the range inside the given segment
	readStart := segment.PlainOffset
	if streamRange != nil && readStart <= streamRange.PlainStart {
		readStart = streamRange.PlainStart
	}
	readLimit := segment.PlainOffset + int64(segment.PlainSize)
	if streamRange != nil && streamRange.PlainLimit < readLimit {
		readLimit = streamRange.PlainLimit
	}

	plainSize := readLimit - readStart

	// calculate the read range relative to the segment start
	readStart -= segment.PlainOffset
	readLimit -= segment.PlainOffset

	// align to encryption block size
	enc, err := encryption.NewEncrypter(encryptionParams.CipherSuite, &storj.Key{1}, &storj.Nonce{1}, int(encryptionParams.BlockSize))
	if err != nil {
		// We ignore the error and fall back to the max amount to download.
		// It's unlikely that we fail here, but if we do, we don't want to block downloading.
		endpoint.log.Error("unable to create encrypter", zap.Error(err))
		return downloadSizes{
			plainSize:     int64(segment.PlainSize),
			encryptedSize: int64(segment.EncryptedSize),
			orderLimit:    0,
		}
	}

	encryptedStartBlock, encryptedLimitBlock := calculateBlocks(readStart, readLimit, int64(enc.InBlockSize()))
	encryptedStart, encryptedLimit := encryptedStartBlock*int64(enc.OutBlockSize()), encryptedLimitBlock*int64(enc.OutBlockSize())
	encryptedSize := encryptedLimit - encryptedStart
	if encryptedSize > int64(segment.EncryptedSize) {
		encryptedSize = int64(segment.EncryptedSize)
	}

	// align to stripe size
	stripeSize := int64(segment.Redundancy.StripeSize())
	stripeStart, stripeLimit := alignToBlock(encryptedStart, encryptedLimit, stripeSize)

	// calculate how many shares we need to download from a node
	stripeCount := (stripeLimit - stripeStart) / stripeSize
	orderLimit := stripeCount * int64(segment.Redundancy.ShareSize)

	return downloadSizes{
		plainSize:     plainSize,
		encryptedSize: encryptedSize,
		orderLimit:    orderLimit,
	}
}
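
// A worked example for calculateDownloadSizes (illustrative numbers only,
// assuming equal encryption in/out block sizes of 64 bytes): for a remote
// segment with PlainOffset=0, PlainSize=1000 and a requested stream range
// [100, 250), plainSize = 250-100 = 150. The read range [100, 250) widens to
// blocks [1, 4), i.e. encrypted bytes [64, 256), so encryptedSize = 192
// (capped at segment.EncryptedSize). That range is widened again to whole
// stripes, and orderLimit = stripeCount * ShareSize bounds how much may be
// read from each node.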

func calculateBlocks(start, limit int64, blockSize int64) (startBlock, limitBlock int64) {
	return start / blockSize, (limit + blockSize - 1) / blockSize
}

func alignToBlock(start, limit int64, blockSize int64) (alignedStart, alignedLimit int64) {
	return (start / blockSize) * blockSize, ((limit + blockSize - 1) / blockSize) * blockSize
}
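
// For example: calculateBlocks(100, 250, 64) returns (1, 4), since byte 100
// falls in block 1 and the half-open limit 250 rounds up to block 4.
// alignToBlock(100, 250, 64) returns (64, 256): the same rounding expressed
// in bytes, with the start rounded down and the limit rounded up to block
// boundaries.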

func calculateStreamRange(object metabase.Object, req *pb.Range) (*metabase.StreamRange, error) {
	if req == nil || req.Range == nil {
		mon.Event("download_range", monkit.NewSeriesTag("type", "empty"))
		return nil, nil
	}

	if object.IsMigrated() {
		// The object is in the old format, which does not have plain_offset specified.
		// We need to fall back to returning all segments.
		return nil, nil
	}

	switch r := req.Range.(type) {
	case *pb.Range_Start:
		if r.Start == nil {
			return nil, Error.New("Start missing for Range_Start")
		}

		mon.Event("download_range", monkit.NewSeriesTag("type", "start"))

		return &metabase.StreamRange{
			PlainStart: r.Start.PlainStart,
			PlainLimit: object.TotalPlainSize,
		}, nil
	case *pb.Range_StartLimit:
		if r.StartLimit == nil {
			return nil, Error.New("StartEnd missing for Range_StartEnd")
		}

		mon.Event("download_range", monkit.NewSeriesTag("type", "startlimit"))

		return &metabase.StreamRange{
			PlainStart: r.StartLimit.PlainStart,
			PlainLimit: r.StartLimit.PlainLimit,
		}, nil
	case *pb.Range_Suffix:
		if r.Suffix == nil {
			return nil, Error.New("Suffix missing for Range_Suffix")
		}

		mon.Event("download_range", monkit.NewSeriesTag("type", "suffix"))

		return &metabase.StreamRange{
			PlainStart: object.TotalPlainSize - r.Suffix.PlainSuffix,
			PlainLimit: object.TotalPlainSize,
		}, nil
	}

	mon.Event("download_range", monkit.NewSeriesTag("type", "unsupported"))

	// if it's a new unsupported range type, let's return all data
	return nil, nil
}
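
// Example (derived from the cases above): for an object with
// TotalPlainSize = 1000, a Range_Suffix with PlainSuffix = 100 resolves to
// the stream range [900, 1000), and a Range_Start with PlainStart = 200
// resolves to [200, 1000), mirroring HTTP-style suffix and open-ended byte
// ranges.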

// ListObjects lists objects according to specific parameters.
func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListRequest) (resp *pb.ObjectListResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionList,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedPrefix,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// TODO this needs to be optimized to avoid DB call on each request
	placement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
	if err != nil {
		if buckets.ErrBucketNotFound.Has(err) {
			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
		}
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	limit := int(req.Limit)
	if limit < 0 {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "limit is negative")
	}
	metabase.ListLimit.Ensure(&limit)
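	// Ensure is assumed to clamp the limit: non-positive or over-large values
	// are replaced with the metabase-defined maximum list limit.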

	var prefix metabase.ObjectKey
	if len(req.EncryptedPrefix) != 0 {
		prefix = metabase.ObjectKey(req.EncryptedPrefix)
		if prefix[len(prefix)-1] != metabase.Delimiter {
			prefix += metabase.ObjectKey(metabase.Delimiter)
		}
	}

	// Default to Committed status for backward-compatibility with older uplinks.
	status := metabase.Committed
	if req.Status != pb.Object_INVALID {
		status = metabase.ObjectStatus(req.Status)
	}

	cursor := metabase.IterateCursor{
		Key: metabase.ObjectKey(req.EncryptedCursor),
		// TODO: set to the version from the protobuf request when it supports this
	}
	if len(cursor.Key) != 0 {
		cursor.Key = prefix + cursor.Key

		// TODO: this is a workaround to avoid duplicates while listing objects by libuplink.
		// Because version is not part of the cursor yet, and objects can have versions higher
		// than 1, we cannot use hardcoded version 1 as the default.
		// This workaround should stay in place for a longer time even after the metainfo
		// protocol is fixed, as we still want to avoid this problem for older libuplink versions.
		//
		// It should be set in case of both pending and committed objects.
		cursor.Version = metabase.MaxVersion
	}
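
	// Illustrative example: with an (encrypted) prefix "photos/" and cursor
	// "a.jpg", the effective cursor key becomes "photos/a.jpg" with
	// Version = metabase.MaxVersion, so iteration is assumed to resume after
	// every stored version of that key.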

	includeCustomMetadata := true
	includeSystemMetadata := true
	if req.UseObjectIncludes {
		includeCustomMetadata = req.ObjectIncludes.Metadata
		// Because the multipart upload UploadID depends on some system metadata fields, we need
		// to force reading them when listing pending objects, even when they are not included
		// in the options. This is used by the libuplink ListUploads method.
		includeSystemMetadata = status == metabase.Pending || !req.ObjectIncludes.ExcludeSystemMetadata
	}

	resp = &pb.ObjectListResponse{}
	if endpoint.config.TestListingQuery {
		result, err := endpoint.metabase.ListObjects(ctx,
			metabase.ListObjects{
				ProjectID:             keyInfo.ProjectID,
				BucketName:            string(req.Bucket),
				Prefix:                prefix,
				Cursor:                metabase.ListObjectsCursor(cursor),
				Recursive:             req.Recursive,
				Limit:                 limit,
				Status:                status,
				IncludeCustomMetadata: includeCustomMetadata,
				IncludeSystemMetadata: includeSystemMetadata,
			})
		if err != nil {
			return nil, endpoint.convertMetabaseErr(err)
		}

		for _, entry := range result.Objects {
			item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeSystemMetadata, includeCustomMetadata, placement)
			if err != nil {
				return nil, endpoint.convertMetabaseErr(err)
			}
			resp.Items = append(resp.Items, item)
		}
		resp.More = result.More
	} else {
		err = endpoint.metabase.IterateObjectsAllVersionsWithStatus(ctx,
			metabase.IterateObjectsWithStatus{
				ProjectID:             keyInfo.ProjectID,
				BucketName:            string(req.Bucket),
				Prefix:                prefix,
				Cursor:                cursor,
				Recursive:             req.Recursive,
				BatchSize:             limit + 1,
				Status:                status,
				IncludeCustomMetadata: includeCustomMetadata,
				IncludeSystemMetadata: includeSystemMetadata,
			}, func(ctx context.Context, it metabase.ObjectsIterator) error {
				entry := metabase.ObjectEntry{}
				for len(resp.Items) < limit && it.Next(ctx, &entry) {
					item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeSystemMetadata, includeCustomMetadata, placement)
					if err != nil {
						return err
					}
					resp.Items = append(resp.Items, item)
				}
				resp.More = it.Next(ctx, &entry)
				return nil
			},
		)
		if err != nil {
			return nil, endpoint.convertMetabaseErr(err)
		}
	}

	endpoint.log.Info("Object List", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "list"), zap.String("type", "object"))
	mon.Meter("req_list_object").Mark(1)

	return resp, nil
}

// ListPendingObjectStreams lists pending objects according to specific parameters.
func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.ObjectListPendingStreamsRequest) (resp *pb.ObjectListPendingStreamsResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionList,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	placement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
	if err != nil {
		if buckets.ErrBucketNotFound.Has(err) {
			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
		}
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	cursor := metabase.StreamIDCursor{}
	if req.StreamIdCursor != nil {
		streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamIdCursor)
		if err != nil {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
		}
		cursor.StreamID, err = uuid.FromBytes(streamID.StreamId)
		if err != nil {
			endpoint.log.Error("internal", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}
	}

	limit := int(req.Limit)
	if limit < 0 {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "limit is negative")
	}
	metabase.ListLimit.Ensure(&limit)

	resp = &pb.ObjectListPendingStreamsResponse{}
	resp.Items = []*pb.ObjectListItem{}
	err = endpoint.metabase.IteratePendingObjectsByKey(ctx,
		metabase.IteratePendingObjectsByKey{
			ObjectLocation: metabase.ObjectLocation{
				ProjectID:  keyInfo.ProjectID,
				BucketName: string(req.Bucket),
				ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
			},
			BatchSize: limit + 1,
			Cursor:    cursor,
		}, func(ctx context.Context, it metabase.ObjectsIterator) error {
			entry := metabase.ObjectEntry{}
			for len(resp.Items) < limit && it.Next(ctx, &entry) {
				item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, "", true, true, placement)
				if err != nil {
					return err
				}
				resp.Items = append(resp.Items, item)
			}
			resp.More = it.Next(ctx, &entry)
			return nil
		},
	)
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	endpoint.log.Info("List pending object streams", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "list"), zap.String("type", "object"))
	mon.Meter("req_list_pending_object_streams").Mark(1)

	return resp, nil
}

// BeginDeleteObject begins the object deletion process.
func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectBeginDeleteRequest) (resp *pb.ObjectBeginDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()

	var canRead, canList bool
	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionDelete,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionRead,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
			actionPermitted: &canRead,
			optional:        true,
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionList,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
			actionPermitted: &canList,
			optional:        true,
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	var deletedObjects []*pb.Object
	if req.GetStatus() == int32(metabase.Pending) {
		if req.StreamId == nil {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "StreamID missing")
		}
		var pbStreamID *internalpb.StreamID
		pbStreamID, err = endpoint.unmarshalSatStreamID(ctx, *(req.StreamId))
		if err == nil {
			var streamID uuid.UUID
			streamID, err = uuid.FromBytes(pbStreamID.StreamId)
			if err == nil {
				deletedObjects, err = endpoint.DeletePendingObject(ctx,
					metabase.ObjectStream{
						ProjectID:  keyInfo.ProjectID,
						BucketName: string(pbStreamID.Bucket),
						ObjectKey:  metabase.ObjectKey(pbStreamID.EncryptedObjectKey),
						Version:    metabase.Version(pbStreamID.Version),
						StreamID:   streamID,
					})
			}
		}
	} else {
		deletedObjects, err = endpoint.DeleteCommittedObject(ctx, keyInfo.ProjectID, string(req.Bucket), metabase.ObjectKey(req.EncryptedObjectKey))
	}
	if err != nil {
		if !canRead && !canList {
			// No error info is returned if neither Read nor List permission is granted
			return &pb.ObjectBeginDeleteResponse{}, nil
		}
		return nil, endpoint.convertMetabaseErr(err)
	}

	var object *pb.Object
	if canRead || canList {
		// Info about the deleted object is returned only if either Read or List permission is granted
		if err != nil {
			endpoint.log.Error("failed to construct deleted object information",
				zap.Stringer("Project ID", keyInfo.ProjectID),
				zap.String("Bucket", string(req.Bucket)),
				zap.String("Encrypted Path", string(req.EncryptedObjectKey)),
				zap.Error(err),
			)
		}
		if len(deletedObjects) > 0 {
			object = deletedObjects[0]
		}
	}

	endpoint.log.Info("Object Delete", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "delete"), zap.String("type", "object"))
	mon.Meter("req_delete_object").Mark(1)

	return &pb.ObjectBeginDeleteResponse{
		Object: object,
	}, nil
}

// GetObjectIPs returns the IP addresses of the nodes holding the pieces for
// the provided object. This is useful for knowing the locations of the pieces.
func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPsRequest) (resp *pb.ObjectGetIPsResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()
	keyInfo, err := endpoint.validateAuthAny(ctx, req.Header,
		macaroon.Action{
			Op:            macaroon.ActionRead,
			Bucket:        req.Bucket,
			EncryptedPath: req.EncryptedObjectKey,
			Time:          now,
		},
		macaroon.Action{
			Op:            macaroon.ActionList,
			Bucket:        req.Bucket,
			EncryptedPath: req.EncryptedObjectKey,
			Time:          now,
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	// TODO we may need a custom metabase request to avoid two DB calls
	object, err := endpoint.metabase.GetObjectLastCommitted(ctx, metabase.GetObjectLastCommitted{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	pieceCountByNodeID, err := endpoint.metabase.GetStreamPieceCountByNodeID(ctx,
		metabase.GetStreamPieceCountByNodeID{
			StreamID: object.StreamID,
		})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	nodeIDs := make([]storj.NodeID, 0, len(pieceCountByNodeID))
	for nodeID := range pieceCountByNodeID {
		nodeIDs = append(nodeIDs, nodeID)
	}

	nodeIPMap, err := endpoint.overlay.GetNodeIPs(ctx, nodeIDs)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	nodeIPs := make([][]byte, 0, len(nodeIPMap))
	pieceCount := int64(0)
	reliablePieceCount := int64(0)
	for nodeID, count := range pieceCountByNodeID {
		pieceCount += count

		ip, reliable := nodeIPMap[nodeID]
		if !reliable {
			continue
		}
		nodeIPs = append(nodeIPs, []byte(ip))
		reliablePieceCount += count
	}

	mon.Meter("req_get_object_ips").Mark(1)

	return &pb.ObjectGetIPsResponse{
		Ips:                nodeIPs,
		SegmentCount:       int64(object.SegmentCount),
		ReliablePieceCount: reliablePieceCount,
		PieceCount:         pieceCount,
	}, nil
}

// UpdateObjectMetadata replaces object metadata.
func (endpoint *Endpoint) UpdateObjectMetadata(ctx context.Context, req *pb.ObjectUpdateMetadataRequest) (resp *pb.ObjectUpdateMetadataResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Bucket:        req.Bucket,
		EncryptedPath: req.EncryptedObjectKey,
		Time:          time.Now(),
	})
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.Bucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if err := endpoint.checkEncryptedMetadataSize(req.EncryptedMetadata, req.EncryptedMetadataEncryptedKey); err != nil {
		return nil, err
	}

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	id, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	var encryptedMetadataNonce []byte
	if !req.EncryptedMetadataNonce.IsZero() {
		encryptedMetadataNonce = req.EncryptedMetadataNonce[:]
	}

	err = endpoint.metabase.UpdateObjectMetadata(ctx, metabase.UpdateObjectMetadata{
		ProjectID:  keyInfo.ProjectID,
		BucketName: string(req.Bucket),
		ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		StreamID:   id,

		EncryptedMetadata:             req.EncryptedMetadata,
		EncryptedMetadataNonce:        encryptedMetadataNonce,
		EncryptedMetadataEncryptedKey: req.EncryptedMetadataEncryptedKey,
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	mon.Meter("req_update_object_metadata").Mark(1)

	return &pb.ObjectUpdateMetadataResponse{}, nil
}
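
// objectToProto converts a metabase.Object into the protobuf Object returned
// to clients, backfilling legacy StreamMeta fields for compatibility with
// older uplinks.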
func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Object, rs *pb.RedundancyScheme) (*pb.Object, error) {
	expires := time.Time{}
	if object.ExpiresAt != nil {
		expires = *object.ExpiresAt
	}

	// TotalPlainSize != 0 means object was uploaded with newer uplink
	multipartObject := object.TotalPlainSize != 0 && object.FixedSegmentSize <= 0
	streamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
		Bucket:             []byte(object.BucketName),
		EncryptedObjectKey: []byte(object.ObjectKey),
		Version:            int64(object.Version),
		CreationDate:       object.CreatedAt,
		ExpirationDate:     expires,
		StreamId:           object.StreamID[:],
		MultipartObject:    multipartObject,
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(object.Encryption.CipherSuite),
			BlockSize:   int64(object.Encryption.BlockSize),
		},
		// TODO: this is the only place where placement is not added to the StreamID;
		// bucket info would be required to add placement here
	})
	if err != nil {
		return nil, err
	}

	var nonce storj.Nonce
	if len(object.EncryptedMetadataNonce) > 0 {
		nonce, err = storj.NonceFromBytes(object.EncryptedMetadataNonce)
		if err != nil {
			return nil, err
		}
	}

	streamMeta := &pb.StreamMeta{}
	err = pb.Unmarshal(object.EncryptedMetadata, streamMeta)
	if err != nil {
		return nil, err
	}

	// TODO is this enough to handle old uplinks
	if streamMeta.EncryptionBlockSize == 0 {
		streamMeta.EncryptionBlockSize = object.Encryption.BlockSize
	}
	if streamMeta.EncryptionType == 0 {
		streamMeta.EncryptionType = int32(object.Encryption.CipherSuite)
	}
	if streamMeta.NumberOfSegments == 0 {
		streamMeta.NumberOfSegments = int64(object.SegmentCount)
	}
	if streamMeta.LastSegmentMeta == nil {
		streamMeta.LastSegmentMeta = &pb.SegmentMeta{
			EncryptedKey: object.EncryptedMetadataEncryptedKey,
			KeyNonce:     object.EncryptedMetadataNonce,
		}
	}

	metadataBytes, err := pb.Marshal(streamMeta)
	if err != nil {
		return nil, err
	}

	result := &pb.Object{
		Bucket:             []byte(object.BucketName),
		EncryptedObjectKey: []byte(object.ObjectKey),
		Version:            int32(object.Version), // TODO incompatible types
		StreamId:           streamID,
		ExpiresAt:          expires,
		CreatedAt:          object.CreatedAt,

		TotalSize: object.TotalEncryptedSize,
		PlainSize: object.TotalPlainSize,

		EncryptedMetadata:             metadataBytes,
		EncryptedMetadataNonce:        nonce,
		EncryptedMetadataEncryptedKey: object.EncryptedMetadataEncryptedKey,
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(object.Encryption.CipherSuite),
			BlockSize:   int64(object.Encryption.BlockSize),
		},

		RedundancyScheme: rs,
	}

	return result, nil
}
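
// objectEntryToProtoListItem converts a metabase.ObjectEntry into a protobuf
// list item, optionally including system fields and encrypted metadata, and
// attaching a StreamID for pending objects.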
func (endpoint *Endpoint) objectEntryToProtoListItem(ctx context.Context, bucket []byte,
	entry metabase.ObjectEntry, prefixToPrependInSatStreamID metabase.ObjectKey,
	includeSystem, includeMetadata bool, placement storj.PlacementConstraint) (item *pb.ObjectListItem, err error) {

	item = &pb.ObjectListItem{
		EncryptedObjectKey: []byte(entry.ObjectKey),
		Version:            int32(entry.Version), // TODO incompatible types
		Status:             pb.Object_Status(entry.Status),
	}

	expiresAt := time.Time{}
	if entry.ExpiresAt != nil {
		expiresAt = *entry.ExpiresAt
	}

	if includeSystem {
		item.ExpiresAt = expiresAt
		item.CreatedAt = entry.CreatedAt
		item.PlainSize = entry.TotalPlainSize
	}

	if includeMetadata {
		var nonce storj.Nonce
		if len(entry.EncryptedMetadataNonce) > 0 {
			nonce, err = storj.NonceFromBytes(entry.EncryptedMetadataNonce)
			if err != nil {
				return nil, err
			}
		}

		streamMeta := &pb.StreamMeta{}
		err = pb.Unmarshal(entry.EncryptedMetadata, streamMeta)
		if err != nil {
			return nil, err
		}

		if entry.Encryption != (storj.EncryptionParameters{}) {
			streamMeta.EncryptionType = int32(entry.Encryption.CipherSuite)
			streamMeta.EncryptionBlockSize = entry.Encryption.BlockSize
		}

		if entry.SegmentCount != 0 {
			streamMeta.NumberOfSegments = int64(entry.SegmentCount)
		}

		if entry.EncryptedMetadataEncryptedKey != nil {
			streamMeta.LastSegmentMeta = &pb.SegmentMeta{
				EncryptedKey: entry.EncryptedMetadataEncryptedKey,
				KeyNonce:     entry.EncryptedMetadataNonce,
			}
		}

		metadataBytes, err := pb.Marshal(streamMeta)
		if err != nil {
			return nil, err
		}

		item.EncryptedMetadata = metadataBytes
		item.EncryptedMetadataNonce = nonce
		item.EncryptedMetadataEncryptedKey = entry.EncryptedMetadataEncryptedKey
	}

	// Add Stream ID to list items if listing is for pending objects.
	// The client requires the Stream ID to use in the MultipartInfo.
	if entry.Status == metabase.Pending {
		satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
			Bucket:             bucket,
			EncryptedObjectKey: append([]byte(prefixToPrependInSatStreamID), []byte(entry.ObjectKey)...),
			Version:            int64(entry.Version),
			CreationDate:       entry.CreatedAt,
			ExpirationDate:     expiresAt,
			StreamId:           entry.StreamID[:],
			MultipartObject:    entry.FixedSegmentSize <= 0,
			EncryptionParameters: &pb.EncryptionParameters{
				CipherSuite: pb.CipherSuite(entry.Encryption.CipherSuite),
				BlockSize:   int64(entry.Encryption.BlockSize),
			},
			Placement: int32(placement),
		})
		if err != nil {
			return nil, err
		}

		item.StreamId = &satStreamID
	}

	return item, nil
}

// DeleteCommittedObject deletes all the pieces of the storage nodes that belong
// to the specified object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
//
// TODO: see note on DeleteObjectAnyStatus.
func (endpoint *Endpoint) DeleteCommittedObject(
	ctx context.Context, projectID uuid.UUID, bucket string, object metabase.ObjectKey,
) (deletedObjects []*pb.Object, err error) {
	defer mon.Task()(&ctx, projectID.String(), bucket, object)(&err)

	req := metabase.ObjectLocation{
		ProjectID:  projectID,
		BucketName: bucket,
		ObjectKey:  object,
	}

	var result metabase.DeleteObjectResult
	if endpoint.config.ServerSideCopy {
		result, err = endpoint.metabase.DeleteObjectLastCommitted(ctx, metabase.DeleteObjectLastCommitted{
			ObjectLocation: req,
		})
	} else {
		result, err = endpoint.metabase.DeleteObjectsAllVersions(ctx, metabase.DeleteObjectsAllVersions{Locations: []metabase.ObjectLocation{req}})
	}
	if err != nil {
		return nil, Error.Wrap(err)
	}

	deletedObjects, err = endpoint.deleteObjectsPieces(ctx, result)
	if err != nil {
		endpoint.log.Error("failed to delete pointers",
			zap.Stringer("project", projectID),
			zap.String("bucket", bucket),
			zap.Binary("object", []byte(object)),
			zap.Error(err),
		)
		return deletedObjects, Error.Wrap(err)
	}

	return deletedObjects, nil
}

// DeleteObjectAnyStatus deletes all the pieces of the storage nodes that belong
// to the specified object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
//
// TODO regarding the above note: exporting for testing is fine, but we should
// name it something that will definitely never be added to the RPC set in the
// DRPC definitions. If we ever decide to add an RPC method called
// "DeleteObjectAnyStatus", the DRPC interface definitions are all that stand
// in the way of someone remotely calling this. We should name this
// InternalDeleteObjectAnyStatus or something.
func (endpoint *Endpoint) DeleteObjectAnyStatus(ctx context.Context, location metabase.ObjectLocation,
) (deletedObjects []*pb.Object, err error) {
	defer mon.Task()(&ctx, location.ProjectID.String(), location.BucketName, location.ObjectKey)(&err)

	var result metabase.DeleteObjectResult
	if endpoint.config.ServerSideCopy {
		result, err = endpoint.metabase.DeleteObjectExactVersion(ctx, metabase.DeleteObjectExactVersion{
			ObjectLocation: location,
			Version:        metabase.DefaultVersion,
		})
	} else {
		result, err = endpoint.metabase.DeleteObjectAnyStatusAllVersions(ctx, metabase.DeleteObjectAnyStatusAllVersions{
			ObjectLocation: location,
		})
	}
	if err != nil {
		return nil, Error.Wrap(err)
	}

	deletedObjects, err = endpoint.deleteObjectsPieces(ctx, result)
	if err != nil {
		endpoint.log.Error("failed to delete pointers",
			zap.Stringer("project", location.ProjectID),
			zap.String("bucket", location.BucketName),
			zap.Binary("object", []byte(location.ObjectKey)),
			zap.Error(err),
		)
		return deletedObjects, err
	}

	return deletedObjects, nil
}

// DeletePendingObject deletes all the pieces of the storage nodes that belong
// to the specified pending object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
//
// TODO: see note on DeleteObjectAnyStatus.
func (endpoint *Endpoint) DeletePendingObject(ctx context.Context, stream metabase.ObjectStream) (deletedObjects []*pb.Object, err error) {
	req := metabase.DeletePendingObject{
		ObjectStream: stream,
	}

	result, err := endpoint.metabase.DeletePendingObject(ctx, req)
	if err != nil {
		return nil, err
	}

	return endpoint.deleteObjectsPieces(ctx, result)
}
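
// deleteObjectsPieces converts the deleted objects to their protobuf form and
// triggers deletion of the objects' remote pieces from the storage nodes.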
func (endpoint *Endpoint) deleteObjectsPieces(ctx context.Context, result metabase.DeleteObjectResult) (deletedObjects []*pb.Object, err error) {
	defer mon.Task()(&ctx)(&err)

	// We should ignore client cancelling and always try to delete segments.
	ctx = context2.WithoutCancellation(ctx)

	deletedObjects = make([]*pb.Object, len(result.Objects))
	for i, object := range result.Objects {
		deletedObject, err := endpoint.objectToProto(ctx, object, endpoint.defaultRS)
		if err != nil {
			return nil, err
		}
		deletedObjects[i] = deletedObject
	}

	endpoint.deleteSegmentPieces(ctx, result.Segments)

	return deletedObjects, nil
}
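
// deleteSegmentPieces groups the deleted segments' pieces by storage node and
// requests their deletion. Failures are only logged, since any leftover pieces
// are eventually removed by garbage collection.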
func (endpoint *Endpoint) deleteSegmentPieces(ctx context.Context, segments []metabase.DeletedSegmentInfo) {
	var err error
	defer mon.Task()(&ctx)(&err)

	nodesPieces := groupPiecesByNodeID(segments)

	var requests []piecedeletion.Request
	for node, pieces := range nodesPieces {
		requests = append(requests, piecedeletion.Request{
			Node: storj.NodeURL{
				ID: node,
			},
			Pieces: pieces,
		})
	}

	// Only return an error if we failed to delete the objects. If we failed
	// to delete pieces, let the garbage collector take care of it.
	err = endpoint.deletePieces.Delete(ctx, requests)
	if err != nil {
		endpoint.log.Error("failed to delete pieces", zap.Error(err))
	}
}

// groupPiecesByNodeID groups the piece IDs to delete by the ID of the node
// that stores them.
func groupPiecesByNodeID(segments []metabase.DeletedSegmentInfo) map[storj.NodeID][]storj.PieceID {
	piecesToDelete := map[storj.NodeID][]storj.PieceID{}
	for _, segment := range segments {
		deriver := segment.RootPieceID.Deriver()
		for _, piece := range segment.Pieces {
			pieceID := deriver.Derive(piece.StorageNode, int32(piece.Number))
			piecesToDelete[piece.StorageNode] = append(piecesToDelete[piece.StorageNode], pieceID)
		}
	}

	return piecesToDelete
}
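
// For illustration only (not part of the original code): piece IDs are derived
// deterministically from the segment's root piece ID plus the storing node and
// piece number, so a segment with pieces 0 and 1 held by nodes A and B yields:
//
//	deriver := segment.RootPieceID.Deriver()
//	piecesToDelete[A] = [deriver.Derive(A, 0)]
//	piecesToDelete[B] = [deriver.Derive(B, 1)]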

// Server side move.

// BeginMoveObject begins moving an object to a different key.
func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeginMoveRequest) (resp *pb.ObjectBeginMoveResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()
	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionRead,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionDelete,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionWrite,
				Bucket:        req.NewBucket,
				EncryptedPath: req.NewEncryptedObjectKey,
				Time:          now,
			},
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	for _, bucket := range [][]byte{req.Bucket, req.NewBucket} {
		err = endpoint.validateBucket(ctx, bucket)
		if err != nil {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
		}
	}

	// if source and target buckets are different, we need to check their geofencing configs
	if !bytes.Equal(req.Bucket, req.NewBucket) {
		// TODO we may try to combine those two DB calls into a single one
		oldBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
		if err != nil {
			if buckets.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
			}
			endpoint.log.Error("unable to check bucket", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		newBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.NewBucket, keyInfo.ProjectID)
		if err != nil {
			if buckets.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.NewBucket)
			}
			endpoint.log.Error("unable to check bucket", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		if oldBucketPlacement != newBucketPlacement {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "moving object to bucket with different placement policy is not (yet) supported")
		}
	}

	result, err := endpoint.metabase.BeginMoveObject(ctx, metabase.BeginMoveObject{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	response, err := convertBeginMoveObjectResults(result)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
		Bucket:             req.Bucket,
		EncryptedObjectKey: req.EncryptedObjectKey,
		Version:            int64(result.Version),
		StreamId:           result.StreamID[:],
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(result.EncryptionParameters.CipherSuite),
			BlockSize:   int64(result.EncryptionParameters.BlockSize),
		},
	})
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Move Begins", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "move"), zap.String("type", "object"))
	mon.Meter("req_move_object_begins").Mark(1)

	response.StreamId = satStreamID
	return response, nil
}
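
// convertBeginMoveObjectResults converts a metabase.BeginMoveObjectResult into
// the protobuf ObjectBeginMoveResponse, falling back to StreamMeta for the
// metadata key and nonce of objects stored by older uplinks.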
func convertBeginMoveObjectResults(result metabase.BeginMoveObjectResult) (*pb.ObjectBeginMoveResponse, error) {
	keys := make([]*pb.EncryptedKeyAndNonce, len(result.EncryptedKeysNonces))
	for i, key := range result.EncryptedKeysNonces {
		var nonce storj.Nonce
		var err error
		if len(key.EncryptedKeyNonce) != 0 {
			nonce, err = storj.NonceFromBytes(key.EncryptedKeyNonce)
			if err != nil {
				return nil, err
			}
		}

		keys[i] = &pb.EncryptedKeyAndNonce{
			Position: &pb.SegmentPosition{
				PartNumber: int32(key.Position.Part),
				Index:      int32(key.Position.Index),
			},
			EncryptedKey:      key.EncryptedKey,
			EncryptedKeyNonce: nonce,
		}
	}

	// TODO we need this because of an uplink issue with how we are storing key and nonce
	if result.EncryptedMetadataKey == nil {
		streamMeta := &pb.StreamMeta{}
		err := pb.Unmarshal(result.EncryptedMetadata, streamMeta)
		if err != nil {
			return nil, err
		}
		if streamMeta.LastSegmentMeta != nil {
			result.EncryptedMetadataKey = streamMeta.LastSegmentMeta.EncryptedKey
			result.EncryptedMetadataKeyNonce = streamMeta.LastSegmentMeta.KeyNonce
		}
	}

	var metadataNonce storj.Nonce
	var err error
	if len(result.EncryptedMetadataKeyNonce) != 0 {
		metadataNonce, err = storj.NonceFromBytes(result.EncryptedMetadataKeyNonce)
		if err != nil {
			return nil, err
		}
	}

	return &pb.ObjectBeginMoveResponse{
		EncryptedMetadataKey:      result.EncryptedMetadataKey,
		EncryptedMetadataKeyNonce: metadataNonce,
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(result.EncryptionParameters.CipherSuite),
			BlockSize:   int64(result.EncryptionParameters.BlockSize),
		},
		SegmentKeys: keys,
	}, nil
}

// FinishMoveObject accepts new encryption keys for the moved object and updates
// the corresponding object ObjectKey and segments EncryptedKey.
func (endpoint *Endpoint) FinishMoveObject(ctx context.Context, req *pb.ObjectFinishMoveRequest) (resp *pb.ObjectFinishMoveResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Time:          time.Now(),
		Bucket:        req.NewBucket,
		EncryptedPath: req.NewEncryptedObjectKey,
	})
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.NewBucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	exists, err := endpoint.buckets.HasBucket(ctx, req.NewBucket, keyInfo.ProjectID)
	if err != nil {
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	} else if !exists {
		return nil, rpcstatus.Errorf(rpcstatus.NotFound, "target bucket not found: %s", req.NewBucket)
	}

	streamUUID, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	err = endpoint.metabase.FinishMoveObject(ctx, metabase.FinishMoveObject{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedObjectKey),
			Version:    metabase.Version(streamID.Version),
			StreamID:   streamUUID,
		},
		NewSegmentKeys:               protobufkeysToMetabase(req.NewSegmentKeys),
		NewBucket:                    string(req.NewBucket),
		NewEncryptedObjectKey:        req.NewEncryptedObjectKey,
		NewEncryptedMetadataKeyNonce: req.NewEncryptedMetadataKeyNonce,
		NewEncryptedMetadataKey:      req.NewEncryptedMetadataKey,
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	endpoint.log.Info("Object Move Finished", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "move"), zap.String("type", "object"))
	mon.Meter("req_move_object_finished").Mark(1)

	return &pb.ObjectFinishMoveResponse{}, nil
}

// Server side copy.

// BeginCopyObject begins copying an object to a different key.
func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeginCopyRequest) (resp *pb.ObjectBeginCopyResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if !endpoint.config.ServerSideCopy || endpoint.config.ServerSideCopyDisabled {
		return nil, rpcstatus.Error(rpcstatus.Unimplemented, "Unimplemented")
	}

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	now := time.Now()
	keyInfo, err := endpoint.validateAuthN(ctx, req.Header,
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionRead,
				Bucket:        req.Bucket,
				EncryptedPath: req.EncryptedObjectKey,
				Time:          now,
			},
		},
		verifyPermission{
			action: macaroon.Action{
				Op:            macaroon.ActionWrite,
				Bucket:        req.NewBucket,
				EncryptedPath: req.NewEncryptedObjectKey,
				Time:          now,
			},
		},
	)
	if err != nil {
		return nil, err
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	for _, bucket := range [][]byte{req.Bucket, req.NewBucket} {
		err = endpoint.validateBucket(ctx, bucket)
		if err != nil {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
		}
	}

	// if source and target buckets are different, we need to check their geofencing configs
	if !bytes.Equal(req.Bucket, req.NewBucket) {
		// TODO we may try to combine those two DB calls into a single one
		oldBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.Bucket, keyInfo.ProjectID)
		if err != nil {
			if buckets.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
			}
			endpoint.log.Error("unable to check bucket", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		newBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.NewBucket, keyInfo.ProjectID)
		if err != nil {
			if buckets.ErrBucketNotFound.Has(err) {
				return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.NewBucket)
			}
			endpoint.log.Error("unable to check bucket", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
		}

		if oldBucketPlacement != newBucketPlacement {
			return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "copying object to bucket with different placement policy is not (yet) supported")
		}
	}

	result, err := endpoint.metabase.BeginCopyObject(ctx, metabase.BeginCopyObject{
		ObjectLocation: metabase.ObjectLocation{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(req.Bucket),
			ObjectKey:  metabase.ObjectKey(req.EncryptedObjectKey),
		},
		VerifyLimits: func(encryptedObjectSize int64, nSegments int64) error {
			return endpoint.checkUploadLimitsForNewObject(ctx, keyInfo.ProjectID, encryptedObjectSize, nSegments)
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	response, err := convertBeginCopyObjectResults(result)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
		Bucket:             req.Bucket,
		EncryptedObjectKey: req.EncryptedObjectKey,
		Version:            int64(result.Version),
		StreamId:           result.StreamID[:],
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(result.EncryptionParameters.CipherSuite),
			BlockSize:   int64(result.EncryptionParameters.BlockSize),
		},
	})
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Copy Begins", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "copy"), zap.String("type", "object"))
	mon.Meter("req_copy_object_begins").Mark(1)

	response.StreamId = satStreamID
	return response, nil
}
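
// convertBeginCopyObjectResults converts a metabase.BeginCopyObjectResult into
// the protobuf ObjectBeginCopyResponse by reusing the move conversion, as both
// results share the same shape.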
func convertBeginCopyObjectResults(result metabase.BeginCopyObjectResult) (*pb.ObjectBeginCopyResponse, error) {
	beginMoveObjectResult, err := convertBeginMoveObjectResults(metabase.BeginMoveObjectResult(result))
	if err != nil {
		return nil, err
	}

	return &pb.ObjectBeginCopyResponse{
		EncryptedMetadataKeyNonce: beginMoveObjectResult.EncryptedMetadataKeyNonce,
		EncryptedMetadataKey:      beginMoveObjectResult.EncryptedMetadataKey,
		SegmentKeys:               beginMoveObjectResult.SegmentKeys,
		EncryptionParameters:      beginMoveObjectResult.EncryptionParameters,
	}, nil
}

// FinishCopyObject accepts new encryption keys for the object copy and updates
// the corresponding object ObjectKey and segments EncryptedKey.
func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFinishCopyRequest) (resp *pb.ObjectFinishCopyResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if !endpoint.config.ServerSideCopy || endpoint.config.ServerSideCopyDisabled {
		return nil, rpcstatus.Error(rpcstatus.Unimplemented, "Unimplemented")
	}

	endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())

	streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
		Op:            macaroon.ActionWrite,
		Time:          time.Now(),
		Bucket:        req.NewBucket,
		EncryptedPath: req.NewEncryptedObjectKey,
	})
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
	}

	endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))

	err = endpoint.validateBucket(ctx, req.NewBucket)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	if err := endpoint.checkEncryptedMetadataSize(req.NewEncryptedMetadata, req.NewEncryptedMetadataKey); err != nil {
		return nil, err
	}

	exists, err := endpoint.buckets.HasBucket(ctx, req.NewBucket, keyInfo.ProjectID)
	if err != nil {
		endpoint.log.Error("unable to check bucket", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	} else if !exists {
		return nil, rpcstatus.Errorf(rpcstatus.NotFound, "target bucket not found: %s", req.NewBucket)
	}

	streamUUID, err := uuid.FromBytes(streamID.StreamId)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	newStreamID, err := uuid.New()
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	object, err := endpoint.metabase.FinishCopyObject(ctx, metabase.FinishCopyObject{
		ObjectStream: metabase.ObjectStream{
			ProjectID:  keyInfo.ProjectID,
			BucketName: string(streamID.Bucket),
			ObjectKey:  metabase.ObjectKey(streamID.EncryptedObjectKey),
			Version:    metabase.Version(streamID.Version),
			StreamID:   streamUUID,
		},
		NewStreamID:                  newStreamID,
		NewSegmentKeys:               protobufkeysToMetabase(req.NewSegmentKeys),
		NewBucket:                    string(req.NewBucket),
		NewEncryptedObjectKey:        metabase.ObjectKey(req.NewEncryptedObjectKey),
		OverrideMetadata:             req.OverrideMetadata,
		NewEncryptedMetadata:         req.NewEncryptedMetadata,
		NewEncryptedMetadataKeyNonce: req.NewEncryptedMetadataKeyNonce,
		NewEncryptedMetadataKey:      req.NewEncryptedMetadataKey,
		VerifyLimits: func(encryptedObjectSize int64, nSegments int64) error {
			return endpoint.addStorageUsageUpToLimit(ctx, keyInfo.ProjectID, encryptedObjectSize, nSegments)
		},
	})
	if err != nil {
		return nil, endpoint.convertMetabaseErr(err)
	}

	// we can return nil redundancy because this request won't be used for downloading
	protoObject, err := endpoint.objectToProto(ctx, object, nil)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	endpoint.log.Info("Object Copy Finished", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "copy"), zap.String("type", "object"))
	mon.Meter("req_copy_object_finished").Mark(1)

	return &pb.ObjectFinishCopyResponse{
		Object: protoObject,
	}, nil
}

// protobufkeysToMetabase converts []*pb.EncryptedKeyAndNonce to []metabase.EncryptedKeyAndNonce.
func protobufkeysToMetabase(protoKeys []*pb.EncryptedKeyAndNonce) []metabase.EncryptedKeyAndNonce {
	keys := make([]metabase.EncryptedKeyAndNonce, len(protoKeys))
	for i, key := range protoKeys {
		position := metabase.SegmentPosition{}
		if key.Position != nil {
			position = metabase.SegmentPosition{
				Part:  uint32(key.Position.PartNumber),
				Index: uint32(key.Position.Index),
			}
		}
		keys[i] = metabase.EncryptedKeyAndNonce{
			EncryptedKeyNonce: key.EncryptedKeyNonce.Bytes(),
			EncryptedKey:      key.EncryptedKey,
			Position:          position,
		}
	}

	return keys
}
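
// Illustrative usage (not part of the original code): FinishMoveObject and
// FinishCopyObject pass the client's re-encrypted segment keys through this
// helper when building the metabase request, e.g.
//
//	NewSegmentKeys: protobufkeysToMetabase(req.NewSegmentKeys),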