// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"context"
	"database/sql"
	"errors"
	"time"

	"github.com/zeebo/errs"

	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/dbutil/txutil"
	"storj.io/private/tagsql"
)

// BeginCopyObjectResult holds data needed to finish copy object.
type BeginCopyObjectResult struct {
	StreamID                  uuid.UUID
	EncryptedMetadata         []byte
	EncryptedMetadataKeyNonce []byte
	EncryptedMetadataKey      []byte
	EncryptedKeysNonces       []EncryptedKeyAndNonce
	EncryptionParameters      storj.EncryptionParameters
}

// BeginCopyObject holds all data needed to begin the copy object method.
type BeginCopyObject struct {
	Version Version
	ObjectLocation
}

// BeginCopyObject collects all data needed to begin the object copy procedure.
func (db *DB) BeginCopyObject(ctx context.Context, opts BeginCopyObject) (result BeginCopyObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.ObjectLocation.Verify(); err != nil {
		return BeginCopyObjectResult{}, err
	}

	if opts.Version <= 0 {
		return BeginCopyObjectResult{}, ErrInvalidRequest.New("Version invalid: %v", opts.Version)
	}

	var segmentCount int64

	err = db.db.QueryRowContext(ctx, `
		SELECT
			stream_id, encryption, segment_count,
			encrypted_metadata_encrypted_key, encrypted_metadata_nonce, encrypted_metadata
		FROM objects
		WHERE
			project_id = $1 AND
			bucket_name = $2 AND
			object_key = $3 AND
			version = $4 AND
			status = `+committedStatus,
		opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version).
		Scan(
			&result.StreamID,
			encryptionParameters{&result.EncryptionParameters},
			&segmentCount,
			&result.EncryptedMetadataKey, &result.EncryptedMetadataKeyNonce, &result.EncryptedMetadata,
		)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return BeginCopyObjectResult{}, storj.ErrObjectNotFound.Wrap(err)
		}
		return BeginCopyObjectResult{}, Error.New("unable to query object status: %w", err)
	}

	if segmentCount > CopySegmentLimit {
		return BeginCopyObjectResult{}, Error.New("object to copy has too many segments (%d). Limit is %d.", segmentCount, CopySegmentLimit)
	}

	err = withRows(db.db.QueryContext(ctx, `
		SELECT
			position, encrypted_key_nonce, encrypted_key
		FROM segments
		WHERE stream_id = $1
		ORDER BY stream_id, position ASC
	`, result.StreamID))(func(rows tagsql.Rows) error {
		for rows.Next() {
			var keys EncryptedKeyAndNonce

			err = rows.Scan(&keys.Position, &keys.EncryptedKeyNonce, &keys.EncryptedKey)
			if err != nil {
				return Error.New("failed to scan segments: %w", err)
			}

			result.EncryptedKeysNonces = append(result.EncryptedKeysNonces, keys)
		}

		return nil
	})
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		return BeginCopyObjectResult{}, Error.New("unable to fetch object segments: %w", err)
	}

	return result, nil
}
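
// A minimal sketch of how the two halves of the copy flow fit together,
// assuming a *DB named db and an already-known source location (the values
// below are illustrative, not taken from this package):
//
//	begin, err := db.BeginCopyObject(ctx, BeginCopyObject{
//		Version:        1,
//		ObjectLocation: sourceLocation,
//	})
//	if err != nil {
//		return err
//	}
//	// The caller re-encrypts begin.EncryptedKeysNonces (and optionally the
//	// metadata key) for the destination, then finishes the copy:
//	copied, err := db.FinishCopyObject(ctx, FinishCopyObject{ /* new keys... */ })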

// FinishCopyObject holds all data needed to finish object copy.
type FinishCopyObject struct {
	ObjectStream
	NewBucket             string
	NewEncryptedObjectKey ObjectKey
	NewStreamID           uuid.UUID

	OverrideMetadata             bool
	NewEncryptedMetadata         []byte
	NewEncryptedMetadataKeyNonce storj.Nonce
	NewEncryptedMetadataKey      []byte

	NewSegmentKeys []EncryptedKeyAndNonce
}

// Verify verifies metabase.FinishCopyObject data.
func (finishCopy FinishCopyObject) Verify() error {
	if err := finishCopy.ObjectStream.Verify(); err != nil {
		return err
	}

	switch {
	case len(finishCopy.NewBucket) == 0:
		return ErrInvalidRequest.New("NewBucket is missing")
	case finishCopy.NewStreamID.IsZero():
		return ErrInvalidRequest.New("NewStreamID is missing")
	case finishCopy.ObjectStream.StreamID == finishCopy.NewStreamID:
		return ErrInvalidRequest.New("StreamIDs are identical")
	case len(finishCopy.NewEncryptedObjectKey) == 0:
		return ErrInvalidRequest.New("NewEncryptedObjectKey is missing")
	}

	if finishCopy.OverrideMetadata {
		if finishCopy.NewEncryptedMetadata == nil && (!finishCopy.NewEncryptedMetadataKeyNonce.IsZero() || finishCopy.NewEncryptedMetadataKey != nil) {
			return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must not be set if EncryptedMetadata is not set")
		} else if finishCopy.NewEncryptedMetadata != nil && (finishCopy.NewEncryptedMetadataKeyNonce.IsZero() || finishCopy.NewEncryptedMetadataKey == nil) {
			return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must be set if EncryptedMetadata is set")
		}
	} else {
		switch {
		case finishCopy.NewEncryptedMetadataKeyNonce.IsZero() && len(finishCopy.NewEncryptedMetadataKey) != 0:
			return ErrInvalidRequest.New("EncryptedMetadataKeyNonce is missing")
		case len(finishCopy.NewEncryptedMetadataKey) == 0 && !finishCopy.NewEncryptedMetadataKeyNonce.IsZero():
			return ErrInvalidRequest.New("EncryptedMetadataKey is missing")
		}
	}

	return nil
}
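
// A compact restatement of Verify's metadata rules (a summary of the checks
// above, not additional behavior):
//
//	OverrideMetadata == true:  NewEncryptedMetadata, NewEncryptedMetadataKeyNonce
//	                           and NewEncryptedMetadataKey must be all set or all unset.
//	OverrideMetadata == false: NewEncryptedMetadataKeyNonce and NewEncryptedMetadataKey
//	                           must be both set or both unset.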

// FinishCopyObject accepts new encryption keys for the copied object and
// inserts the corresponding new object ObjectKey and segments EncryptedKey.
// It returns the object at the destination location.
func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (object Object, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return Object{}, err
	}

	newObject := Object{}
	var copyMetadata []byte

	err = txutil.WithTx(ctx, db.db, nil, func(ctx context.Context, tx tagsql.Tx) (err error) {
		sourceObject, ancestorStreamID, objectAtDestination, err := getObjectAtCopySourceAndDestination(ctx, tx, opts)
		if err != nil {
			return err
		}

		if int(sourceObject.SegmentCount) != len(opts.NewSegmentKeys) {
			return ErrInvalidRequest.New("wrong number of segment keys received (received %d, need %d)", len(opts.NewSegmentKeys), sourceObject.SegmentCount)
		}

		var newSegments struct {
			Positions          []int64
			EncryptedKeys      [][]byte
			EncryptedKeyNonces [][]byte
		}

		for _, u := range opts.NewSegmentKeys {
			newSegments.EncryptedKeys = append(newSegments.EncryptedKeys, u.EncryptedKey)
			newSegments.EncryptedKeyNonces = append(newSegments.EncryptedKeyNonces, u.EncryptedKeyNonce)
			newSegments.Positions = append(newSegments.Positions, int64(u.Position.Encode()))
		}
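
		// Note: SegmentPosition.Encode packs the part and index numbers into a
		// single unsigned 64-bit value (part in the high bits), so the int64
		// positions collected above order the same way as (part, index) pairs.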

		positions := make([]int64, sourceObject.SegmentCount)

		rootPieceIDs := make([][]byte, sourceObject.SegmentCount)

		expiresAts := make([]*time.Time, sourceObject.SegmentCount)
		encryptedSizes := make([]int32, sourceObject.SegmentCount)
		plainSizes := make([]int32, sourceObject.SegmentCount)
		plainOffsets := make([]int64, sourceObject.SegmentCount)
		inlineDatas := make([][]byte, sourceObject.SegmentCount)

		redundancySchemes := make([]int64, sourceObject.SegmentCount)
		err = withRows(db.db.QueryContext(ctx, `
			SELECT
				position,
				expires_at,
				root_piece_id,
				encrypted_size, plain_offset, plain_size,
				redundancy,
				inline_data
			FROM segments
			WHERE stream_id = $1
			ORDER BY position ASC
			LIMIT $2
		`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
			index := 0
			for rows.Next() {
				err := rows.Scan(
					&positions[index],
					&expiresAts[index],
					&rootPieceIDs[index],
					&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
					&redundancySchemes[index],
					&inlineDatas[index],
				)
				if err != nil {
					return err
				}
				index++
			}

			if err := rows.Err(); err != nil {
				return err
			}

			if index != int(sourceObject.SegmentCount) {
				return Error.New("could not load all of the segment information")
			}

			return nil
		})
		if err != nil {
			return Error.New("unable to copy object: %w", err)
		}
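
		// Cross-check the caller-supplied keys against the source segments:
		// every source position needs a matching new key, and we track whether
		// the object has any remote (non-inline) segments, since only remote
		// segments need the segment_copies bookkeeping below.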
		onlyInlineSegments := true
		for index := range positions {
			if newSegments.Positions[index] != positions[index] {
				return Error.New("missing new segment keys for segment %d", positions[index])
			}
			if onlyInlineSegments && (encryptedSizes[index] > 0) && len(inlineDatas[index]) == 0 {
				onlyInlineSegments = false
			}
		}

		if opts.OverrideMetadata {
			copyMetadata = opts.NewEncryptedMetadata
		} else {
			copyMetadata = sourceObject.EncryptedMetadata
		}

		if objectAtDestination != nil {
			deletedObjects, err := db.deleteObjectExactVersionServerSideCopy(
				ctx, DeleteObjectExactVersion{
					Version: opts.Version,
					ObjectLocation: ObjectLocation{
						ProjectID:  objectAtDestination.ProjectID,
						BucketName: objectAtDestination.BucketName,
						ObjectKey:  objectAtDestination.ObjectKey,
					},
				}, tx,
			)
			if err != nil {
				return Error.New("unable to delete existing object at copy destination: %w", err)
			}

			// The object at the destination was the ancestor!
			// Now that the ancestor of the source object is removed, we need to change the target ancestor.
			if ancestorStreamID == objectAtDestination.StreamID {
				if len(deletedObjects) == 0 {
					return Error.New("ancestor is gone, please retry operation")
				}

				ancestorStreamID = *deletedObjects[0].PromotedAncestor
			}
		}
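
		// Deleting an object that served as an ancestor promotes one of its
		// remaining copies to be the new ancestor, and the promoted copy is
		// reported via PromotedAncestor; that is what allows re-pointing
		// ancestorStreamID above instead of failing the whole copy.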

		// TODO we need to handle metadata correctly (copy from original object or replace)
		row := tx.QueryRowContext(ctx, `
			INSERT INTO objects (
				project_id, bucket_name, object_key, version, stream_id,
				expires_at, status, segment_count,
				encryption,
				encrypted_metadata, encrypted_metadata_nonce, encrypted_metadata_encrypted_key,
				total_plain_size, total_encrypted_size, fixed_segment_size,
				zombie_deletion_deadline
			) VALUES (
				$1, $2, $3, $4, $5,
				$6,`+committedStatus+`, $7,
				$8,
				$9, $10, $11,
				$12, $13, $14, null
			)
			RETURNING
				created_at`,
			opts.ProjectID, opts.NewBucket, opts.NewEncryptedObjectKey, opts.Version, opts.NewStreamID,
			sourceObject.ExpiresAt, sourceObject.SegmentCount,
			encryptionParameters{&sourceObject.Encryption},
			copyMetadata, opts.NewEncryptedMetadataKeyNonce, opts.NewEncryptedMetadataKey,
			sourceObject.TotalPlainSize, sourceObject.TotalEncryptedSize, sourceObject.FixedSegmentSize,
		)

		newObject = sourceObject
		err = row.Scan(&newObject.CreatedAt)
		if err != nil {
			return Error.New("unable to copy object: %w", err)
		}
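
		// Insert all copied segment rows in one statement by zipping the
		// parallel arrays together with UNNEST.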
		_, err = tx.ExecContext(ctx, `
			INSERT INTO segments (
				stream_id, position, expires_at,
				encrypted_key_nonce, encrypted_key,
				root_piece_id,
				redundancy,
				encrypted_size, plain_offset, plain_size,
				inline_data
			) SELECT
				$1, UNNEST($2::INT8[]), UNNEST($3::timestamptz[]),
				UNNEST($4::BYTEA[]), UNNEST($5::BYTEA[]),
				UNNEST($6::BYTEA[]),
				UNNEST($7::INT8[]),
				UNNEST($8::INT4[]), UNNEST($9::INT8[]), UNNEST($10::INT4[]),
				UNNEST($11::BYTEA[])
		`, opts.NewStreamID, pgutil.Int8Array(newSegments.Positions), pgutil.NullTimestampTZArray(expiresAts),
			pgutil.ByteaArray(newSegments.EncryptedKeyNonces), pgutil.ByteaArray(newSegments.EncryptedKeys),
			pgutil.ByteaArray(rootPieceIDs),
			pgutil.Int8Array(redundancySchemes),
			pgutil.Int4Array(encryptedSizes), pgutil.Int8Array(plainOffsets), pgutil.Int4Array(plainSizes),
			pgutil.ByteaArray(inlineDatas),
		)
		if err != nil {
			return Error.New("unable to copy segments: %w", err)
		}

		if onlyInlineSegments {
			return nil
		}
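
		// Remote pieces are not duplicated on storage nodes; the copy only
		// records a reference to the ancestor stream that owns the pieces.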
		_, err = tx.ExecContext(ctx, `
			INSERT INTO segment_copies (
				stream_id, ancestor_stream_id
			) VALUES (
				$1, $2
			)
		`, opts.NewStreamID, ancestorStreamID)
		if err != nil {
			return Error.New("unable to copy object: %w", err)
		}

		return nil
	})

	if err != nil {
		return Object{}, err
	}

	newObject.StreamID = opts.NewStreamID
	newObject.BucketName = opts.NewBucket
	newObject.ObjectKey = opts.NewEncryptedObjectKey
	newObject.EncryptedMetadata = copyMetadata
	newObject.EncryptedMetadataEncryptedKey = opts.NewEncryptedMetadataKey
	if !opts.NewEncryptedMetadataKeyNonce.IsZero() {
		newObject.EncryptedMetadataNonce = opts.NewEncryptedMetadataKeyNonce[:]
	}

	mon.Meter("finish_copy_object").Mark(1)

	return newObject, nil
}

// getObjectAtCopySourceAndDestination fetches the following in a single query:
// - object at copy source location (error if it's not there)
// - source ancestor stream id (if any)
// - object at copy destination location (if any).
func getObjectAtCopySourceAndDestination(
	ctx context.Context, tx tagsql.Tx, opts FinishCopyObject,
) (sourceObject Object, ancestorStreamID uuid.UUID, destinationObject *Object, err error) {
	defer mon.Task()(&ctx)(&err)

	var ancestorStreamIDBytes []byte
	// get objects at source and destination (if any)
	rows, err := tx.QueryContext(ctx, `
		SELECT
			objects.stream_id,
			bucket_name,
			object_key,
			expires_at,
			segment_count,
			encrypted_metadata,
			total_plain_size, total_encrypted_size, fixed_segment_size,
			encryption,
			segment_copies.ancestor_stream_id
		FROM objects
		LEFT JOIN segment_copies ON objects.stream_id = segment_copies.stream_id
		WHERE
			project_id = $1 AND
			bucket_name = $3 AND
			object_key = $4 AND
			version = $2 AND
			status = `+committedStatus+`
		UNION ALL
		SELECT
			objects.stream_id,
			bucket_name,
			object_key,
			expires_at,
			segment_count,
			encrypted_metadata,
			total_plain_size, total_encrypted_size, fixed_segment_size,
			encryption,
			NULL
		FROM objects
		WHERE
			project_id = $1 AND
			bucket_name = $5 AND
			object_key = $6 AND
			version = $2 AND
			status = `+committedStatus,
		opts.ProjectID, opts.Version,
		[]byte(opts.BucketName), opts.ObjectKey,
		opts.NewBucket, opts.NewEncryptedObjectKey)
	if err != nil {
		return Object{}, uuid.UUID{}, nil, err
	}
	defer func() {
		err = errs.Combine(err, rows.Err())
		err = errs.Combine(err, rows.Close())
	}()
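
	// The UNION ALL yields one row (only the source matched) or two. The code
	// below expects the source row first; the bucket/key comparisons guard
	// that expectation rather than trusting row order blindly.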

	if !rows.Next() {
		return Object{}, uuid.UUID{}, nil, storj.ErrObjectNotFound.New("source object not found")
	}

	err = rows.Scan(
		&sourceObject.StreamID,
		&sourceObject.BucketName,
		&sourceObject.ObjectKey,
		&sourceObject.ExpiresAt,
		&sourceObject.SegmentCount,
		&sourceObject.EncryptedMetadata,
		&sourceObject.TotalPlainSize, &sourceObject.TotalEncryptedSize, &sourceObject.FixedSegmentSize,
		encryptionParameters{&sourceObject.Encryption},
		&ancestorStreamIDBytes,
	)
	if err != nil {
		return Object{}, uuid.UUID{}, nil, Error.New("unable to query object status: %w", err)
	}
	if sourceObject.BucketName != opts.BucketName || sourceObject.ObjectKey != opts.ObjectKey {
		return Object{}, uuid.UUID{}, nil, Error.New("source object is gone")
	}

	sourceObject.ProjectID = opts.ProjectID
	sourceObject.Version = opts.Version
	sourceObject.Status = Committed

	if len(ancestorStreamIDBytes) != 0 {
		// The source object was already a copy; the new copy becomes yet another copy of the existing ancestor.
		ancestorStreamID, err = uuid.FromBytes(ancestorStreamIDBytes)
		if err != nil {
			return Object{}, uuid.UUID{}, nil, err
		}
	} else {
		// The source object was not a copy; it will now become an ancestor (unless it has only inline segments).
		ancestorStreamID = sourceObject.StreamID
	}

	if rows.Next() {
		var _bogusBytes []byte
		destinationObject = &Object{}
		destinationObject.ProjectID = opts.ProjectID
		destinationObject.BucketName = opts.NewBucket
		destinationObject.ObjectKey = opts.NewEncryptedObjectKey
		// There is an object at the destination.
		// We will delete it before doing the copy.
		err := rows.Scan(
			&destinationObject.StreamID,
			&destinationObject.BucketName,
			&destinationObject.ObjectKey,
			&destinationObject.ExpiresAt,
			&destinationObject.SegmentCount,
			&destinationObject.EncryptedMetadata,
			&destinationObject.TotalPlainSize, &destinationObject.TotalEncryptedSize, &destinationObject.FixedSegmentSize,
			encryptionParameters{&destinationObject.Encryption},
			&_bogusBytes,
		)
		if err != nil {
			return Object{}, uuid.UUID{}, nil, Error.New("error while reading existing object at destination: %w", err)
		}

		if destinationObject.BucketName != opts.NewBucket || destinationObject.ObjectKey != opts.NewEncryptedObjectKey {
			return Object{}, uuid.UUID{}, nil, Error.New("unexpected object at destination location")
		}
	}

	if rows.Next() {
		return Object{}, uuid.UUID{}, nil, Error.New("expected 1 or 2 rows, got 3 or more")
	}

	return sourceObject, ancestorStreamID, destinationObject, nil
}