// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"context"
	"database/sql"
	"errors"

	pgxerrcode "github.com/jackc/pgerrcode"

	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/dbutil/pgutil/pgerrcode"
	"storj.io/private/dbutil/txutil"
	"storj.io/private/tagsql"
)

// BeginMoveObjectResult holds the data needed to begin moving an object.
type BeginMoveObjectResult BeginMoveCopyResults

// EncryptedKeyAndNonce holds a single segment position, encrypted key and nonce.
type EncryptedKeyAndNonce struct {
	Position          SegmentPosition
	EncryptedKeyNonce []byte
	EncryptedKey      []byte
}

// BeginMoveObject holds all data needed to begin the move object method.
type BeginMoveObject struct {
	ObjectLocation
}

// BeginMoveCopyResults holds all data needed to begin move and copy object methods.
type BeginMoveCopyResults struct {
	StreamID                  uuid.UUID
	Version                   Version
	EncryptedMetadata         []byte
	EncryptedMetadataKeyNonce []byte
	EncryptedMetadataKey      []byte
	EncryptedKeysNonces       []EncryptedKeyAndNonce
	EncryptionParameters      storj.EncryptionParameters
}

// BeginMoveObject collects all data needed to begin object move procedure.
func (db *DB) BeginMoveObject(ctx context.Context, opts BeginMoveObject) (_ BeginMoveObjectResult, err error) {
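	// Move reuses the shared begin step with the move-specific segment limit
	// and no extra verifyLimits callback.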
	result, err := db.beginMoveCopyObject(ctx, opts.ObjectLocation, MoveSegmentLimit, nil)
	if err != nil {
		return BeginMoveObjectResult{}, err
	}

	return BeginMoveObjectResult(result), nil
}

// beginMoveCopyObject collects all data needed to begin object move/copy procedure.
func (db *DB) beginMoveCopyObject(ctx context.Context, location ObjectLocation, segmentLimit int64, verifyLimits func(encryptedObjectSize int64, nSegments int64) error) (result BeginMoveCopyResults, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := location.Verify(); err != nil {
		return BeginMoveCopyResults{}, err
	}
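
	// Only the most recently committed object at the source location is
	// considered for the move.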
	object, err := db.GetObjectLastCommitted(ctx, GetObjectLastCommitted{
		ObjectLocation: ObjectLocation{
			ProjectID:  location.ProjectID,
			BucketName: location.BucketName,
			ObjectKey:  location.ObjectKey,
		},
	})
	if err != nil {
		return BeginMoveCopyResults{}, err
	}

	if int64(object.SegmentCount) > segmentLimit {
		return BeginMoveCopyResults{}, ErrInvalidRequest.New("object has too many segments (%d). Limit is %d.", object.SegmentCount, segmentLimit)
	}

	if verifyLimits != nil {
		err = verifyLimits(object.TotalEncryptedSize, int64(object.SegmentCount))
		if err != nil {
			return BeginMoveCopyResults{}, err
		}
	}
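
	// Collect the per-segment encrypted keys and nonces so the caller can
	// re-encrypt them for the object's new location.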
	err = withRows(db.db.QueryContext(ctx, `
		SELECT
			position, encrypted_key_nonce, encrypted_key
		FROM segments
		WHERE stream_id = $1
		ORDER BY stream_id, position ASC
	`, object.StreamID))(func(rows tagsql.Rows) error {
		for rows.Next() {
			var keys EncryptedKeyAndNonce

			err = rows.Scan(&keys.Position, &keys.EncryptedKeyNonce, &keys.EncryptedKey)
			if err != nil {
				return Error.New("failed to scan segments: %w", err)
			}

			result.EncryptedKeysNonces = append(result.EncryptedKeysNonces, keys)
		}

		return nil
	})
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		return BeginMoveCopyResults{}, Error.New("unable to fetch object segments: %w", err)
	}
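
	// Return the object-level metadata and encryption parameters alongside
	// the segment keys collected above.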
	result.StreamID = object.StreamID
	result.Version = object.Version
	result.EncryptionParameters = object.Encryption
	result.EncryptedMetadata = object.EncryptedMetadata
	result.EncryptedMetadataKey = object.EncryptedMetadataEncryptedKey
	result.EncryptedMetadataKeyNonce = object.EncryptedMetadataNonce

	return result, nil
}

// FinishMoveObject holds all data needed to finish object move.
type FinishMoveObject struct {
	ObjectStream
	NewBucket             string
	NewSegmentKeys        []EncryptedKeyAndNonce
	NewEncryptedObjectKey []byte
	// Optional; required only if the object has encrypted metadata.
	NewEncryptedMetadataKeyNonce storj.Nonce
	NewEncryptedMetadataKey      []byte
}

// Verify verifies metabase.FinishMoveObject data.
func (finishMove FinishMoveObject) Verify() error {
	if err := finishMove.ObjectStream.Verify(); err != nil {
		return err
	}

	switch {
	case len(finishMove.NewBucket) == 0:
		return ErrInvalidRequest.New("NewBucket is missing")
	case len(finishMove.NewEncryptedObjectKey) == 0:
		return ErrInvalidRequest.New("NewEncryptedObjectKey is missing")
	}

	return nil
}

// FinishMoveObject accepts new encryption keys for the moved object and updates the corresponding object ObjectKey and segments' EncryptedKey.
func (db *DB) FinishMoveObject(ctx context.Context, opts FinishMoveObject) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return err
	}
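
	// Run all updates in a single transaction so that a failed move leaves
	// both the object row and its segments untouched.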
	err = txutil.WithTx(ctx, db.db, nil, func(ctx context.Context, tx tagsql.Tx) (err error) {
		targetVersion := opts.Version

		useNewVersion := false
		highestVersion := Version(0)
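		// Scan existing versions at the destination: a committed unversioned
		// object there is a conflict, while a pending object occupying the
		// source version forces the move onto a fresh version above the
		// highest one seen.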
		err = withRows(tx.QueryContext(ctx, `
			SELECT version, status
			FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3
			ORDER BY version ASC
		`, opts.ProjectID, []byte(opts.NewBucket), opts.NewEncryptedObjectKey))(func(rows tagsql.Rows) error {
			for rows.Next() {
				var status ObjectStatus
				var version Version

				err = rows.Scan(&version, &status)
				if err != nil {
					return Error.New("failed to scan objects: %w", err)
				}

				if status == CommittedUnversioned {
					return ErrObjectAlreadyExists.New("")
				} else if status == Pending && version == opts.Version {
					useNewVersion = true
				}
				highestVersion = version
			}

			return nil
		})
		if err != nil {
			return Error.Wrap(err)
		}

		if useNewVersion {
			targetVersion = highestVersion + 1
		}
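
		// Move the object row to its new bucket, key and version. The metadata
		// key and nonce are replaced only when the object actually carries
		// encrypted metadata; otherwise the stored values are kept as-is.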
		updateObjectsQuery := `
			UPDATE objects SET
				bucket_name = $1,
				object_key = $2,
				version = $9,
				encrypted_metadata_encrypted_key = CASE WHEN objects.encrypted_metadata IS NOT NULL
					THEN $3
					ELSE objects.encrypted_metadata_encrypted_key
				END,
				encrypted_metadata_nonce = CASE WHEN objects.encrypted_metadata IS NOT NULL
					THEN $4
					ELSE objects.encrypted_metadata_nonce
				END
			WHERE
				project_id = $5 AND
				bucket_name = $6 AND
				object_key = $7 AND
				version = $8
			RETURNING
				segment_count,
				objects.encrypted_metadata IS NOT NULL AND LENGTH(objects.encrypted_metadata) > 0 AS has_metadata,
				stream_id
		`

		var segmentsCount int
		var hasMetadata bool
		var streamID uuid.UUID
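
		// RETURNING hands back segment_count, has_metadata and stream_id so the
		// moved object can be cross-checked before its segments are touched.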
		row := tx.QueryRowContext(ctx, updateObjectsQuery, []byte(opts.NewBucket), opts.NewEncryptedObjectKey, opts.NewEncryptedMetadataKey, opts.NewEncryptedMetadataKeyNonce, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, targetVersion)
		if err = row.Scan(&segmentsCount, &hasMetadata, &streamID); err != nil {
			if code := pgerrcode.FromError(err); code == pgxerrcode.UniqueViolation {
				return Error.Wrap(ErrObjectAlreadyExists.New(""))
			} else if errors.Is(err, sql.ErrNoRows) {
				return ErrObjectNotFound.New("object not found")
			}
			return Error.New("unable to update object: %w", err)
		}
		if streamID != opts.StreamID {
			return ErrObjectNotFound.New("object was changed during move")
		}
		if segmentsCount != len(opts.NewSegmentKeys) {
			return ErrInvalidRequest.New("wrong number of segment keys received")
		}
		if hasMetadata {
			switch {
			case opts.NewEncryptedMetadataKeyNonce.IsZero() && len(opts.NewEncryptedMetadataKey) != 0:
				return ErrInvalidRequest.New("EncryptedMetadataKeyNonce is missing")
			case len(opts.NewEncryptedMetadataKey) == 0 && !opts.NewEncryptedMetadataKeyNonce.IsZero():
				return ErrInvalidRequest.New("EncryptedMetadataKey is missing")
			}
		}
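
		// Flatten the new segment keys into parallel arrays so all segments can
		// be updated in a single bulk statement.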
		var newSegmentKeys struct {
			Positions          []int64
			EncryptedKeys      [][]byte
			EncryptedKeyNonces [][]byte
		}

		for _, u := range opts.NewSegmentKeys {
			newSegmentKeys.EncryptedKeys = append(newSegmentKeys.EncryptedKeys, u.EncryptedKey)
			newSegmentKeys.EncryptedKeyNonces = append(newSegmentKeys.EncryptedKeyNonces, u.EncryptedKeyNonce)
			newSegmentKeys.Positions = append(newSegmentKeys.Positions, int64(u.Position.Encode()))
		}
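
		// unnest zips the arrays back into rows, matching each new key and
		// nonce to its segment by encoded position.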
		updateResult, err := tx.ExecContext(ctx, `
			UPDATE segments SET
				encrypted_key_nonce = P.encrypted_key_nonce,
				encrypted_key = P.encrypted_key
			FROM (SELECT unnest($2::INT8[]), unnest($3::BYTEA[]), unnest($4::BYTEA[])) as P(position, encrypted_key_nonce, encrypted_key)
			WHERE
				stream_id = $1 AND
				segments.position = P.position
		`, opts.StreamID, pgutil.Int8Array(newSegmentKeys.Positions), pgutil.ByteaArray(newSegmentKeys.EncryptedKeyNonces), pgutil.ByteaArray(newSegmentKeys.EncryptedKeys))
		if err != nil {
			return Error.Wrap(err)
		}

		affected, err := updateResult.RowsAffected()
		if err != nil {
			return Error.New("failed to get rows affected: %w", err)
		}
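
		// Fewer affected rows than provided keys means a referenced segment no
		// longer exists; failing here rolls the whole transaction back.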
		if affected != int64(len(newSegmentKeys.Positions)) {
			return Error.New("segment is missing")
		}

		return nil
	})
	if err != nil {
		return err
	}

	mon.Meter("finish_move_object").Mark(1)

	return nil
}