satellite/metabase: drop pending objects table support

We decided that we won't use separate table for handling pending
objects. We need to remove related code.

https://github.com/storj/storj/issues/6421

Change-Id: I442b0f58da75409f725e08e2cd83d29ed4f91ec6
This commit is contained in:
Michal Niewrzal 2023-11-15 15:11:26 +01:00 committed by Storj Robot
parent cd9518a3c3
commit 6834c04539
19 changed files with 114 additions and 4812 deletions

View File

@ -47,11 +47,6 @@ type BeginObjectNextVersion struct {
EncryptedMetadataEncryptedKey []byte // optional EncryptedMetadataEncryptedKey []byte // optional
Encryption storj.EncryptionParameters Encryption storj.EncryptionParameters
// UsePendingObjectsTable was added to options not metabase configuration
// to be able to test scenarios with pending object in pending_objects and
// objects table with the same test case.
UsePendingObjectsTable bool
} }
// Verify verifies get object request fields. // Verify verifies get object request fields.
@ -73,7 +68,6 @@ func (opts *BeginObjectNextVersion) Verify() error {
} }
// BeginObjectNextVersion adds a pending object to the database, with automatically assigned version. // BeginObjectNextVersion adds a pending object to the database, with automatically assigned version.
// TODO at the end of transition to pending_objects table we can rename this method to just BeginObject.
func (db *DB) BeginObjectNextVersion(ctx context.Context, opts BeginObjectNextVersion) (object Object, err error) { func (db *DB) BeginObjectNextVersion(ctx context.Context, opts BeginObjectNextVersion) (object Object, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -98,34 +92,7 @@ func (db *DB) BeginObjectNextVersion(ctx context.Context, opts BeginObjectNextVe
ZombieDeletionDeadline: opts.ZombieDeletionDeadline, ZombieDeletionDeadline: opts.ZombieDeletionDeadline,
} }
if opts.UsePendingObjectsTable { if err := db.db.QueryRowContext(ctx, `
object.Status = Pending
object.Version = PendingVersion
if err := db.db.QueryRowContext(ctx, `
INSERT INTO pending_objects (
project_id, bucket_name, object_key, stream_id,
expires_at, encryption,
zombie_deletion_deadline,
encrypted_metadata, encrypted_metadata_nonce, encrypted_metadata_encrypted_key
) VALUES (
$1, $2, $3, $4,
$5, $6,
$7,
$8, $9, $10)
RETURNING created_at
`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
opts.ExpiresAt, encryptionParameters{&opts.Encryption},
opts.ZombieDeletionDeadline,
opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey,
).Scan(&object.CreatedAt); err != nil {
if code := pgerrcode.FromError(err); code == pgxerrcode.UniqueViolation {
return Object{}, Error.Wrap(ErrObjectAlreadyExists.New(""))
}
return Object{}, Error.New("unable to insert object: %w", err)
}
} else {
if err := db.db.QueryRowContext(ctx, `
INSERT INTO objects ( INSERT INTO objects (
project_id, bucket_name, object_key, version, stream_id, project_id, bucket_name, object_key, version, stream_id,
expires_at, encryption, expires_at, encryption,
@ -145,12 +112,11 @@ func (db *DB) BeginObjectNextVersion(ctx context.Context, opts BeginObjectNextVe
$8, $9, $10) $8, $9, $10)
RETURNING status, version, created_at RETURNING status, version, created_at
`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID, `, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
opts.ExpiresAt, encryptionParameters{&opts.Encryption}, opts.ExpiresAt, encryptionParameters{&opts.Encryption},
opts.ZombieDeletionDeadline, opts.ZombieDeletionDeadline,
opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey, opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey,
).Scan(&object.Status, &object.Version, &object.CreatedAt); err != nil { ).Scan(&object.Status, &object.Version, &object.CreatedAt); err != nil {
return Object{}, Error.New("unable to insert object: %w", err) return Object{}, Error.New("unable to insert object: %w", err)
}
} }
mon.Meter("object_begin").Mark(1) mon.Meter("object_begin").Mark(1)
@ -258,8 +224,6 @@ type BeginSegment struct {
RootPieceID storj.PieceID RootPieceID storj.PieceID
Pieces Pieces Pieces Pieces
UsePendingObjectsTable bool
} }
// BeginSegment verifies, whether a new segment upload can be started. // BeginSegment verifies, whether a new segment upload can be started.
@ -283,24 +247,14 @@ func (db *DB) BeginSegment(ctx context.Context, opts BeginSegment) (err error) {
// Verify that object exists and is partial. // Verify that object exists and is partial.
var exists bool var exists bool
if opts.UsePendingObjectsTable { err = db.db.QueryRowContext(ctx, `
err = db.db.QueryRowContext(ctx, ` SELECT EXISTS (
SELECT EXISTS ( SELECT 1
SELECT 1 FROM objects
FROM pending_objects WHERE (project_id, bucket_name, object_key, version, stream_id) = ($1, $2, $3, $4, $5) AND
WHERE (project_id, bucket_name, object_key, stream_id) = ($1, $2, $3, $4) status = `+statusPending+`
) )`,
`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID).Scan(&exists) opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID).Scan(&exists)
} else {
err = db.db.QueryRowContext(ctx, `
SELECT EXISTS (
SELECT 1
FROM objects
WHERE (project_id, bucket_name, object_key, version, stream_id) = ($1, $2, $3, $4, $5) AND
status = `+statusPending+`
)`,
opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID).Scan(&exists)
}
if err != nil { if err != nil {
return Error.New("unable to query object status: %w", err) return Error.New("unable to query object status: %w", err)
} }
@ -336,8 +290,6 @@ type CommitSegment struct {
Pieces Pieces Pieces Pieces
Placement storj.PlacementConstraint Placement storj.PlacementConstraint
UsePendingObjectsTable bool
} }
// CommitSegment commits segment to the database. // CommitSegment commits segment to the database.
@ -378,84 +330,45 @@ func (db *DB) CommitSegment(ctx context.Context, opts CommitSegment) (err error)
return Error.New("unable to convert pieces to aliases: %w", err) return Error.New("unable to convert pieces to aliases: %w", err)
} }
// second part will be removed when there will be no pending_objects in objects table.
// Verify that object exists and is partial. // Verify that object exists and is partial.
if opts.UsePendingObjectsTable {
_, err = db.db.ExecContext(ctx, ` _, err = db.db.ExecContext(ctx, `
INSERT INTO segments ( INSERT INTO segments (
stream_id, position, expires_at, stream_id, position, expires_at,
root_piece_id, encrypted_key_nonce, encrypted_key, root_piece_id, encrypted_key_nonce, encrypted_key,
encrypted_size, plain_offset, plain_size, encrypted_etag, encrypted_size, plain_offset, plain_size, encrypted_etag,
redundancy, redundancy,
remote_alias_pieces, remote_alias_pieces,
placement placement
) VALUES ( ) VALUES (
( (
SELECT stream_id SELECT stream_id
FROM pending_objects FROM objects
WHERE (project_id, bucket_name, object_key, stream_id) = ($12, $13, $14, $15) WHERE (project_id, bucket_name, object_key, version, stream_id) = ($12, $13, $14, $15, $16) AND
), $1, $2, status = `+statusPending+`
$3, $4, $5, ), $1, $2,
$6, $7, $8, $9, $3, $4, $5,
$10, $6, $7, $8, $9,
$11, $10,
$16 $11,
) $17
ON CONFLICT(stream_id, position) )
DO UPDATE SET ON CONFLICT(stream_id, position)
expires_at = $2, DO UPDATE SET
root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5, expires_at = $2,
encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9, root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
redundancy = $10, encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
remote_alias_pieces = $11, redundancy = $10,
placement = $16 remote_alias_pieces = $11,
placement = $17
`, opts.Position, opts.ExpiresAt, `, opts.Position, opts.ExpiresAt,
opts.RootPieceID, opts.EncryptedKeyNonce, opts.EncryptedKey, opts.RootPieceID, opts.EncryptedKeyNonce, opts.EncryptedKey,
opts.EncryptedSize, opts.PlainOffset, opts.PlainSize, opts.EncryptedETag, opts.EncryptedSize, opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
redundancyScheme{&opts.Redundancy}, redundancyScheme{&opts.Redundancy},
aliasPieces, aliasPieces,
opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
opts.Placement, opts.Placement,
) )
} else {
_, err = db.db.ExecContext(ctx, `
INSERT INTO segments (
stream_id, position, expires_at,
root_piece_id, encrypted_key_nonce, encrypted_key,
encrypted_size, plain_offset, plain_size, encrypted_etag,
redundancy,
remote_alias_pieces,
placement
) VALUES (
(
SELECT stream_id
FROM objects
WHERE (project_id, bucket_name, object_key, version, stream_id) = ($12, $13, $14, $15, $16) AND
status = `+statusPending+`
), $1, $2,
$3, $4, $5,
$6, $7, $8, $9,
$10,
$11,
$17
)
ON CONFLICT(stream_id, position)
DO UPDATE SET
expires_at = $2,
root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
redundancy = $10,
remote_alias_pieces = $11,
placement = $17
`, opts.Position, opts.ExpiresAt,
opts.RootPieceID, opts.EncryptedKeyNonce, opts.EncryptedKey,
opts.EncryptedSize, opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
redundancyScheme{&opts.Redundancy},
aliasPieces,
opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
opts.Placement,
)
}
if err != nil { if err != nil {
if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation { if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
return ErrPendingObjectMissing.New("") return ErrPendingObjectMissing.New("")
@ -485,8 +398,6 @@ type CommitInlineSegment struct {
EncryptedETag []byte EncryptedETag []byte
InlineData []byte InlineData []byte
UsePendingObjectsTable bool
} }
// CommitInlineSegment commits inline segment to the database. // CommitInlineSegment commits inline segment to the database.
@ -511,8 +422,7 @@ func (db *DB) CommitInlineSegment(ctx context.Context, opts CommitInlineSegment)
return ErrInvalidRequest.New("PlainOffset negative") return ErrInvalidRequest.New("PlainOffset negative")
} }
if opts.UsePendingObjectsTable { _, err = db.db.ExecContext(ctx, `
_, err = db.db.ExecContext(ctx, `
INSERT INTO segments ( INSERT INTO segments (
stream_id, position, expires_at, stream_id, position, expires_at,
root_piece_id, encrypted_key_nonce, encrypted_key, root_piece_id, encrypted_key_nonce, encrypted_key,
@ -521,9 +431,11 @@ func (db *DB) CommitInlineSegment(ctx context.Context, opts CommitInlineSegment)
) VALUES ( ) VALUES (
( (
SELECT stream_id SELECT stream_id
FROM pending_objects FROM objects
WHERE (project_id, bucket_name, object_key, stream_id) = ($11, $12, $13, $14) WHERE (project_id, bucket_name, object_key, version, stream_id) = ($11, $12, $13, $14, $15) AND
), $1, $2, status = `+statusPending+`
),
$1, $2,
$3, $4, $5, $3, $4, $5,
$6, $7, $8, $9, $6, $7, $8, $9,
$10 $10
@ -535,43 +447,11 @@ func (db *DB) CommitInlineSegment(ctx context.Context, opts CommitInlineSegment)
encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9, encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
inline_data = $10 inline_data = $10
`, opts.Position, opts.ExpiresAt, `, opts.Position, opts.ExpiresAt,
storj.PieceID{}, opts.EncryptedKeyNonce, opts.EncryptedKey, storj.PieceID{}, opts.EncryptedKeyNonce, opts.EncryptedKey,
len(opts.InlineData), opts.PlainOffset, opts.PlainSize, opts.EncryptedETag, len(opts.InlineData), opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
opts.InlineData, opts.InlineData,
opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
) )
} else {
_, err = db.db.ExecContext(ctx, `
INSERT INTO segments (
stream_id, position, expires_at,
root_piece_id, encrypted_key_nonce, encrypted_key,
encrypted_size, plain_offset, plain_size, encrypted_etag,
inline_data
) VALUES (
(
SELECT stream_id
FROM objects
WHERE (project_id, bucket_name, object_key, version, stream_id) = ($11, $12, $13, $14, $15) AND
status = `+statusPending+`
),
$1, $2,
$3, $4, $5,
$6, $7, $8, $9,
$10
)
ON CONFLICT(stream_id, position)
DO UPDATE SET
expires_at = $2,
root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
inline_data = $10
`, opts.Position, opts.ExpiresAt,
storj.PieceID{}, opts.EncryptedKeyNonce, opts.EncryptedKey,
len(opts.InlineData), opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
opts.InlineData,
opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
)
}
if err != nil { if err != nil {
if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation { if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
return ErrPendingObjectMissing.New("") return ErrPendingObjectMissing.New("")
@ -602,8 +482,6 @@ type CommitObject struct {
DisallowDelete bool DisallowDelete bool
UsePendingObjectsTable bool
// Versioned indicates whether an object is allowed to have multiple versions. // Versioned indicates whether an object is allowed to have multiple versions.
Versioned bool Versioned bool
} }
@ -676,8 +554,6 @@ func (db *DB) CommitObject(ctx context.Context, opts CommitObject) (object Objec
totalEncryptedSize += int64(seg.EncryptedSize) totalEncryptedSize += int64(seg.EncryptedSize)
} }
const versionArgIndex = 3
nextStatus := committedWhereVersioned(opts.Versioned) nextStatus := committedWhereVersioned(opts.Versioned)
args := []interface{}{ args := []interface{}{
@ -699,136 +575,63 @@ func (db *DB) CommitObject(ctx context.Context, opts CommitObject) (object Objec
return Error.Wrap(err) return Error.Wrap(err)
} }
if opts.UsePendingObjectsTable { nextVersion := opts.Version
opts.Version = precommit.HighestVersion + 1 if nextVersion < precommit.HighestVersion {
args[versionArgIndex] = opts.Version nextVersion = precommit.HighestVersion + 1
}
args = append(args, nextVersion)
opts.Version = nextVersion
metadataColumns := ""
if opts.OverrideEncryptedMetadata {
args = append(args, args = append(args,
opts.EncryptedMetadataNonce, opts.EncryptedMetadataNonce,
opts.EncryptedMetadata, opts.EncryptedMetadata,
opts.EncryptedMetadataEncryptedKey, opts.EncryptedMetadataEncryptedKey,
opts.OverrideEncryptedMetadata,
) )
metadataColumns = `,
encrypted_metadata_nonce = $13,
encrypted_metadata = $14,
encrypted_metadata_encrypted_key = $15
`
}
err = tx.QueryRowContext(ctx, `
UPDATE objects SET
version = $12,
status = $6,
segment_count = $7,
err = tx.QueryRowContext(ctx, ` total_plain_size = $8,
WITH delete_pending_object AS ( total_encrypted_size = $9,
DELETE FROM pending_objects fixed_segment_size = $10,
WHERE (project_id, bucket_name, object_key, stream_id) = ($1, $2, $3, $5) zombie_deletion_deadline = NULL,
RETURNING expires_at, encryption, encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key
) -- TODO should we allow to override existing encryption parameters or return error if don't match with opts?
INSERT INTO objects ( encryption = CASE
project_id, bucket_name, object_key, version, stream_id, WHEN objects.encryption = 0 AND $11 <> 0 THEN $11
status, segment_count, total_plain_size, total_encrypted_size, WHEN objects.encryption = 0 AND $11 = 0 THEN NULL
fixed_segment_size, zombie_deletion_deadline, expires_at, ELSE objects.encryption
encryption, END
encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key `+metadataColumns+`
) WHERE (project_id, bucket_name, object_key, version, stream_id) = ($1, $2, $3, $4, $5) AND
SELECT status = `+statusPending+`
$1 as project_id, $2 as bucket_name, $3 as object_key, $4::INT4 as version, $5 as stream_id, RETURNING
$6 as status, $7::INT4 as segment_count, $8::INT8 as total_plain_size, $9::INT8 as total_encrypted_size, created_at, expires_at,
$10::INT4 as fixed_segment_size, NULL::timestamp as zombie_deletion_deadline, expires_at, encrypted_metadata, encrypted_metadata_encrypted_key, encrypted_metadata_nonce,
-- TODO should we allow to override existing encryption parameters or return error if don't match with opts? encryption
CASE
WHEN encryption = 0 AND $11 <> 0 THEN $11
WHEN encryption = 0 AND $11 = 0 THEN NULL
ELSE encryption
END as
encryption,
CASE
WHEN $15::BOOL = true THEN $12
ELSE encrypted_metadata_nonce
END as
encrypted_metadata_nonce,
CASE
WHEN $15::BOOL = true THEN $13
ELSE encrypted_metadata
END as
encrypted_metadata,
CASE
WHEN $15::BOOL = true THEN $14
ELSE encrypted_metadata_encrypted_key
END as
encrypted_metadata_encrypted_key
FROM delete_pending_object
				-- we don't want ON CONFLICT clause to update existing object
-- as this way we may miss removing old object segments
RETURNING
created_at, expires_at,
encrypted_metadata, encrypted_metadata_encrypted_key, encrypted_metadata_nonce,
encryption
`, args...).Scan( `, args...).Scan(
&object.CreatedAt, &object.ExpiresAt, &object.CreatedAt, &object.ExpiresAt,
&object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey, &object.EncryptedMetadataNonce, &object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey, &object.EncryptedMetadataNonce,
encryptionParameters{&object.Encryption}, encryptionParameters{&object.Encryption},
) )
if err != nil { if err != nil {
if errors.Is(err, sql.ErrNoRows) { if errors.Is(err, sql.ErrNoRows) {
return ErrObjectNotFound.Wrap(Error.New("object with specified version and pending status is missing")) return ErrObjectNotFound.Wrap(Error.New("object with specified version and pending status is missing"))
} else if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation { } else if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
// TODO maybe we should check message if 'encryption' label is there // TODO maybe we should check message if 'encryption' label is there
return ErrInvalidRequest.New("Encryption is missing") return ErrInvalidRequest.New("Encryption is missing")
}
return Error.New("failed to update object: %w", err)
}
} else {
nextVersion := opts.Version
if nextVersion < precommit.HighestVersion {
nextVersion = precommit.HighestVersion + 1
}
args = append(args, nextVersion)
opts.Version = nextVersion
metadataColumns := ""
if opts.OverrideEncryptedMetadata {
args = append(args,
opts.EncryptedMetadataNonce,
opts.EncryptedMetadata,
opts.EncryptedMetadataEncryptedKey,
)
metadataColumns = `,
encrypted_metadata_nonce = $13,
encrypted_metadata = $14,
encrypted_metadata_encrypted_key = $15
`
}
err = tx.QueryRowContext(ctx, `
UPDATE objects SET
version = $12,
status = $6,
segment_count = $7,
total_plain_size = $8,
total_encrypted_size = $9,
fixed_segment_size = $10,
zombie_deletion_deadline = NULL,
-- TODO should we allow to override existing encryption parameters or return error if don't match with opts?
encryption = CASE
WHEN objects.encryption = 0 AND $11 <> 0 THEN $11
WHEN objects.encryption = 0 AND $11 = 0 THEN NULL
ELSE objects.encryption
END
`+metadataColumns+`
WHERE (project_id, bucket_name, object_key, version, stream_id) = ($1, $2, $3, $4, $5) AND
status = `+statusPending+`
RETURNING
created_at, expires_at,
encrypted_metadata, encrypted_metadata_encrypted_key, encrypted_metadata_nonce,
encryption
`, args...).Scan(
&object.CreatedAt, &object.ExpiresAt,
&object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey, &object.EncryptedMetadataNonce,
encryptionParameters{&object.Encryption},
)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrObjectNotFound.Wrap(Error.New("object with specified version and pending status is missing"))
} else if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
// TODO maybe we should check message if 'encryption' label is there
return ErrInvalidRequest.New("Encryption is missing")
}
return Error.New("failed to update object: %w", err)
} }
return Error.New("failed to update object: %w", err)
} }
object.StreamID = opts.StreamID object.StreamID = opts.StreamID

File diff suppressed because it is too large Load Diff

View File

@ -205,47 +205,6 @@ func (db *DB) DeletePendingObject(ctx context.Context, opts DeletePendingObject)
return result, nil return result, nil
} }
// DeletePendingObjectNew deletes a pending object.
// TODO DeletePendingObjectNew will replace DeletePendingObject when objects table will be free from pending objects.
func (db *DB) DeletePendingObjectNew(ctx context.Context, opts DeletePendingObject) (result DeleteObjectResult, err error) {
defer mon.Task()(&ctx)(&err)
if err := opts.Verify(); err != nil {
return DeleteObjectResult{}, err
}
err = withRows(db.db.QueryContext(ctx, `
WITH deleted_objects AS (
DELETE FROM pending_objects
WHERE
(project_id, bucket_name, object_key, stream_id) = ($1, $2, $3, $4)
RETURNING
stream_id, created_at, expires_at,
encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
encryption
), deleted_segments AS (
DELETE FROM segments
WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
RETURNING segments.stream_id
)
SELECT * FROM deleted_objects
`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID))(func(rows tagsql.Rows) error {
result.Removed, err = db.scanPendingObjectDeletion(ctx, opts.Location(), rows)
return err
})
if err != nil {
return DeleteObjectResult{}, err
}
if len(result.Removed) == 0 {
return DeleteObjectResult{}, ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
}
mon.Meter("object_delete").Mark(len(result.Removed))
return result, nil
}
// DeleteObjectsAllVersions deletes all versions of multiple objects from the same bucket. // DeleteObjectsAllVersions deletes all versions of multiple objects from the same bucket.
func (db *DB) DeleteObjectsAllVersions(ctx context.Context, opts DeleteObjectsAllVersions) (result DeleteObjectResult, err error) { func (db *DB) DeleteObjectsAllVersions(ctx context.Context, opts DeleteObjectsAllVersions) (result DeleteObjectResult, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -375,32 +334,6 @@ func (db *DB) scanMultipleObjectsDeletion(ctx context.Context, rows tagsql.Rows)
return objects, nil return objects, nil
} }
func (db *DB) scanPendingObjectDeletion(ctx context.Context, location ObjectLocation, rows tagsql.Rows) (objects []Object, err error) {
defer mon.Task()(&ctx)(&err)
objects = make([]Object, 0, 10)
var object Object
for rows.Next() {
object.ProjectID = location.ProjectID
object.BucketName = location.BucketName
object.ObjectKey = location.ObjectKey
err = rows.Scan(&object.StreamID,
&object.CreatedAt, &object.ExpiresAt,
&object.EncryptedMetadataNonce, &object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey,
encryptionParameters{&object.Encryption},
)
if err != nil {
return nil, Error.New("unable to delete pending object: %w", err)
}
object.Status = Pending
objects = append(objects, object)
}
return objects, nil
}
// DeleteObjectLastCommitted contains arguments necessary for deleting last committed version of object. // DeleteObjectLastCommitted contains arguments necessary for deleting last committed version of object.
type DeleteObjectLastCommitted struct { type DeleteObjectLastCommitted struct {
ObjectLocation ObjectLocation

View File

@ -32,7 +32,6 @@ func (db *DB) DeleteBucketObjects(ctx context.Context, opts DeleteBucketObjects)
deleteBatchSizeLimit.Ensure(&opts.BatchSize) deleteBatchSizeLimit.Ensure(&opts.BatchSize)
// TODO we may think about doing pending and committed objects in parallel
deletedBatchCount := int64(opts.BatchSize) deletedBatchCount := int64(opts.BatchSize)
for deletedBatchCount > 0 { for deletedBatchCount > 0 {
if err := ctx.Err(); err != nil { if err := ctx.Err(); err != nil {
@ -47,20 +46,6 @@ func (db *DB) DeleteBucketObjects(ctx context.Context, opts DeleteBucketObjects)
} }
} }
deletedBatchCount = int64(opts.BatchSize)
for deletedBatchCount > 0 {
if err := ctx.Err(); err != nil {
return deletedObjectCount, err
}
deletedBatchCount, err = db.deleteBucketPendingObjects(ctx, opts)
deletedObjectCount += deletedBatchCount
if err != nil {
return deletedObjectCount, err
}
}
return deletedObjectCount, nil return deletedObjectCount, nil
} }
@ -116,55 +101,3 @@ func (db *DB) deleteBucketObjects(ctx context.Context, opts DeleteBucketObjects)
return deletedObjectCount, nil return deletedObjectCount, nil
} }
func (db *DB) deleteBucketPendingObjects(ctx context.Context, opts DeleteBucketObjects) (deletedObjectCount int64, err error) {
defer mon.Task()(&ctx)(&err)
var query string
// TODO handle number of deleted segments
switch db.impl {
case dbutil.Cockroach:
query = `
WITH deleted_objects AS (
DELETE FROM pending_objects
WHERE (project_id, bucket_name) = ($1, $2)
LIMIT $3
RETURNING pending_objects.stream_id
), deleted_segments AS (
DELETE FROM segments
WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
RETURNING segments.stream_id
)
SELECT COUNT(1) FROM deleted_objects
`
case dbutil.Postgres:
query = `
WITH deleted_objects AS (
DELETE FROM pending_objects
WHERE stream_id IN (
SELECT stream_id FROM pending_objects
WHERE (project_id, bucket_name) = ($1, $2)
LIMIT $3
)
RETURNING pending_objects.stream_id
), deleted_segments AS (
DELETE FROM segments
WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
RETURNING segments.stream_id
)
SELECT COUNT(1) FROM deleted_objects
`
default:
return 0, Error.New("unhandled database: %v", db.impl)
}
err = db.db.QueryRowContext(ctx, query, opts.Bucket.ProjectID, []byte(opts.Bucket.BucketName), opts.BatchSize).Scan(&deletedObjectCount)
if err != nil {
return 0, Error.Wrap(err)
}
mon.Meter("object_delete").Mark64(deletedObjectCount)
return deletedObjectCount, nil
}

View File

@ -238,53 +238,6 @@ func TestDeleteBucketObjects(t *testing.T) {
metabasetest.Verify{}.Check(ctx, t, db) metabasetest.Verify{}.Check(ctx, t, db)
}) })
t.Run("pending and committed objects", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.CreateObject(ctx, t, db, obj1, 2)
obj1.ObjectKey = "some key"
obj1.Version = metabase.NextVersion
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj1,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
BatchSize: 2,
},
Deleted: 2,
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
// object only in pending_objects table
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj1,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
BatchSize: 2,
},
Deleted: 1,
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
}) })
} }

View File

@ -290,119 +290,3 @@ func (db *DB) deleteInactiveObjectsAndSegments(ctx context.Context, objects []Ob
return nil return nil
} }
// DeleteInactivePendingObjects deletes all pending objects that are inactive. Inactive means that zombie deletion deadline passed
// and no new segments were uploaded after opts.InactiveDeadline.
func (db *DB) DeleteInactivePendingObjects(ctx context.Context, opts DeleteZombieObjects) (err error) {
defer mon.Task()(&ctx)(&err)
return db.deleteObjectsAndSegmentsBatch(ctx, opts.BatchSize, func(startAfter ObjectStream, batchsize int) (last ObjectStream, err error) {
defer mon.Task()(&ctx)(&err)
query := `
SELECT
project_id, bucket_name, object_key, stream_id
FROM pending_objects
` + db.impl.AsOfSystemInterval(opts.AsOfSystemInterval) + `
WHERE
(project_id, bucket_name, object_key, stream_id) > ($1, $2, $3, $4)
AND (zombie_deletion_deadline IS NULL OR zombie_deletion_deadline < $5)
ORDER BY project_id, bucket_name, object_key, stream_id
LIMIT $6;`
objects := make([]ObjectStream, 0, batchsize)
scanErrClass := errs.Class("DB rows scan has failed")
err = withRows(db.db.QueryContext(ctx, query,
startAfter.ProjectID, []byte(startAfter.BucketName), []byte(startAfter.ObjectKey), startAfter.StreamID,
opts.DeadlineBefore,
batchsize),
)(func(rows tagsql.Rows) error {
for rows.Next() {
err = rows.Scan(&last.ProjectID, &last.BucketName, &last.ObjectKey, &last.StreamID)
if err != nil {
return scanErrClass.Wrap(err)
}
db.log.Debug("selected zombie object for deleting it",
zap.Stringer("Project", last.ProjectID),
zap.String("Bucket", last.BucketName),
zap.String("Object Key", string(last.ObjectKey)),
zap.Stringer("StreamID", last.StreamID),
)
objects = append(objects, last)
}
return nil
})
if err != nil {
if scanErrClass.Has(err) {
return ObjectStream{}, Error.New("unable to select zombie objects for deletion: %w", err)
}
db.log.Warn("unable to select zombie objects for deletion", zap.Error(Error.Wrap(err)))
return ObjectStream{}, nil
}
err = db.deleteInactiveObjectsAndSegmentsNew(ctx, objects, opts)
if err != nil {
db.log.Warn("delete from DB zombie objects", zap.Error(err))
return ObjectStream{}, nil
}
return last, nil
})
}
func (db *DB) deleteInactiveObjectsAndSegmentsNew(ctx context.Context, objects []ObjectStream, opts DeleteZombieObjects) (err error) {
defer mon.Task()(&ctx)(&err)
if len(objects) == 0 {
return nil
}
err = pgxutil.Conn(ctx, db.db, func(conn *pgx.Conn) error {
var batch pgx.Batch
for _, obj := range objects {
batch.Queue(`
WITH check_segments AS (
SELECT 1 FROM segments
WHERE stream_id = $4::BYTEA AND created_at > $5
), deleted_objects AS (
DELETE FROM pending_objects
WHERE
(project_id, bucket_name, object_key, stream_id) = ($1::BYTEA, $2::BYTEA, $3::BYTEA, $4) AND
NOT EXISTS (SELECT 1 FROM check_segments)
RETURNING stream_id
)
DELETE FROM segments
WHERE segments.stream_id IN (SELECT stream_id FROM deleted_objects)
`, obj.ProjectID, []byte(obj.BucketName), []byte(obj.ObjectKey), obj.StreamID, opts.InactiveDeadline)
}
results := conn.SendBatch(ctx, &batch)
defer func() { err = errs.Combine(err, results.Close()) }()
var segmentsDeleted int64
var errlist errs.Group
for i := 0; i < batch.Len(); i++ {
result, err := results.Exec()
errlist.Add(err)
if err == nil {
segmentsDeleted += result.RowsAffected()
}
}
// TODO calculate deleted objects
mon.Meter("zombie_segment_delete").Mark64(segmentsDeleted)
mon.Meter("segment_delete").Mark64(segmentsDeleted)
return errlist.Err()
})
if err != nil {
return Error.New("unable to delete zombie objects: %w", err)
}
return nil
}

View File

@ -482,311 +482,3 @@ func TestDeleteZombieObjects(t *testing.T) {
}) })
}) })
} }
func TestDeleteInactivePendingObjects(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj1 := metabasetest.RandObjectStream()
obj1.Version = metabase.NextVersion
obj2 := metabasetest.RandObjectStream()
obj2.Version = metabase.NextVersion
obj3 := metabasetest.RandObjectStream()
obj3.Version = metabase.NextVersion
now := time.Now()
zombieDeadline := now.Add(24 * time.Hour)
pastTime := now.Add(-1 * time.Hour)
futureTime := now.Add(1 * time.Hour)
t.Run("none", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now,
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("partial objects", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
// zombie object with default deadline
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj1,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
// zombie object with deadline time in the past
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj2,
ZombieDeletionDeadline: &pastTime,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
// pending object with expiration time in the future
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj3,
ZombieDeletionDeadline: &futureTime,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now,
InactiveDeadline: now,
},
}.Check(ctx, t, db)
metabasetest.Verify{ // the object with zombie deadline time in the past is gone
PendingObjects: []metabase.RawPendingObject{
{
PendingObjectStream: metabasetest.ObjectStreamToPending(obj1),
CreatedAt: now,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &zombieDeadline,
},
{
PendingObjectStream: metabasetest.ObjectStreamToPending(obj3),
CreatedAt: now,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &futureTime,
},
},
}.Check(ctx, t, db)
})
t.Run("partial object with segment", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj1,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &now,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.BeginSegment{
Opts: metabase.BeginSegment{
ObjectStream: obj1,
RootPieceID: storj.PieceID{1},
Pieces: []metabase.Piece{{
Number: 1,
StorageNode: testrand.NodeID(),
}},
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj1,
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
// object will be checked if is inactive but inactive time is in future
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now.Add(1 * time.Hour),
InactiveDeadline: now.Add(-1 * time.Hour),
},
}.Check(ctx, t, db)
metabasetest.Verify{
PendingObjects: []metabase.RawPendingObject{
{
PendingObjectStream: metabasetest.ObjectStreamToPending(obj1),
CreatedAt: now,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &now,
},
},
Segments: []metabase.RawSegment{
{
StreamID: obj1.StreamID,
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
CreatedAt: now,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
},
},
}.Check(ctx, t, db)
// object will be checked if is inactive and will be deleted with segment
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now.Add(1 * time.Hour),
InactiveDeadline: now.Add(2 * time.Hour),
AsOfSystemInterval: -1 * time.Microsecond,
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("batch size", func(t *testing.T) {
for i := 0; i < 33; i++ {
obj := metabasetest.RandObjectStream()
obj.Version = metabase.NextVersion
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
// use default 24h zombie deletion deadline
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
for i := byte(0); i < 3; i++ {
metabasetest.BeginSegment{
Opts: metabase.BeginSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(i)},
RootPieceID: storj.PieceID{i + 1},
Pieces: []metabase.Piece{{
Number: 1,
StorageNode: testrand.NodeID(),
}},
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(i)},
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
}
}
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now.Add(25 * time.Hour),
InactiveDeadline: now.Add(48 * time.Hour),
BatchSize: 4,
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("committed objects", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
obj1 := obj1
obj1.Version = metabase.DefaultVersion
object1, _ := metabasetest.CreateTestObject{}.Run(ctx, t, db, obj1, 1)
obj2 := obj2
obj2.Version = metabase.DefaultVersion
object2 := object1
object2.ObjectStream = obj2
metabasetest.CreateTestObject{
BeginObjectExactVersion: &metabase.BeginObjectExactVersion{
ObjectStream: object2.ObjectStream,
ZombieDeletionDeadline: &pastTime,
Encryption: metabasetest.DefaultEncryption,
},
}.Run(ctx, t, db, object2.ObjectStream, 1)
obj3 := obj3
obj3.Version = metabase.DefaultVersion
object3, _ := metabasetest.CreateTestObject{
BeginObjectExactVersion: &metabase.BeginObjectExactVersion{
ObjectStream: obj3,
ZombieDeletionDeadline: &futureTime,
Encryption: metabasetest.DefaultEncryption,
},
}.Run(ctx, t, db, obj3, 1)
expectedObj1Segment := metabase.Segment{
StreamID: obj1.StreamID,
RootPieceID: storj.PieceID{1},
CreatedAt: now,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1060,
PlainSize: 512,
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
Redundancy: metabasetest.DefaultRedundancy,
}
expectedObj2Segment := expectedObj1Segment
expectedObj2Segment.StreamID = object2.StreamID
expectedObj3Segment := expectedObj1Segment
expectedObj3Segment.StreamID = object3.StreamID
metabasetest.DeleteInactivePendingObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now,
InactiveDeadline: now.Add(1 * time.Hour),
},
}.Check(ctx, t, db)
metabasetest.Verify{ // all committed objects should NOT be deleted
Objects: []metabase.RawObject{
metabase.RawObject(object1),
metabase.RawObject(object2),
metabase.RawObject(object3),
},
Segments: []metabase.RawSegment{
metabase.RawSegment(expectedObj1Segment),
metabase.RawSegment(expectedObj2Segment),
metabase.RawSegment(expectedObj3Segment),
},
}.Check(ctx, t, db)
})
})
}

View File

@ -259,279 +259,6 @@ func TestDeletePendingObject(t *testing.T) {
}) })
} }
func TestDeletePendingObjectNew(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj := metabasetest.RandObjectStream()
now := time.Now()
zombieDeadline := now.Add(24 * time.Hour)
for _, test := range metabasetest.InvalidObjectStreams(obj) {
test := test
t.Run(test.Name, func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: test.ObjectStream,
},
ErrClass: test.ErrClass,
ErrText: test.ErrText,
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
}
t.Run("object missing", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: obj,
},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("non existing object version", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.BeginObjectExactVersion{
Opts: metabase.BeginObjectExactVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
},
}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: metabase.ObjectStream{
ProjectID: obj.ProjectID,
BucketName: obj.BucketName,
ObjectKey: obj.ObjectKey,
Version: 33,
StreamID: obj.StreamID,
},
},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{
Objects: []metabase.RawObject{
{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &zombieDeadline,
},
},
}.Check(ctx, t, db)
})
t.Run("delete committed object", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
object := metabasetest.CreateObject(ctx, t, db, obj, 0)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: object.ObjectStream,
},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{
Objects: []metabase.RawObject{
{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.CommittedUnversioned,
Encryption: metabasetest.DefaultEncryption,
},
},
}.Check(ctx, t, db)
})
t.Run("without segments with wrong StreamID", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.BeginObjectExactVersion{
Opts: metabase.BeginObjectExactVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
},
}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: metabase.ObjectStream{
ProjectID: obj.ProjectID,
BucketName: obj.BucketName,
ObjectKey: obj.ObjectKey,
Version: obj.Version,
StreamID: uuid.UUID{33},
},
},
Result: metabase.DeleteObjectResult{},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{
Objects: []metabase.RawObject{
{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &zombieDeadline,
},
},
}.Check(ctx, t, db)
})
t.Run("without segments", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
obj := obj
obj.Version = metabase.NextVersion
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
object := metabase.RawObject{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
}
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: obj,
},
Result: metabase.DeleteObjectResult{
Removed: []metabase.Object{metabase.Object(object)},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("with segments", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
obj := obj
obj.Version = metabase.NextVersion
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: 0},
RootPieceID: testrand.PieceID(),
Pieces: metabase.Pieces{{
Number: 1,
StorageNode: testrand.NodeID(),
}},
EncryptedKey: testrand.Bytes(32),
EncryptedKeyNonce: testrand.Bytes(32),
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: obj,
},
Result: metabase.DeleteObjectResult{
Removed: []metabase.Object{
{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
},
},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("with inline segment", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
obj := obj
obj.Version = metabase.NextVersion
metabasetest.BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
metabasetest.CommitInlineSegment{
Opts: metabase.CommitInlineSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: 0},
EncryptedKey: testrand.Bytes(32),
EncryptedKeyNonce: testrand.Bytes(32),
InlineData: testrand.Bytes(1024),
PlainSize: 512,
PlainOffset: 0,
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
metabasetest.DeletePendingObjectNew{
Opts: metabase.DeletePendingObject{
ObjectStream: obj,
},
Result: metabase.DeleteObjectResult{
Removed: []metabase.Object{
{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
},
},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
})
}
func TestDeleteObjectExactVersion(t *testing.T) { func TestDeleteObjectExactVersion(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) { metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj := metabasetest.RandObjectStream() obj := metabasetest.RandObjectStream()

View File

@ -30,9 +30,6 @@ func (obj *Object) IsMigrated() bool {
return obj.TotalPlainSize <= 0 return obj.TotalPlainSize <= 0
} }
// PendingObject pending object metadata.
type PendingObject RawPendingObject
// Segment segment metadata. // Segment segment metadata.
// TODO define separated struct. // TODO define separated struct.
type Segment RawSegment type Segment RawSegment
@ -320,9 +317,7 @@ func (db *DB) BucketEmpty(ctx context.Context, opts BucketEmpty) (empty bool, er
var value bool var value bool
err = db.db.QueryRowContext(ctx, ` err = db.db.QueryRowContext(ctx, `
SELECT SELECT EXISTS (SELECT 1 FROM objects WHERE (project_id, bucket_name) = ($1, $2))
(SELECT EXISTS (SELECT 1 FROM objects WHERE (project_id, bucket_name) = ($1, $2))) OR
(SELECT EXISTS (SELECT 1 FROM pending_objects WHERE (project_id, bucket_name) = ($1, $2)))
`, opts.ProjectID, []byte(opts.BucketName)).Scan(&value) `, opts.ProjectID, []byte(opts.BucketName)).Scan(&value)
if err != nil { if err != nil {
return false, Error.New("unable to query objects: %w", err) return false, Error.New("unable to query objects: %w", err)
@ -404,23 +399,6 @@ func (db *DB) TestingAllObjects(ctx context.Context) (objects []Object, err erro
return objects, nil return objects, nil
} }
// TestingAllPendingObjects gets all pending objects.
// Use only for testing purposes.
func (db *DB) TestingAllPendingObjects(ctx context.Context) (objects []PendingObject, err error) {
defer mon.Task()(&ctx)(&err)
rawObjects, err := db.testingGetAllPendingObjects(ctx)
if err != nil {
return nil, Error.Wrap(err)
}
for _, o := range rawObjects {
objects = append(objects, PendingObject(o))
}
return objects, nil
}
// TestingAllSegments gets all segments. // TestingAllSegments gets all segments.
// Use only for testing purposes. // Use only for testing purposes.
func (db *DB) TestingAllSegments(ctx context.Context) (segments []Segment, err error) { func (db *DB) TestingAllSegments(ctx context.Context) (segments []Segment, err error) {

View File

@ -1863,60 +1863,5 @@ func TestBucketEmpty(t *testing.T) {
}, },
}.Check(ctx, t, db) }.Check(ctx, t, db)
}) })
t.Run("object in pending_objects", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.CreatePendingObjectNew(ctx, t, db, obj, 0)
metabasetest.BucketEmpty{
Opts: metabase.BucketEmpty{
ProjectID: obj.ProjectID,
BucketName: obj.BucketName,
},
Result: false,
}.Check(ctx, t, db)
metabasetest.Verify{
PendingObjects: []metabase.RawPendingObject{
{
PendingObjectStream: metabasetest.ObjectStreamToPending(obj),
CreatedAt: now,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &zombieDeadline,
},
},
}.Check(ctx, t, db)
})
t.Run("object in pending_objects and in object", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
object := metabasetest.CreateObject(ctx, t, db, obj, 0)
metabasetest.CreatePendingObjectNew(ctx, t, db, obj, 0)
metabasetest.BucketEmpty{
Opts: metabase.BucketEmpty{
ProjectID: obj.ProjectID,
BucketName: obj.BucketName,
},
Result: false,
}.Check(ctx, t, db)
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(object),
},
PendingObjects: []metabase.RawPendingObject{
{
PendingObjectStream: metabasetest.ObjectStreamToPending(obj),
CreatedAt: now,
Encryption: metabasetest.DefaultEncryption,
ZombieDeletionDeadline: &zombieDeadline,
},
},
}.Check(ctx, t, db)
})
}) })
} }

View File

@ -169,23 +169,3 @@ func (opts *IteratePendingObjects) Verify() error {
} }
return nil return nil
} }
// IteratePendingObjects iterates through all pending objects.
func (db *DB) IteratePendingObjects(ctx context.Context, opts IteratePendingObjects, fn func(context.Context, PendingObjectsIterator) error) (err error) {
defer mon.Task()(&ctx)(&err)
if err = opts.Verify(); err != nil {
return err
}
return iterateAllPendingObjects(ctx, db, opts, fn)
}
// IteratePendingObjectsByKeyNew iterates through all streams of pending objects with the same ObjectKey.
// TODO should be refactored to IteratePendingObjectsByKey after full transition to pending_objects table.
func (db *DB) IteratePendingObjectsByKeyNew(ctx context.Context, opts IteratePendingObjectsByKey, fn func(context.Context, PendingObjectsIterator) error) (err error) {
defer mon.Task()(&ctx)(&err)
if err := opts.Verify(); err != nil {
return err
}
return iteratePendingObjectsByKeyNew(ctx, db, opts, fn)
}

View File

@ -36,8 +36,6 @@ func (step Verify) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB
sortRawObjects(state.Objects) sortRawObjects(state.Objects)
sortRawObjects(step.Objects) sortRawObjects(step.Objects)
sortRawPendingObjects(state.PendingObjects)
sortRawPendingObjects(step.PendingObjects)
sortRawSegments(state.Segments) sortRawSegments(state.Segments)
sortRawSegments(step.Segments) sortRawSegments(step.Segments)
@ -68,12 +66,6 @@ func sortRawObjects(objects []metabase.RawObject) {
}) })
} }
func sortRawPendingObjects(objects []metabase.RawPendingObject) {
sort.Slice(objects, func(i, j int) bool {
return objects[i].StreamID.Less(objects[j].StreamID)
})
}
func sortRawSegments(segments []metabase.RawSegment) { func sortRawSegments(segments []metabase.RawSegment) {
sort.Slice(segments, func(i, j int) bool { sort.Slice(segments, func(i, j int) bool {
if segments[i].StreamID == segments[j].StreamID { if segments[i].StreamID == segments[j].StreamID {

View File

@ -54,54 +54,6 @@ func CreatePendingObject(ctx *testcontext.Context, t testing.TB, db *metabase.DB
return object return object
} }
// CreatePendingObjectNew creates a new pending object with the specified number of segments.
// TODO CreatePendingObject will be removed when transition to pending_objects table will be complete.
func CreatePendingObjectNew(ctx *testcontext.Context, t testing.TB, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) {
obj.Version = metabase.NextVersion
BeginObjectNextVersion{
Opts: metabase.BeginObjectNextVersion{
ObjectStream: obj,
Encryption: DefaultEncryption,
UsePendingObjectsTable: true,
},
Version: metabase.PendingVersion,
}.Check(ctx, t, db)
for i := byte(0); i < numberOfSegments; i++ {
BeginSegment{
Opts: metabase.BeginSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(i)},
RootPieceID: storj.PieceID{i + 1},
Pieces: []metabase.Piece{{
Number: 1,
StorageNode: testrand.NodeID(),
}},
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(i)},
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: DefaultRedundancy,
UsePendingObjectsTable: true,
},
}.Check(ctx, t, db)
}
}
// CreateObject creates a new committed object with the specified number of segments. // CreateObject creates a new committed object with the specified number of segments.
func CreateObject(ctx *testcontext.Context, t testing.TB, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) metabase.Object { func CreateObject(ctx *testcontext.Context, t testing.TB, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) metabase.Object {
CreatePendingObject(ctx, t, db, obj, numberOfSegments) CreatePendingObject(ctx, t, db, obj, numberOfSegments)

View File

@ -468,21 +468,6 @@ func (step DeletePendingObject) Check(ctx *testcontext.Context, t testing.TB, db
compareDeleteObjectResult(t, result, step.Result) compareDeleteObjectResult(t, result, step.Result)
} }
// DeletePendingObjectNew is for testing metabase.DeletePendingObjectNew.
type DeletePendingObjectNew struct {
Opts metabase.DeletePendingObject
Result metabase.DeleteObjectResult
ErrClass *errs.Class
ErrText string
}
// Check runs the test.
func (step DeletePendingObjectNew) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB) {
result, err := db.DeletePendingObjectNew(ctx, step.Opts)
checkError(t, err, step.ErrClass, step.ErrText)
compareDeleteObjectResult(t, result, step.Result)
}
// DeleteObjectsAllVersions is for testing metabase.DeleteObjectsAllVersions. // DeleteObjectsAllVersions is for testing metabase.DeleteObjectsAllVersions.
type DeleteObjectsAllVersions struct { type DeleteObjectsAllVersions struct {
Opts metabase.DeleteObjectsAllVersions Opts metabase.DeleteObjectsAllVersions
@ -526,20 +511,6 @@ func (step DeleteZombieObjects) Check(ctx *testcontext.Context, t testing.TB, db
checkError(t, err, step.ErrClass, step.ErrText) checkError(t, err, step.ErrClass, step.ErrText)
} }
// DeleteInactivePendingObjects is for testing metabase.DeleteInactivePendingObjects.
type DeleteInactivePendingObjects struct {
Opts metabase.DeleteZombieObjects
ErrClass *errs.Class
ErrText string
}
// Check runs the test.
func (step DeleteInactivePendingObjects) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB) {
err := db.DeleteInactivePendingObjects(ctx, step.Opts)
checkError(t, err, step.ErrClass, step.ErrText)
}
// IterateCollector is for testing metabase.IterateCollector. // IterateCollector is for testing metabase.IterateCollector.
type IterateCollector []metabase.ObjectEntry type IterateCollector []metabase.ObjectEntry
@ -601,28 +572,6 @@ func (step IteratePendingObjectsByKey) Check(ctx *testcontext.Context, t *testin
require.Zero(t, diff) require.Zero(t, diff)
} }
// IteratePendingObjectsByKeyNew is for testing metabase.IteratePendingObjectsByKeyNew.
type IteratePendingObjectsByKeyNew struct {
Opts metabase.IteratePendingObjectsByKey
Result []metabase.PendingObjectEntry
ErrClass *errs.Class
ErrText string
}
// Check runs the test.
func (step IteratePendingObjectsByKeyNew) Check(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
var collector PendingObjectsCollector
err := db.IteratePendingObjectsByKeyNew(ctx, step.Opts, collector.Add)
checkError(t, err, step.ErrClass, step.ErrText)
result := []metabase.PendingObjectEntry(collector)
diff := cmp.Diff(step.Result, result, DefaultTimeDiff())
require.Zero(t, diff)
}
// IterateObjectsWithStatus is for testing metabase.IterateObjectsWithStatus. // IterateObjectsWithStatus is for testing metabase.IterateObjectsWithStatus.
type IterateObjectsWithStatus struct { type IterateObjectsWithStatus struct {
Opts metabase.IterateObjectsWithStatus Opts metabase.IterateObjectsWithStatus
@ -643,26 +592,6 @@ func (step IterateObjectsWithStatus) Check(ctx *testcontext.Context, t testing.T
require.Zero(t, diff) require.Zero(t, diff)
} }
// IteratePendingObjects is for testing metabase.IteratePendingObjects.
type IteratePendingObjects struct {
Opts metabase.IteratePendingObjects
Result []metabase.PendingObjectEntry
ErrClass *errs.Class
ErrText string
}
// Check runs the test.
func (step IteratePendingObjects) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB) {
var result PendingObjectsCollector
err := db.IteratePendingObjects(ctx, step.Opts, result.Add)
checkError(t, err, step.ErrClass, step.ErrText)
diff := cmp.Diff(step.Result, []metabase.PendingObjectEntry(result), DefaultTimeDiff())
require.Zero(t, diff)
}
// IterateLoopObjects is for testing metabase.IterateLoopObjects. // IterateLoopObjects is for testing metabase.IterateLoopObjects.
type IterateLoopObjects struct { type IterateLoopObjects struct {
Opts metabase.IterateLoopObjects Opts metabase.IterateLoopObjects

View File

@ -1,379 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package metabase
import (
"bytes"
"context"
"strings"
"github.com/zeebo/errs"
"storj.io/common/uuid"
"storj.io/private/tagsql"
)
// pendingObjectsIterator enables iteration on pending objects in a bucket.
type pendingObjectsIterator struct {
db *DB
projectID uuid.UUID
bucketName []byte
prefix ObjectKey
prefixLimit ObjectKey
batchSize int
recursive bool
includeCustomMetadata bool
includeSystemMetadata bool
curIndex int
curRows tagsql.Rows
cursor pendingObjectIterateCursor // not relative to prefix
skipPrefix ObjectKey // relative to prefix
doNextQuery func(context.Context, *pendingObjectsIterator) (_ tagsql.Rows, err error)
// failErr is set when either scan or next query fails during iteration.
failErr error
}
type pendingObjectIterateCursor struct {
Key ObjectKey
StreamID uuid.UUID
Inclusive bool
}
func iterateAllPendingObjects(ctx context.Context, db *DB, opts IteratePendingObjects, fn func(context.Context, PendingObjectsIterator) error) (err error) {
defer mon.Task()(&ctx)(&err)
it := &pendingObjectsIterator{
db: db,
projectID: opts.ProjectID,
bucketName: []byte(opts.BucketName),
prefix: opts.Prefix,
prefixLimit: prefixLimit(opts.Prefix),
batchSize: opts.BatchSize,
recursive: opts.Recursive,
includeCustomMetadata: opts.IncludeCustomMetadata,
includeSystemMetadata: opts.IncludeSystemMetadata,
curIndex: 0,
cursor: firstPendingObjectIterateCursor(opts.Recursive, opts.Cursor, opts.Prefix),
doNextQuery: doNextQueryAllPendingObjects,
}
// start from either the cursor or prefix, depending on which is larger
if lessKey(it.cursor.Key, opts.Prefix) {
it.cursor.Key = opts.Prefix
it.cursor.Inclusive = true
}
return iteratePendingObjects(ctx, it, fn)
}
func iteratePendingObjects(ctx context.Context, it *pendingObjectsIterator, fn func(context.Context, PendingObjectsIterator) error) (err error) {
batchsizeLimit.Ensure(&it.batchSize)
it.curRows, err = it.doNextQuery(ctx, it)
if err != nil {
return err
}
it.cursor.Inclusive = false
defer func() {
if rowsErr := it.curRows.Err(); rowsErr != nil {
err = errs.Combine(err, rowsErr)
}
err = errs.Combine(err, it.failErr, it.curRows.Close())
}()
return fn(ctx, it)
}
// Next returns true if there was another item and copy it in item.
func (it *pendingObjectsIterator) Next(ctx context.Context, item *PendingObjectEntry) bool {
if it.recursive {
return it.next(ctx, item)
}
// TODO: implement this on the database side
// skip until we are past the prefix we returned before.
if it.skipPrefix != "" {
for strings.HasPrefix(string(item.ObjectKey), string(it.skipPrefix)) {
if !it.next(ctx, item) {
return false
}
}
it.skipPrefix = ""
} else {
ok := it.next(ctx, item)
if !ok {
return false
}
}
// should this be treated as a prefix?
p := strings.IndexByte(string(item.ObjectKey), Delimiter)
if p >= 0 {
it.skipPrefix = item.ObjectKey[:p+1]
*item = PendingObjectEntry{
IsPrefix: true,
ObjectKey: item.ObjectKey[:p+1],
}
}
return true
}
// next returns true if there was another item and copy it in item.
func (it *pendingObjectsIterator) next(ctx context.Context, item *PendingObjectEntry) bool {
next := it.curRows.Next()
if !next {
if it.curIndex < it.batchSize {
return false
}
if it.curRows.Err() != nil {
return false
}
if !it.recursive {
afterPrefix := it.cursor.Key[len(it.prefix):]
p := bytes.IndexByte([]byte(afterPrefix), Delimiter)
if p >= 0 {
it.cursor.Key = it.prefix + prefixLimit(afterPrefix[:p+1])
it.cursor.StreamID = uuid.UUID{}
}
}
rows, err := it.doNextQuery(ctx, it)
if err != nil {
it.failErr = errs.Combine(it.failErr, err)
return false
}
if closeErr := it.curRows.Close(); closeErr != nil {
it.failErr = errs.Combine(it.failErr, closeErr, rows.Close())
return false
}
it.curRows = rows
it.curIndex = 0
if !it.curRows.Next() {
return false
}
}
err := it.scanItem(item)
if err != nil {
it.failErr = errs.Combine(it.failErr, err)
return false
}
it.curIndex++
it.cursor.Key = it.prefix + item.ObjectKey
it.cursor.StreamID = item.StreamID
return true
}
func doNextQueryAllPendingObjects(ctx context.Context, it *pendingObjectsIterator) (_ tagsql.Rows, err error) {
defer mon.Task()(&ctx)(&err)
cursorCompare := ">"
if it.cursor.Inclusive {
cursorCompare = ">="
}
if it.prefixLimit == "" {
querySelectFields := pendingObjectsQuerySelectorFields("object_key", it)
return it.db.db.QueryContext(ctx, `
SELECT
`+querySelectFields+`
FROM pending_objects
WHERE
(project_id, bucket_name, object_key, stream_id) `+cursorCompare+` ($1, $2, $3, $4)
AND (project_id, bucket_name) < ($1, $6)
AND (expires_at IS NULL OR expires_at > now())
ORDER BY (project_id, bucket_name, object_key, stream_id) ASC
LIMIT $5
`, it.projectID, it.bucketName,
[]byte(it.cursor.Key), it.cursor.StreamID,
it.batchSize,
nextBucket(it.bucketName),
)
}
fromSubstring := 1
if it.prefix != "" {
fromSubstring = len(it.prefix) + 1
}
querySelectFields := pendingObjectsQuerySelectorFields("SUBSTRING(object_key FROM $7)", it)
return it.db.db.QueryContext(ctx, `
SELECT
`+querySelectFields+`
FROM pending_objects
WHERE
(project_id, bucket_name, object_key, stream_id) `+cursorCompare+` ($1, $2, $3, $4)
AND (project_id, bucket_name, object_key) < ($1, $2, $5)
AND (expires_at IS NULL OR expires_at > now())
ORDER BY (project_id, bucket_name, object_key, stream_id) ASC
LIMIT $6
`, it.projectID, it.bucketName,
[]byte(it.cursor.Key), it.cursor.StreamID,
[]byte(it.prefixLimit),
it.batchSize,
fromSubstring,
)
}
func pendingObjectsQuerySelectorFields(objectKeyColumn string, it *pendingObjectsIterator) string {
querySelectFields := objectKeyColumn + `
,stream_id
,encryption`
if it.includeSystemMetadata {
querySelectFields += `
,created_at
,expires_at`
}
if it.includeCustomMetadata {
querySelectFields += `
,encrypted_metadata_nonce
,encrypted_metadata
,encrypted_metadata_encrypted_key`
}
return querySelectFields
}
// scanItem scans doNextQuery results into PendingObjectEntry.
func (it *pendingObjectsIterator) scanItem(item *PendingObjectEntry) (err error) {
item.IsPrefix = false
fields := []interface{}{
&item.ObjectKey,
&item.StreamID,
encryptionParameters{&item.Encryption},
}
if it.includeSystemMetadata {
fields = append(fields,
&item.CreatedAt,
&item.ExpiresAt,
)
}
if it.includeCustomMetadata {
fields = append(fields,
&item.EncryptedMetadataNonce,
&item.EncryptedMetadata,
&item.EncryptedMetadataEncryptedKey,
)
}
return it.curRows.Scan(fields...)
}
// firstPendingObjectIterateCursor adjust the cursor for a non-recursive iteration.
// The cursor is non-inclusive and we need to adjust to handle prefix as cursor properly.
// We return the next possible key from the prefix.
func firstPendingObjectIterateCursor(recursive bool, cursor PendingObjectsCursor, prefix ObjectKey) pendingObjectIterateCursor {
	// Default: carry the caller's cursor over unchanged.
	passthrough := pendingObjectIterateCursor{
		Key:      cursor.Key,
		StreamID: cursor.StreamID,
	}

	// Recursive listings need no prefix-based adjustment.
	if recursive {
		return passthrough
	}

	// When the cursor does not match the prefix, we'll return the original cursor.
	if !strings.HasPrefix(string(cursor.Key), string(prefix)) {
		return passthrough
	}

	// Handle case where:
	//   prefix: x/y/
	//   cursor: x/y/z/w
	// In this case, we want the skip prefix to be `x/y/z` + string('/' + 1).
	remainder := string(cursor.Key[len(prefix):])
	delim := strings.IndexByte(remainder, Delimiter)
	if delim < 0 {
		// The cursor is not a prefix, but instead a path inside the prefix,
		// so we can use it directly.
		return passthrough
	}

	// Return the next prefix given a scoped path; Inclusive is set because the
	// synthesized key itself may exist and must not be skipped.
	return pendingObjectIterateCursor{
		Key:       cursor.Key[:len(prefix)+delim] + ObjectKey(Delimiter+1),
		StreamID:  cursor.StreamID,
		Inclusive: true,
	}
}
// iteratePendingObjectsByKeyNew iterates over all pending objects stored under
// a single exact object key, ordered by stream id, invoking fn with the
// configured iterator. Listing is always recursive (the key is exact, not a
// prefix) and both system and custom metadata are included.
func iteratePendingObjectsByKeyNew(ctx context.Context, db *DB, opts IteratePendingObjectsByKey, fn func(context.Context, PendingObjectsIterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	// NOTE(review): a previous version copied opts.Cursor and reset a zero
	// StreamID to uuid.UUID{} — a no-op whose result was never read. Removed;
	// the iterator below reads opts.Cursor.StreamID directly as before.
	it := &pendingObjectsIterator{
		db:                    db,
		projectID:             opts.ProjectID,
		bucketName:            []byte(opts.BucketName),
		prefix:                "",
		prefixLimit:           "",
		batchSize:             opts.BatchSize,
		recursive:             true,
		includeCustomMetadata: true,
		includeSystemMetadata: true,
		curIndex:              0,
		cursor: pendingObjectIterateCursor{
			Key:      opts.ObjectKey,
			StreamID: opts.Cursor.StreamID,
		},
		doNextQuery: doNextQueryPendingStreamsByKey,
	}

	return iteratePendingObjects(ctx, it, fn)
}
// doNextQueryPendingStreamsByKey fetches the next batch of pending objects
// that share a single object key, keyed and ordered by stream id. The cursor
// stream id is exclusive, so each batch resumes strictly after the last row
// of the previous one.
func doNextQueryPendingStreamsByKey(ctx context.Context, it *pendingObjectsIterator) (_ tagsql.Rows, err error) {
	defer mon.Task()(&ctx)(&err)

	const query = `
SELECT
object_key, stream_id, encryption,
created_at, expires_at,
encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key
FROM pending_objects
WHERE
(project_id, bucket_name, object_key) = ($1, $2, $3) AND
stream_id > $4::BYTEA
ORDER BY stream_id ASC
LIMIT $5
`
	return it.db.db.QueryContext(ctx, query,
		it.projectID,
		it.bucketName,
		[]byte(it.cursor.Key),
		it.cursor.StreamID,
		it.batchSize,
	)
}

File diff suppressed because it is too large Load Diff

View File

@ -41,25 +41,6 @@ type RawObject struct {
ZombieDeletionDeadline *time.Time ZombieDeletionDeadline *time.Time
} }
// RawPendingObject defines the full pending object that is stored in the database. It should be rarely used directly.
type RawPendingObject struct {
PendingObjectStream
CreatedAt time.Time
ExpiresAt *time.Time
EncryptedMetadataNonce []byte
EncryptedMetadata []byte
EncryptedMetadataEncryptedKey []byte
Encryption storj.EncryptionParameters
// ZombieDeletionDeadline defines when the pending raw object should be deleted from the database.
// This is as a safeguard against objects that failed to upload and the client has not indicated
// whether they want to continue uploading or delete the already uploaded data.
ZombieDeletionDeadline *time.Time
}
// RawSegment defines the full segment that is stored in the database. It should be rarely used directly. // RawSegment defines the full segment that is stored in the database. It should be rarely used directly.
type RawSegment struct { type RawSegment struct {
StreamID uuid.UUID StreamID uuid.UUID
@ -96,9 +77,8 @@ type RawCopy struct {
// RawState contains full state of a table. // RawState contains full state of a table.
type RawState struct { type RawState struct {
Objects []RawObject Objects []RawObject
PendingObjects []RawPendingObject Segments []RawSegment
Segments []RawSegment
} }
// TestingGetState returns the state of the database. // TestingGetState returns the state of the database.
@ -110,11 +90,6 @@ func (db *DB) TestingGetState(ctx context.Context) (_ *RawState, err error) {
return nil, Error.New("GetState: %w", err) return nil, Error.New("GetState: %w", err)
} }
state.PendingObjects, err = db.testingGetAllPendingObjects(ctx)
if err != nil {
return nil, Error.New("GetState: %w", err)
}
state.Segments, err = db.testingGetAllSegments(ctx) state.Segments, err = db.testingGetAllSegments(ctx)
if err != nil { if err != nil {
return nil, Error.New("GetState: %w", err) return nil, Error.New("GetState: %w", err)
@ -127,7 +102,6 @@ func (db *DB) TestingGetState(ctx context.Context) (_ *RawState, err error) {
func (db *DB) TestingDeleteAll(ctx context.Context) (err error) { func (db *DB) TestingDeleteAll(ctx context.Context) (err error) {
_, err = db.db.ExecContext(ctx, ` _, err = db.db.ExecContext(ctx, `
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM objects; WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM objects;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM pending_objects;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM segments; WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM segments;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM node_aliases; WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM node_aliases;
WITH ignore_full_scan_for_test AS (SELECT 1) SELECT setval('node_alias_seq', 1, false); WITH ignore_full_scan_for_test AS (SELECT 1) SELECT setval('node_alias_seq', 1, false);
@ -198,57 +172,6 @@ func (db *DB) testingGetAllObjects(ctx context.Context) (_ []RawObject, err erro
return objs, nil return objs, nil
} }
// testingGetAllPendingObjects returns the state of the database.
func (db *DB) testingGetAllPendingObjects(ctx context.Context) (_ []RawPendingObject, err error) {
objs := []RawPendingObject{}
rows, err := db.db.QueryContext(ctx, `
WITH ignore_full_scan_for_test AS (SELECT 1)
SELECT
project_id, bucket_name, object_key, stream_id,
created_at, expires_at,
encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
encryption, zombie_deletion_deadline
FROM pending_objects
ORDER BY project_id ASC, bucket_name ASC, object_key ASC, stream_id ASC
`)
if err != nil {
return nil, Error.New("testingGetAllPendingObjects query: %w", err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var obj RawPendingObject
err := rows.Scan(
&obj.ProjectID,
&obj.BucketName,
&obj.ObjectKey,
&obj.StreamID,
&obj.CreatedAt,
&obj.ExpiresAt,
&obj.EncryptedMetadataNonce,
&obj.EncryptedMetadata,
&obj.EncryptedMetadataEncryptedKey,
encryptionParameters{&obj.Encryption},
&obj.ZombieDeletionDeadline,
)
if err != nil {
return nil, Error.New("testingGetAllPendingObjects scan failed: %w", err)
}
objs = append(objs, obj)
}
if err := rows.Err(); err != nil {
return nil, Error.New("testingGetAllPendingObjects scan failed: %w", err)
}
if len(objs) == 0 {
return nil, nil
}
return objs, nil
}
// testingGetAllSegments returns the state of the database. // testingGetAllSegments returns the state of the database.
func (db *DB) testingGetAllSegments(ctx context.Context) (_ []RawSegment, err error) { func (db *DB) testingGetAllSegments(ctx context.Context) (_ []RawSegment, err error) {
segs := []RawSegment{} segs := []RawSegment{}

View File

@ -92,5 +92,5 @@ func (chore *Chore) deleteZombieObjects(ctx context.Context) (err error) {
return err return err
} }
return chore.metabase.DeleteInactivePendingObjects(ctx, opts) return nil
} }

View File

@ -167,7 +167,6 @@ func (c Config) Metabase(applicationName string) metabase.Config {
} }
// ExtendedConfig extended config keeps additional helper fields and methods around Config. // ExtendedConfig extended config keeps additional helper fields and methods around Config.
// TODO potentially can be removed when UsePendingObjectsTableProjects won't be used anymore.
type ExtendedConfig struct { type ExtendedConfig struct {
Config Config