// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"bytes"
	"context"
	"sort"

	"github.com/zeebo/errs"

	"storj.io/common/storj"
	"storj.io/private/dbutil"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/tagsql"
)

// DeleteObjectExactVersion contains arguments necessary for deleting an exact version of an object.
type DeleteObjectExactVersion struct {
	Version Version
	ObjectLocation
}

// Verify verifies delete object fields.
func (obj *DeleteObjectExactVersion) Verify() error {
	if err := obj.ObjectLocation.Verify(); err != nil {
		return err
	}
	if obj.Version <= 0 {
		return ErrInvalidRequest.New("Version invalid: %v", obj.Version)
	}
	return nil
}

// DeleteObjectResult is the result of deleting an object.
type DeleteObjectResult struct {
	Objects  []Object
	Segments []DeletedSegmentInfo
}

// DeletedSegmentInfo contains information about a deleted segment.
type DeletedSegmentInfo struct {
	RootPieceID storj.PieceID
	Pieces      Pieces
}

// DeleteObjectAnyStatusAllVersions contains arguments necessary for deleting all versions of an object regardless of its status.
type DeleteObjectAnyStatusAllVersions struct {
	ObjectLocation
}

// DeleteObjectsAllVersions contains arguments necessary for deleting all versions of multiple objects from the same bucket.
type DeleteObjectsAllVersions struct {
	Locations []ObjectLocation
}

// Verify verifies delete objects fields.
func (delete *DeleteObjectsAllVersions) Verify() error {
	if len(delete.Locations) == 0 {
		return nil
	}

	if len(delete.Locations) > 1000 {
		return ErrInvalidRequest.New("cannot delete more than 1000 objects in a single request")
	}

	var errGroup errs.Group
	for _, location := range delete.Locations {
		errGroup.Add(location.Verify())
	}

	err := errGroup.Err()
	if err != nil {
		return err
	}

	// Verify that all locations are in the same bucket.
	first := delete.Locations[0]
	for _, item := range delete.Locations[1:] {
		if first.ProjectID != item.ProjectID || first.BucketName != item.BucketName {
			return ErrInvalidRequest.New("all objects must be in the same bucket")
		}
	}

	return nil
}

// DeleteObjectLatestVersion contains arguments necessary for deleting the latest object version.
type DeleteObjectLatestVersion struct {
	ObjectLocation
}

// DeleteObjectExactVersion deletes an exact object version.
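//
// A minimal usage sketch (assuming loc is an already populated ObjectLocation):
//
//	result, err := db.DeleteObjectExactVersion(ctx, DeleteObjectExactVersion{
//		ObjectLocation: loc,
//		Version:        1,
//	})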
func (db *DB) DeleteObjectExactVersion(ctx context.Context, opts DeleteObjectExactVersion) (result DeleteObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return DeleteObjectResult{}, err
	}
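
	// The statement below removes the object and its segments in a single round trip:
	// deleted_objects deletes the matching committed object and returns its columns,
	// deleted_segments deletes every segment of that stream, and the final SELECT joins
	// the two so the caller receives the object metadata together with the deleted pieces.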
	err = withRows(db.db.QueryContext(ctx, `
		WITH deleted_objects AS (
			DELETE FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3 AND
				version = $4 AND
				status = `+committedStatus+`
			RETURNING
				version, stream_id,
				created_at, expires_at,
				status, segment_count,
				encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
				total_plain_size, total_encrypted_size, fixed_segment_size,
				encryption
		), deleted_segments AS (
			DELETE FROM segments
			WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
			RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
		)
		SELECT
			deleted_objects.version, deleted_objects.stream_id,
			deleted_objects.created_at, deleted_objects.expires_at,
			deleted_objects.status, deleted_objects.segment_count,
			deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
			deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
			deleted_objects.encryption,
			deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
		FROM deleted_objects
		LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
	`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version))(func(rows tagsql.Rows) error {
		result.Objects, result.Segments, err = db.scanObjectDeletion(ctx, opts.ObjectLocation, rows)
		return err
	})

	if err != nil {
		return DeleteObjectResult{}, err
	}

	if len(result.Objects) == 0 {
		return DeleteObjectResult{}, storj.ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
	}

	mon.Meter("object_delete").Mark(len(result.Objects))
	mon.Meter("segment_delete").Mark(len(result.Segments))

	return result, nil
}

// DeletePendingObject contains arguments necessary for deleting a pending object.
type DeletePendingObject struct {
	ObjectStream
}

// Verify verifies that the delete pending object fields are valid.
func (opts *DeletePendingObject) Verify() error {
	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}
	return nil
}

// DeletePendingObject deletes a pending object with the specified version and stream ID.
func (db *DB) DeletePendingObject(ctx context.Context, opts DeletePendingObject) (result DeleteObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return DeleteObjectResult{}, err
	}
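
	// Same CTE pattern as DeleteObjectExactVersion, but the object must additionally match
	// the given stream_id and still be in the pending status.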
	err = withRows(db.db.QueryContext(ctx, `
		WITH deleted_objects AS (
			DELETE FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3 AND
				version = $4 AND
				stream_id = $5 AND
				status = `+pendingStatus+`
			RETURNING
				version, stream_id,
				created_at, expires_at,
				status, segment_count,
				encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
				total_plain_size, total_encrypted_size, fixed_segment_size,
				encryption
		), deleted_segments AS (
			DELETE FROM segments
			WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
			RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
		)
		SELECT
			deleted_objects.version, deleted_objects.stream_id,
			deleted_objects.created_at, deleted_objects.expires_at,
			deleted_objects.status, deleted_objects.segment_count,
			deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
			deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
			deleted_objects.encryption,
			deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
		FROM deleted_objects
		LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
	`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID))(func(rows tagsql.Rows) error {
		result.Objects, result.Segments, err = db.scanObjectDeletion(ctx, opts.Location(), rows)
		return err
	})

	if err != nil {
		return DeleteObjectResult{}, err
	}

	if len(result.Objects) == 0 {
		return DeleteObjectResult{}, storj.ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
	}

	mon.Meter("object_delete").Mark(len(result.Objects))
	mon.Meter("segment_delete").Mark(len(result.Segments))

	return result, nil
}

// DeleteObjectLatestVersion deletes the latest object version.
func (db *DB) DeleteObjectLatestVersion(ctx context.Context, opts DeleteObjectLatestVersion) (result DeleteObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return DeleteObjectResult{}, err
	}

	var query string
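
	// The query differs per backend: CockroachDB supports ORDER BY ... LIMIT directly in a
	// DELETE statement, while PostgreSQL does not, so on PostgreSQL the latest committed
	// version is first selected in a subquery.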
	switch db.impl {
	case dbutil.Cockroach:
		query = `
			WITH deleted_objects AS (
				DELETE FROM objects
				WHERE
					project_id = $1 AND
					bucket_name = $2 AND
					object_key = $3 AND
					status = ` + committedStatus + `
				ORDER BY version DESC
				LIMIT 1
				RETURNING
					version, stream_id,
					created_at, expires_at,
					status, segment_count,
					encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
					total_plain_size, total_encrypted_size, fixed_segment_size,
					encryption
			), deleted_segments AS (
				DELETE FROM segments
				WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
				RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
			)
			SELECT
				deleted_objects.version, deleted_objects.stream_id,
				deleted_objects.created_at, deleted_objects.expires_at,
				deleted_objects.status, deleted_objects.segment_count,
				deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
				deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
				deleted_objects.encryption,
				deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
			FROM deleted_objects
			LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
		`
	case dbutil.Postgres:
		query = `
			WITH deleted_objects AS (
				DELETE FROM objects
				WHERE
					project_id = $1 AND
					bucket_name = $2 AND
					object_key = $3 AND
					version IN (
						SELECT version FROM objects
						WHERE
							project_id = $1 AND
							bucket_name = $2 AND
							object_key = $3 AND
							status = ` + committedStatus + `
						ORDER BY version DESC LIMIT 1
					) AND
					status = ` + committedStatus + `
				RETURNING
					version, stream_id,
					created_at, expires_at,
					status, segment_count,
					encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
					total_plain_size, total_encrypted_size, fixed_segment_size,
					encryption
			), deleted_segments AS (
				DELETE FROM segments
				WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
				RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
			)
			SELECT
				deleted_objects.version, deleted_objects.stream_id,
				deleted_objects.created_at, deleted_objects.expires_at,
				deleted_objects.status, deleted_objects.segment_count,
				deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
				deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
				deleted_objects.encryption,
				deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
			FROM deleted_objects
			LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
		`
	default:
		return DeleteObjectResult{}, Error.New("unhandled database: %v", db.impl)
	}

	err = withRows(db.db.QueryContext(ctx, query, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey))(func(rows tagsql.Rows) error {
		result.Objects, result.Segments, err = db.scanObjectDeletion(ctx, opts.ObjectLocation, rows)
		return err
	})

	if err != nil {
		return DeleteObjectResult{}, err
	}

	if len(result.Objects) == 0 {
		return DeleteObjectResult{}, storj.ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
	}

	mon.Meter("object_delete").Mark(len(result.Objects))
	mon.Meter("segment_delete").Mark(len(result.Segments))

	return result, nil
}

// DeleteObjectAnyStatusAllVersions deletes all versions of an object regardless of its status.
func (db *DB) DeleteObjectAnyStatusAllVersions(ctx context.Context, opts DeleteObjectAnyStatusAllVersions) (result DeleteObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return DeleteObjectResult{}, err
	}
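
	// Unlike the exact-version and latest-version deletes above, this query filters only on
	// the object location, so every version of the object is removed no matter its status.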
	err = withRows(db.db.QueryContext(ctx, `
		WITH deleted_objects AS (
			DELETE FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3
			RETURNING
				version, stream_id,
				created_at, expires_at,
				status, segment_count,
				encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
				total_plain_size, total_encrypted_size, fixed_segment_size,
				encryption
		), deleted_segments AS (
			DELETE FROM segments
			WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
			RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
		)
		SELECT
			deleted_objects.version, deleted_objects.stream_id,
			deleted_objects.created_at, deleted_objects.expires_at,
			deleted_objects.status, deleted_objects.segment_count,
			deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
			deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
			deleted_objects.encryption,
			deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
		FROM deleted_objects
		LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
	`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey))(func(rows tagsql.Rows) error {
		result.Objects, result.Segments, err = db.scanObjectDeletion(ctx, opts.ObjectLocation, rows)
		return err
	})

	if err != nil {
		return DeleteObjectResult{}, err
	}

	if len(result.Objects) == 0 {
		return DeleteObjectResult{}, storj.ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
	}

	mon.Meter("object_delete").Mark(len(result.Objects))
	mon.Meter("segment_delete").Mark(len(result.Segments))

	return result, nil
}

// DeleteObjectsAllVersions deletes all versions of multiple objects from the same bucket.
func (db *DB) DeleteObjectsAllVersions(ctx context.Context, opts DeleteObjectsAllVersions) (result DeleteObjectResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(opts.Locations) == 0 {
		// Nothing to delete, no error.
		return DeleteObjectResult{}, nil
	}

	if err := opts.Verify(); err != nil {
		return DeleteObjectResult{}, err
	}

	// Verify has already checked that all object locations are in the same bucket.
	projectID := opts.Locations[0].ProjectID
	bucketName := opts.Locations[0].BucketName

	objectKeys := make([][]byte, len(opts.Locations))
	for i := range opts.Locations {
		objectKeys[i] = []byte(opts.Locations[i].ObjectKey)
	}

	// Sorting the object keys just in case.
	// TODO: Check if this is really necessary for the SQL query.
	sort.Slice(objectKeys, func(i, j int) bool {
		return bytes.Compare(objectKeys[i], objectKeys[j]) < 0
	})
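
	// The keys are passed as a single bytea array parameter and matched with ANY ($3), so one
	// statement deletes every committed version of every listed object along with its segments.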
	err = withRows(db.db.QueryContext(ctx, `
		WITH deleted_objects AS (
			DELETE FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = ANY ($3) AND
				status = `+committedStatus+`
			RETURNING
				project_id, bucket_name,
				object_key, version, stream_id,
				created_at, expires_at,
				status, segment_count,
				encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
				total_plain_size, total_encrypted_size, fixed_segment_size,
				encryption
		), deleted_segments AS (
			DELETE FROM segments
			WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
			RETURNING segments.stream_id, segments.root_piece_id, segments.remote_alias_pieces
		)
		SELECT
			deleted_objects.project_id, deleted_objects.bucket_name,
			deleted_objects.object_key, deleted_objects.version, deleted_objects.stream_id,
			deleted_objects.created_at, deleted_objects.expires_at,
			deleted_objects.status, deleted_objects.segment_count,
			deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
			deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
			deleted_objects.encryption,
			deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
		FROM deleted_objects
		LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
	`, projectID, []byte(bucketName), pgutil.ByteaArray(objectKeys)))(func(rows tagsql.Rows) error {
		result.Objects, result.Segments, err = db.scanMultipleObjectsDeletion(ctx, rows)
		return err
	})

	if err != nil {
		return DeleteObjectResult{}, err
	}

	mon.Meter("object_delete").Mark(len(result.Objects))
	mon.Meter("segment_delete").Mark(len(result.Segments))

	return result, nil
}

func (db *DB) scanObjectDeletion(ctx context.Context, location ObjectLocation, rows tagsql.Rows) (objects []Object, segments []DeletedSegmentInfo, err error) {
	defer mon.Task()(&ctx)(&err)
	defer func() { err = errs.Combine(err, rows.Close()) }()

	objects = make([]Object, 0, 10)
	segments = make([]DeletedSegmentInfo, 0, 10)

	var rootPieceID *storj.PieceID
	var object Object
	var segment DeletedSegmentInfo
	var aliasPieces AliasPieces
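
	// Each row pairs an object with one of its segments (or with NULLs when the object had no
	// segments, because of the LEFT JOIN), so the same object can appear on several rows; it
	// is appended only when its stream_id differs from the previously scanned one.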
	for rows.Next() {
		object.ProjectID = location.ProjectID
		object.BucketName = location.BucketName
		object.ObjectKey = location.ObjectKey

		err = rows.Scan(&object.Version, &object.StreamID,
			&object.CreatedAt, &object.ExpiresAt,
			&object.Status, &object.SegmentCount,
			&object.EncryptedMetadataNonce, &object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey,
			&object.TotalPlainSize, &object.TotalEncryptedSize, &object.FixedSegmentSize,
			encryptionParameters{&object.Encryption}, &rootPieceID, &aliasPieces)
		if err != nil {
			return nil, nil, Error.New("unable to delete object: %w", err)
		}

		if len(objects) == 0 || objects[len(objects)-1].StreamID != object.StreamID {
			objects = append(objects, object)
		}

		if rootPieceID != nil {
			segment.RootPieceID = *rootPieceID
			segment.Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
			if err != nil {
				return nil, nil, Error.Wrap(err)
			}
			if len(segment.Pieces) > 0 {
				segments = append(segments, segment)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, nil, Error.New("unable to delete object: %w", err)
	}

	if len(segments) == 0 {
		return objects, nil, nil
	}
	return objects, segments, nil
}

func (db *DB) scanMultipleObjectsDeletion(ctx context.Context, rows tagsql.Rows) (objects []Object, segments []DeletedSegmentInfo, err error) {
	defer mon.Task()(&ctx)(&err)
	defer func() { err = errs.Combine(err, rows.Close()) }()

	objects = make([]Object, 0, 10)
	segments = make([]DeletedSegmentInfo, 0, 10)

	var rootPieceID *storj.PieceID
	var object Object
	var segment DeletedSegmentInfo
	var aliasPieces AliasPieces
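
	// Unlike scanObjectDeletion, the object location is not known up front here, so
	// project_id, bucket_name and object_key are scanned from the row as well.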
	for rows.Next() {
		err = rows.Scan(&object.ProjectID, &object.BucketName,
			&object.ObjectKey, &object.Version, &object.StreamID,
			&object.CreatedAt, &object.ExpiresAt,
			&object.Status, &object.SegmentCount,
			&object.EncryptedMetadataNonce, &object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey,
			&object.TotalPlainSize, &object.TotalEncryptedSize, &object.FixedSegmentSize,
			encryptionParameters{&object.Encryption}, &rootPieceID, &aliasPieces)
		if err != nil {
			return nil, nil, Error.New("unable to delete object: %w", err)
		}

		if len(objects) == 0 || objects[len(objects)-1].StreamID != object.StreamID {
			objects = append(objects, object)
		}

		if rootPieceID != nil {
			segment.RootPieceID = *rootPieceID
			segment.Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
			if err != nil {
				return nil, nil, Error.Wrap(err)
			}
			if len(segment.Pieces) > 0 {
				segments = append(segments, segment)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, nil, Error.New("unable to delete object: %w", err)
	}

	if len(objects) == 0 {
		objects = nil
	}
	if len(segments) == 0 {
		return objects, nil, nil
	}

	return objects, segments, nil
}