// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"context"
	"database/sql"
	"errors"
	"time"

	pgxerrcode "github.com/jackc/pgerrcode"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/private/dbutil/pgutil/pgerrcode"
	"storj.io/private/dbutil/txutil"
	"storj.io/private/tagsql"
)

// we need to disable PlainSize validation for old uplinks.
const validatePlainSize = false

// defaultZombieDeletionPeriod is used when the caller does not provide a ZombieDeletionDeadline.
const defaultZombieDeletionPeriod = 24 * time.Hour

var (
	// ErrObjectNotFound is used to indicate that the object does not exist.
	ErrObjectNotFound = errs.Class("object not found")
	// ErrInvalidRequest is used to indicate invalid requests.
	ErrInvalidRequest = errs.Class("metabase: invalid request")
	// ErrFailedPrecondition is used to indicate that some conditions in the request have failed.
	ErrFailedPrecondition = errs.Class("metabase: failed precondition")
	// ErrConflict is used to indicate a conflict with the request.
	ErrConflict = errs.Class("metabase: conflict")
)

// BeginObjectNextVersion contains arguments necessary for starting an object upload.
type BeginObjectNextVersion struct {
	ObjectStream

	ExpiresAt              *time.Time
	ZombieDeletionDeadline *time.Time

	EncryptedMetadata             []byte // optional
	EncryptedMetadataNonce        []byte // optional
	EncryptedMetadataEncryptedKey []byte // optional

	Encryption storj.EncryptionParameters

	// UsePendingObjectsTable was added to the options, rather than to the
	// metabase configuration, to make it possible to test scenarios with a
	// pending object in the pending_objects and objects tables with the
	// same test case.
	UsePendingObjectsTable bool
}

// Verify verifies request fields.
func (opts *BeginObjectNextVersion) Verify() error {
	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}

	if opts.Version != NextVersion {
		return ErrInvalidRequest.New("Version should be metabase.NextVersion")
	}

	if opts.EncryptedMetadata == nil && (opts.EncryptedMetadataNonce != nil || opts.EncryptedMetadataEncryptedKey != nil) {
		return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must not be set if EncryptedMetadata is not set")
	} else if opts.EncryptedMetadata != nil && (opts.EncryptedMetadataNonce == nil || opts.EncryptedMetadataEncryptedKey == nil) {
		return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must be set if EncryptedMetadata is set")
	}
	return nil
}

// BeginObjectNextVersion adds a pending object to the database, with an automatically assigned version.
// TODO at the end of the transition to the pending_objects table we can rename this method to just BeginObject.
func (db *DB) BeginObjectNextVersion(ctx context.Context, opts BeginObjectNextVersion) (object Object, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return Object{}, err
	}

	if opts.ZombieDeletionDeadline == nil {
		deadline := time.Now().Add(defaultZombieDeletionPeriod)
		opts.ZombieDeletionDeadline = &deadline
	}

	object = Object{
		ObjectStream: ObjectStream{
			ProjectID:  opts.ProjectID,
			BucketName: opts.BucketName,
			ObjectKey:  opts.ObjectKey,
			StreamID:   opts.StreamID,
		},
		ExpiresAt:              opts.ExpiresAt,
		Encryption:             opts.Encryption,
		ZombieDeletionDeadline: opts.ZombieDeletionDeadline,
	}

	if opts.UsePendingObjectsTable {
		object.Status = Pending
		object.Version = PendingVersion

		if err := db.db.QueryRowContext(ctx, `
			INSERT INTO pending_objects (
				project_id, bucket_name, object_key, stream_id,
				expires_at, encryption,
				zombie_deletion_deadline,
				encrypted_metadata, encrypted_metadata_nonce, encrypted_metadata_encrypted_key
			) VALUES (
				$1, $2, $3, $4,
				$5, $6,
				$7,
				$8, $9, $10)
			RETURNING created_at
		`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
			opts.ExpiresAt, encryptionParameters{&opts.Encryption},
			opts.ZombieDeletionDeadline,
			opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey,
		).Scan(&object.CreatedAt); err != nil {
			if code := pgerrcode.FromError(err); code == pgxerrcode.UniqueViolation {
				return Object{}, Error.Wrap(ErrObjectAlreadyExists.New(""))
			}
			return Object{}, Error.New("unable to insert object: %w", err)
		}
	} else {
		// The next version is derived from the highest existing version
		// for this object key inside the INSERT itself.
		if err := db.db.QueryRowContext(ctx, `
			INSERT INTO objects (
				project_id, bucket_name, object_key, version, stream_id,
				expires_at, encryption,
				zombie_deletion_deadline,
				encrypted_metadata, encrypted_metadata_nonce, encrypted_metadata_encrypted_key
			) VALUES (
				$1, $2, $3,
				coalesce((
					SELECT version + 1
					FROM objects
					WHERE project_id = $1 AND bucket_name = $2 AND object_key = $3
					ORDER BY version DESC
					LIMIT 1
				), 1),
				$4, $5, $6,
				$7,
				$8, $9, $10)
			RETURNING status, version, created_at
		`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
			opts.ExpiresAt, encryptionParameters{&opts.Encryption},
			opts.ZombieDeletionDeadline,
			opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey,
		).Scan(&object.Status, &object.Version, &object.CreatedAt); err != nil {
			return Object{}, Error.New("unable to insert object: %w", err)
		}
	}

	mon.Meter("object_begin").Mark(1)

	return object, nil
}
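
// A minimal usage sketch (hypothetical, for illustration only; not part of
// the package API or its tests). projectID, streamID, and ep are assumed to
// be prepared by the caller:
//
//	object, err := db.BeginObjectNextVersion(ctx, BeginObjectNextVersion{
//		ObjectStream: ObjectStream{
//			ProjectID:  projectID,
//			BucketName: "example-bucket",
//			ObjectKey:  "example/key",
//			Version:    NextVersion, // required by Verify
//			StreamID:   streamID,
//		},
//		Encryption: ep,
//	})
//	if err != nil {
//		return err
//	}
//	// object.Version now holds the automatically assigned version.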

// BeginObjectExactVersion contains arguments necessary for starting an object upload.
type BeginObjectExactVersion struct {
	ObjectStream

	ExpiresAt              *time.Time
	ZombieDeletionDeadline *time.Time

	EncryptedMetadata             []byte // optional
	EncryptedMetadataNonce        []byte // optional
	EncryptedMetadataEncryptedKey []byte // optional

	Encryption storj.EncryptionParameters
}

// Verify verifies request fields.
func (opts *BeginObjectExactVersion) Verify() error {
	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}

	if opts.Version == NextVersion {
		return ErrInvalidRequest.New("Version should not be metabase.NextVersion")
	}

	if opts.EncryptedMetadata == nil && (opts.EncryptedMetadataNonce != nil || opts.EncryptedMetadataEncryptedKey != nil) {
		return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must not be set if EncryptedMetadata is not set")
	} else if opts.EncryptedMetadata != nil && (opts.EncryptedMetadataNonce == nil || opts.EncryptedMetadataEncryptedKey == nil) {
		return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must be set if EncryptedMetadata is set")
	}
	return nil
}

// BeginObjectExactVersion adds a pending object to the database, with a specific version.
func (db *DB) BeginObjectExactVersion(ctx context.Context, opts BeginObjectExactVersion) (committed Object, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return Object{}, err
	}

	if opts.ZombieDeletionDeadline == nil {
		deadline := time.Now().Add(defaultZombieDeletionPeriod)
		opts.ZombieDeletionDeadline = &deadline
	}

	object := Object{
		ObjectStream: ObjectStream{
			ProjectID:  opts.ProjectID,
			BucketName: opts.BucketName,
			ObjectKey:  opts.ObjectKey,
			Version:    opts.Version,
			StreamID:   opts.StreamID,
		},
		ExpiresAt:              opts.ExpiresAt,
		Encryption:             opts.Encryption,
		ZombieDeletionDeadline: opts.ZombieDeletionDeadline,
	}

	err = db.db.QueryRowContext(ctx, `
		INSERT INTO objects (
			project_id, bucket_name, object_key, version, stream_id,
			expires_at, encryption,
			zombie_deletion_deadline,
			encrypted_metadata, encrypted_metadata_nonce, encrypted_metadata_encrypted_key
		) VALUES (
			$1, $2, $3, $4, $5,
			$6, $7,
			$8,
			$9, $10, $11
		)
		RETURNING status, created_at
	`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
		opts.ExpiresAt, encryptionParameters{&opts.Encryption},
		opts.ZombieDeletionDeadline,
		opts.EncryptedMetadata, opts.EncryptedMetadataNonce, opts.EncryptedMetadataEncryptedKey,
	).Scan(
		&object.Status, &object.CreatedAt,
	)
	if err != nil {
		if code := pgerrcode.FromError(err); code == pgxerrcode.UniqueViolation {
			return Object{}, Error.Wrap(ErrObjectAlreadyExists.New(""))
		}
		return Object{}, Error.New("unable to insert object: %w", err)
	}

	mon.Meter("object_begin").Mark(1)

	return object, nil
}

// BeginSegment contains options to verify whether a new segment upload can be started.
type BeginSegment struct {
	ObjectStream

	Position SegmentPosition

	// TODO: unused field, can remove
	RootPieceID storj.PieceID

	Pieces Pieces

	UsePendingObjectsTable bool
}

// BeginSegment verifies whether a new segment upload can be started.
func (db *DB) BeginSegment(ctx context.Context, opts BeginSegment) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}

	if err := opts.Pieces.Verify(); err != nil {
		return err
	}

	if opts.RootPieceID.IsZero() {
		return ErrInvalidRequest.New("RootPieceID missing")
	}

	// NOTE: this isn't strictly necessary, since we can also fail this in CommitSegment.
	// however, we should prevent creating segments for non-partial objects.

	// Verify that object exists and is partial.
	var value int
	if opts.UsePendingObjectsTable {
		err = db.db.QueryRowContext(ctx, `
			SELECT 1
			FROM pending_objects WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3 AND
				stream_id = $4
		`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID).Scan(&value)
	} else {
		err = db.db.QueryRowContext(ctx, `
			SELECT 1
			FROM objects WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3 AND
				version = $4 AND
				stream_id = $5 AND
				status = `+pendingStatus,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID).Scan(&value)
	}
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return ErrPendingObjectMissing.New("")
		}
		return Error.New("unable to query object status: %w", err)
	}

	mon.Meter("segment_begin").Mark(1)

	return nil
}
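
// A minimal usage sketch (hypothetical): before uploading pieces to storage
// nodes, a caller can check that the pending object still exists. stream,
// rootPieceID, and pieces are assumed to come from the ongoing upload:
//
//	err := db.BeginSegment(ctx, BeginSegment{
//		ObjectStream: stream,
//		Position:     SegmentPosition{Part: 0, Index: 0},
//		RootPieceID:  rootPieceID,
//		Pieces:       pieces,
//	})
//	if err != nil {
//		return err // e.g. ErrPendingObjectMissing
//	}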

// CommitSegment contains all necessary information about the segment.
type CommitSegment struct {
	ObjectStream

	Position    SegmentPosition
	RootPieceID storj.PieceID

	ExpiresAt *time.Time

	EncryptedKeyNonce []byte
	EncryptedKey      []byte

	PlainOffset   int64 // offset in the original data stream
	PlainSize     int32 // size before encryption
	EncryptedSize int32 // segment size after encryption

	EncryptedETag []byte

	Redundancy storj.RedundancyScheme

	Pieces Pieces

	Placement storj.PlacementConstraint

	UsePendingObjectsTable bool
}

// CommitSegment commits a segment to the database.
func (db *DB) CommitSegment(ctx context.Context, opts CommitSegment) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}

	if err := opts.Pieces.Verify(); err != nil {
		return err
	}

	switch {
	case opts.RootPieceID.IsZero():
		return ErrInvalidRequest.New("RootPieceID missing")
	case len(opts.EncryptedKey) == 0:
		return ErrInvalidRequest.New("EncryptedKey missing")
	case len(opts.EncryptedKeyNonce) == 0:
		return ErrInvalidRequest.New("EncryptedKeyNonce missing")
	case opts.EncryptedSize <= 0:
		return ErrInvalidRequest.New("EncryptedSize negative or zero")
	case opts.PlainSize <= 0 && validatePlainSize:
		return ErrInvalidRequest.New("PlainSize negative or zero")
	case opts.PlainOffset < 0:
		return ErrInvalidRequest.New("PlainOffset negative")
	case opts.Redundancy.IsZero():
		return ErrInvalidRequest.New("Redundancy zero")
	}

	if len(opts.Pieces) < int(opts.Redundancy.OptimalShares) {
		return ErrInvalidRequest.New("number of pieces is less than redundancy optimal shares value")
	}

	aliasPieces, err := db.aliasCache.EnsurePiecesToAliases(ctx, opts.Pieces)
	if err != nil {
		return Error.New("unable to convert pieces to aliases: %w", err)
	}

	// The second branch will be removed once there are no longer pending objects in the objects table.
	// Verify that object exists and is partial.
	if opts.UsePendingObjectsTable {
		_, err = db.db.ExecContext(ctx, `
			INSERT INTO segments (
				stream_id, position, expires_at,
				root_piece_id, encrypted_key_nonce, encrypted_key,
				encrypted_size, plain_offset, plain_size, encrypted_etag,
				redundancy,
				remote_alias_pieces,
				placement
			) VALUES (
				(SELECT stream_id
					FROM pending_objects WHERE
						project_id = $12 AND
						bucket_name = $13 AND
						object_key = $14 AND
						stream_id = $15
				), $1, $2,
				$3, $4, $5,
				$6, $7, $8, $9,
				$10,
				$11,
				$16
			)
			ON CONFLICT(stream_id, position)
			DO UPDATE SET
				expires_at = $2,
				root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
				encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
				redundancy = $10,
				remote_alias_pieces = $11,
				placement = $16
		`, opts.Position, opts.ExpiresAt,
			opts.RootPieceID, opts.EncryptedKeyNonce, opts.EncryptedKey,
			opts.EncryptedSize, opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
			redundancyScheme{&opts.Redundancy},
			aliasPieces,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
			opts.Placement,
		)
	} else {
		_, err = db.db.ExecContext(ctx, `
			INSERT INTO segments (
				stream_id, position, expires_at,
				root_piece_id, encrypted_key_nonce, encrypted_key,
				encrypted_size, plain_offset, plain_size, encrypted_etag,
				redundancy,
				remote_alias_pieces,
				placement
			) VALUES (
				(SELECT stream_id
					FROM objects WHERE
						project_id = $12 AND
						bucket_name = $13 AND
						object_key = $14 AND
						version = $15 AND
						stream_id = $16 AND
						status = `+pendingStatus+
			` ), $1, $2,
				$3, $4, $5,
				$6, $7, $8, $9,
				$10,
				$11,
				$17
			)
			ON CONFLICT(stream_id, position)
			DO UPDATE SET
				expires_at = $2,
				root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
				encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
				redundancy = $10,
				remote_alias_pieces = $11,
				placement = $17
		`, opts.Position, opts.ExpiresAt,
			opts.RootPieceID, opts.EncryptedKeyNonce, opts.EncryptedKey,
			opts.EncryptedSize, opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
			redundancyScheme{&opts.Redundancy},
			aliasPieces,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
			opts.Placement,
		)
	}
	if err != nil {
		if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
			return ErrPendingObjectMissing.New("")
		}
		return Error.New("unable to insert segment: %w", err)
	}

	mon.Meter("segment_commit").Mark(1)
	mon.IntVal("segment_commit_encrypted_size").Observe(int64(opts.EncryptedSize))

	return nil
}
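
// A minimal usage sketch (hypothetical): once the pieces are stored, the
// caller records the segment. All lowercase identifiers are assumptions
// standing in for values produced during the upload:
//
//	err := db.CommitSegment(ctx, CommitSegment{
//		ObjectStream:      stream,
//		Position:          SegmentPosition{Part: 0, Index: 0},
//		RootPieceID:       rootPieceID,
//		EncryptedKeyNonce: keyNonce,
//		EncryptedKey:      encryptedKey,
//		PlainOffset:       0,
//		PlainSize:         plainSize,
//		EncryptedSize:     encryptedSize,
//		Redundancy:        redundancy,
//		Pieces:            pieces,
//	})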

// CommitInlineSegment contains all necessary information about the segment.
type CommitInlineSegment struct {
	ObjectStream

	Position SegmentPosition

	ExpiresAt *time.Time

	EncryptedKeyNonce []byte
	EncryptedKey      []byte

	PlainOffset   int64 // offset in the original data stream
	PlainSize     int32 // size before encryption
	EncryptedETag []byte

	InlineData []byte

	UsePendingObjectsTable bool
}

// CommitInlineSegment commits an inline segment to the database.
func (db *DB) CommitInlineSegment(ctx context.Context, opts CommitInlineSegment) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.ObjectStream.Verify(); err != nil {
		return err
	}

	// TODO: do we have a lower limit for inline data?
	// TODO: should we move the check for the max inline segment size from metainfo here?

	switch {
	case len(opts.EncryptedKey) == 0:
		return ErrInvalidRequest.New("EncryptedKey missing")
	case len(opts.EncryptedKeyNonce) == 0:
		return ErrInvalidRequest.New("EncryptedKeyNonce missing")
	case opts.PlainSize <= 0 && validatePlainSize:
		return ErrInvalidRequest.New("PlainSize negative or zero")
	case opts.PlainOffset < 0:
		return ErrInvalidRequest.New("PlainOffset negative")
	}

	if opts.UsePendingObjectsTable {
		_, err = db.db.ExecContext(ctx, `
			INSERT INTO segments (
				stream_id, position, expires_at,
				root_piece_id, encrypted_key_nonce, encrypted_key,
				encrypted_size, plain_offset, plain_size, encrypted_etag,
				inline_data
			) VALUES (
				(SELECT stream_id
					FROM pending_objects WHERE
						project_id = $11 AND
						bucket_name = $12 AND
						object_key = $13 AND
						stream_id = $14
				), $1, $2,
				$3, $4, $5,
				$6, $7, $8, $9,
				$10
			)
			ON CONFLICT(stream_id, position)
			DO UPDATE SET
				expires_at = $2,
				root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
				encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
				inline_data = $10
		`, opts.Position, opts.ExpiresAt,
			storj.PieceID{}, opts.EncryptedKeyNonce, opts.EncryptedKey,
			len(opts.InlineData), opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
			opts.InlineData,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.StreamID,
		)
	} else {
		_, err = db.db.ExecContext(ctx, `
			INSERT INTO segments (
				stream_id, position, expires_at,
				root_piece_id, encrypted_key_nonce, encrypted_key,
				encrypted_size, plain_offset, plain_size, encrypted_etag,
				inline_data
			) VALUES (
				(SELECT stream_id
					FROM objects WHERE
						project_id = $11 AND
						bucket_name = $12 AND
						object_key = $13 AND
						version = $14 AND
						stream_id = $15 AND
						status = `+pendingStatus+
			` ), $1, $2,
				$3, $4, $5,
				$6, $7, $8, $9,
				$10
			)
			ON CONFLICT(stream_id, position)
			DO UPDATE SET
				expires_at = $2,
				root_piece_id = $3, encrypted_key_nonce = $4, encrypted_key = $5,
				encrypted_size = $6, plain_offset = $7, plain_size = $8, encrypted_etag = $9,
				inline_data = $10
		`, opts.Position, opts.ExpiresAt,
			storj.PieceID{}, opts.EncryptedKeyNonce, opts.EncryptedKey,
			len(opts.InlineData), opts.PlainOffset, opts.PlainSize, opts.EncryptedETag,
			opts.InlineData,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
		)
	}
	if err != nil {
		if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
			return ErrPendingObjectMissing.New("")
		}
		return Error.New("unable to insert segment: %w", err)
	}

	mon.Meter("segment_commit").Mark(1)
	mon.IntVal("segment_commit_encrypted_size").Observe(int64(len(opts.InlineData)))

	return nil
}
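
// A minimal usage sketch (hypothetical): data small enough to keep inline is
// stored directly in the segments table; note that the encrypted size is
// derived from len(InlineData) rather than passed in:
//
//	err := db.CommitInlineSegment(ctx, CommitInlineSegment{
//		ObjectStream:      stream,
//		Position:          SegmentPosition{Part: 0, Index: 1},
//		EncryptedKeyNonce: keyNonce,
//		EncryptedKey:      encryptedKey,
//		PlainOffset:       plainOffset,
//		PlainSize:         tailPlainSize,
//		InlineData:        tailData,
//	})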

// CommitObject contains arguments necessary for committing an object.
type CommitObject struct {
	ObjectStream

	Encryption storj.EncryptionParameters

	// OverrideEncryptedMetadata controls whether the metadata fields are set
	// with CommitObject. Metadata can already be set with the BeginObject
	// request, so the caller needs to be explicit about setting it with
	// CommitObject, which overrides any existing metadata.
	OverrideEncryptedMetadata     bool
	EncryptedMetadata             []byte // optional
	EncryptedMetadataNonce        []byte // optional
	EncryptedMetadataEncryptedKey []byte // optional

	DisallowDelete bool

	UsePendingObjectsTable bool
}

// Verify verifies request fields.
func (c *CommitObject) Verify() error {
	if err := c.ObjectStream.Verify(); err != nil {
		return err
	}

	if c.Encryption.CipherSuite != storj.EncUnspecified && c.Encryption.BlockSize <= 0 {
		return ErrInvalidRequest.New("Encryption.BlockSize is negative or zero")
	}

	if c.OverrideEncryptedMetadata {
		if c.EncryptedMetadata == nil && (c.EncryptedMetadataNonce != nil || c.EncryptedMetadataEncryptedKey != nil) {
			return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must not be set if EncryptedMetadata is not set")
		} else if c.EncryptedMetadata != nil && (c.EncryptedMetadataNonce == nil || c.EncryptedMetadataEncryptedKey == nil) {
			return ErrInvalidRequest.New("EncryptedMetadataNonce and EncryptedMetadataEncryptedKey must be set if EncryptedMetadata is set")
		}
	}
	return nil
}

// CommitObject adds a pending object to the database. If another committed object is under the target location,
// it will be deleted.
func (db *DB) CommitObject(ctx context.Context, opts CommitObject) (object Object, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return Object{}, err
	}

	err = txutil.WithTx(ctx, db.db, nil, func(ctx context.Context, tx tagsql.Tx) error {
		segments, err := fetchSegmentsForCommit(ctx, tx, opts.StreamID)
		if err != nil {
			return Error.New("failed to fetch segments: %w", err)
		}

		if err = db.validateParts(segments); err != nil {
			return err
		}

		finalSegments := convertToFinalSegments(segments)
		err = updateSegmentOffsets(ctx, tx, opts.StreamID, finalSegments)
		if err != nil {
			return Error.New("failed to update segments: %w", err)
		}

		// TODO: would we even need this when we make main index plain_offset?
		fixedSegmentSize := int32(0)
		if len(finalSegments) > 0 {
			fixedSegmentSize = finalSegments[0].PlainSize
			for i, seg := range finalSegments {
				if seg.Position.Part != 0 || seg.Position.Index != uint32(i) {
					fixedSegmentSize = -1
					break
				}
				if i < len(finalSegments)-1 && seg.PlainSize != fixedSegmentSize {
					fixedSegmentSize = -1
					break
				}
			}
		}

		var totalPlainSize, totalEncryptedSize int64
		for _, seg := range finalSegments {
			totalPlainSize += int64(seg.PlainSize)
			totalEncryptedSize += int64(seg.EncryptedSize)
		}

		const versionArgIndex = 3 // index of opts.Version in args below
		args := []interface{}{
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey, opts.Version, opts.StreamID,
			len(segments),
			totalPlainSize,
			totalEncryptedSize,
			fixedSegmentSize,
			encryptionParameters{&opts.Encryption},
		}

		versionsToDelete := []Version{}
		if err := withRows(tx.QueryContext(ctx, `
			SELECT version
			FROM objects
			WHERE
				project_id = $1 AND
				bucket_name = $2 AND
				object_key = $3 AND
				status = `+committedStatus,
			opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey))(func(rows tagsql.Rows) error {
			for rows.Next() {
				var version Version
				if err := rows.Scan(&version); err != nil {
					return Error.New("failed to scan previous object: %w", err)
				}

				versionsToDelete = append(versionsToDelete, version)
			}
			return nil
		}); err != nil {
			return Error.New("failed to find previous objects: %w", err)
		}

		if len(versionsToDelete) > 1 {
			db.log.Warn("object with multiple committed versions was found!",
				zap.Stringer("Project ID", opts.ProjectID), zap.String("Bucket Name", opts.BucketName),
				zap.ByteString("Object Key", []byte(opts.ObjectKey)), zap.Int("deleted", len(versionsToDelete)))

			mon.Meter("multiple_committed_versions").Mark(1)
		}

		if len(versionsToDelete) != 0 && opts.DisallowDelete {
			return ErrPermissionDenied.New("no permissions to delete existing object")
		}

		if opts.UsePendingObjectsTable {
			// remove existing object(s) before inserting the new one
			// TODO after switching to the pending_objects table completely we should
			// be able to just delete all objects under this key and avoid
			// selecting versions above
			for _, version := range versionsToDelete {
				_, err := db.deleteObjectExactVersion(ctx, DeleteObjectExactVersion{
					ObjectLocation: ObjectLocation{
						ProjectID:  opts.ProjectID,
						BucketName: opts.BucketName,
						ObjectKey:  opts.ObjectKey,
					},
					Version: version,
				}, tx)
				if err != nil {
					return Error.New("failed to delete existing object: %w", err)
				}
			}

			opts.Version = DefaultVersion
			args[versionArgIndex] = opts.Version

			args = append(args,
				opts.EncryptedMetadataNonce,
				opts.EncryptedMetadata,
				opts.EncryptedMetadataEncryptedKey,
				opts.OverrideEncryptedMetadata,
			)

			err = tx.QueryRowContext(ctx, `
				WITH delete_pending_object AS (
					DELETE FROM pending_objects WHERE
						project_id = $1 AND
						bucket_name = $2 AND
						object_key = $3 AND
						stream_id = $5
					RETURNING expires_at, encryption, encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key
				)
				INSERT INTO objects (
					project_id, bucket_name, object_key, version, stream_id,
					status, segment_count, total_plain_size, total_encrypted_size,
					fixed_segment_size, zombie_deletion_deadline, expires_at,
					encryption,
					encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key
				)
				SELECT
					$1 as project_id, $2 as bucket_name, $3 as object_key, $4::INT4 as version, $5 as stream_id,
					`+committedStatus+` as status, $6::INT4 as segment_count, $7::INT8 as total_plain_size, $8::INT8 as total_encrypted_size,
					$9::INT4 as fixed_segment_size, NULL::timestamp as zombie_deletion_deadline, expires_at,
					-- TODO should we allow overriding existing encryption parameters or return an error if they don't match opts?
					CASE
						WHEN encryption = 0 AND $10 <> 0 THEN $10
						WHEN encryption = 0 AND $10 = 0 THEN NULL
						ELSE encryption
					END as
					encryption,
					CASE
						WHEN $14::BOOL = true THEN $11
						ELSE encrypted_metadata_nonce
					END as
					encrypted_metadata_nonce,
					CASE
						WHEN $14::BOOL = true THEN $12
						ELSE encrypted_metadata
					END as
					encrypted_metadata,
					CASE
						WHEN $14::BOOL = true THEN $13
						ELSE encrypted_metadata_encrypted_key
					END as
					encrypted_metadata_encrypted_key
				FROM delete_pending_object
				-- we don't want the ON CONFLICT clause to update an existing object
				-- as this way we may miss removing old object segments
				RETURNING
					created_at, expires_at,
					encrypted_metadata, encrypted_metadata_encrypted_key, encrypted_metadata_nonce,
					encryption
			`, args...).Scan(
				&object.CreatedAt, &object.ExpiresAt,
				&object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey, &object.EncryptedMetadataNonce,
				encryptionParameters{&object.Encryption},
			)
			if err != nil {
				if errors.Is(err, sql.ErrNoRows) {
					return ErrObjectNotFound.Wrap(Error.New("object with specified version and pending status is missing"))
				} else if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
					// TODO maybe we should check the message whether the 'encryption' label is there
					return ErrInvalidRequest.New("Encryption is missing")
				}
				return Error.New("failed to update object: %w", err)
			}
		} else {
			metadataColumns := ""
			if opts.OverrideEncryptedMetadata {
				args = append(args,
					opts.EncryptedMetadataNonce,
					opts.EncryptedMetadata,
					opts.EncryptedMetadataEncryptedKey,
				)
				metadataColumns = `,
					encrypted_metadata_nonce = $11,
					encrypted_metadata = $12,
					encrypted_metadata_encrypted_key = $13
				`
			}
			err = tx.QueryRowContext(ctx, `
				UPDATE objects SET
					status =`+committedStatus+`,
					segment_count = $6,

					total_plain_size = $7,
					total_encrypted_size = $8,
					fixed_segment_size = $9,
					zombie_deletion_deadline = NULL,

					-- TODO should we allow overriding existing encryption parameters or return an error if they don't match opts?
					encryption = CASE
						WHEN objects.encryption = 0 AND $10 <> 0 THEN $10
						WHEN objects.encryption = 0 AND $10 = 0 THEN NULL
						ELSE objects.encryption
					END
					`+metadataColumns+`
				WHERE
					project_id = $1 AND
					bucket_name = $2 AND
					object_key = $3 AND
					version = $4 AND
					stream_id = $5 AND
					status = `+pendingStatus+`
				RETURNING
					created_at, expires_at,
					encrypted_metadata, encrypted_metadata_encrypted_key, encrypted_metadata_nonce,
					encryption
			`, args...).Scan(
				&object.CreatedAt, &object.ExpiresAt,
				&object.EncryptedMetadata, &object.EncryptedMetadataEncryptedKey, &object.EncryptedMetadataNonce,
				encryptionParameters{&object.Encryption},
			)
			if err != nil {
				if errors.Is(err, sql.ErrNoRows) {
					return ErrObjectNotFound.Wrap(Error.New("object with specified version and pending status is missing"))
				} else if code := pgerrcode.FromError(err); code == pgxerrcode.NotNullViolation {
					// TODO maybe we should check the message whether the 'encryption' label is there
					return ErrInvalidRequest.New("Encryption is missing")
				}
				return Error.New("failed to update object: %w", err)
			}

			for _, version := range versionsToDelete {
				_, err := db.deleteObjectExactVersion(ctx, DeleteObjectExactVersion{
					ObjectLocation: ObjectLocation{
						ProjectID:  opts.ProjectID,
						BucketName: opts.BucketName,
						ObjectKey:  opts.ObjectKey,
					},
					Version: version,
				}, tx)
				if err != nil {
					return Error.New("failed to delete existing object: %w", err)
				}
			}
		}

		object.StreamID = opts.StreamID
		object.ProjectID = opts.ProjectID
		object.BucketName = opts.BucketName
		object.ObjectKey = opts.ObjectKey
		object.Version = opts.Version
		object.Status = Committed
		object.SegmentCount = int32(len(segments))
		object.TotalPlainSize = totalPlainSize
		object.TotalEncryptedSize = totalEncryptedSize
		object.FixedSegmentSize = fixedSegmentSize
		return nil
	})
	if err != nil {
		return Object{}, err
	}

	mon.Meter("object_commit").Mark(1)
	mon.IntVal("object_commit_segments").Observe(int64(object.SegmentCount))
	mon.IntVal("object_commit_encrypted_size").Observe(object.TotalEncryptedSize)

	return object, nil
}
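
// A minimal end-to-end sketch (hypothetical) tying the calls in this file
// together; error handling is elided and lowercase identifiers are assumed
// to be prepared by the caller:
//
//	object, _ := db.BeginObjectNextVersion(ctx, BeginObjectNextVersion{ObjectStream: stream, Encryption: ep})
//	_ = db.BeginSegment(ctx, BeginSegment{ObjectStream: object.ObjectStream, Position: pos, RootPieceID: rootPieceID, Pieces: pieces})
//	// ... upload pieces, then commit the segment as shown above ...
//	committed, _ := db.CommitObject(ctx, CommitObject{ObjectStream: object.ObjectStream})
//	_ = committed.TotalPlainSize // aggregated totals are filled in by CommitObject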

// validateParts checks that the number of parts and the size of each
// non-final part stay within the configured limits.
func (db *DB) validateParts(segments []segmentInfoForCommit) error {
	partSize := make(map[uint32]memory.Size)

	var lastPart uint32
	for _, segment := range segments {
		partSize[segment.Position.Part] += memory.Size(segment.PlainSize)
		if lastPart < segment.Position.Part {
			lastPart = segment.Position.Part
		}
	}

	if len(partSize) > db.config.MaxNumberOfParts {
		return ErrFailedPrecondition.New("exceeded maximum number of parts: %d", db.config.MaxNumberOfParts)
	}

	for part, size := range partSize {
		// Last part has no minimum size.
		if part == lastPart {
			continue
		}

		if size < db.config.MinPartSize {
			return ErrFailedPrecondition.New("size of part number %d is below minimum threshold, got: %s, min: %s", part, size, db.config.MinPartSize)
		}
	}

	return nil
}
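
// A worked example (hypothetical configuration): with MinPartSize = 5 MiB, a
// three-part upload with part sizes 6 MiB, 2 MiB, and 1 MiB fails, because
// part 1 (2 MiB) is not the last part and is below the 5 MiB minimum, while
// the 1 MiB final part is exempt from the minimum.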