satellite/metabase: make UploadID stable for different options

Multipart upload requires the same UploadID to be returned from
different requests (BeginUpload, ListUploads). Otherwise the client
won't be able to find existing uploads. The main issue was that the
data needed to construct the UploadID is in the System metadata, which
can be filtered out by the listing options.

This change fixes how we set the Status for listed objects, and it
forces reading the System metadata when listing pending objects.

Fixes https://github.com/storj/storj/issues/5298

Change-Id: I8dd5fbab4421a64dc3ed95556408ead4c829f276
This commit is contained in:
Michal Niewrzal 2022-11-09 11:26:18 +01:00 committed by Storj Robot
parent 8c569866aa
commit 6273ed035d
4 changed files with 58 additions and 23 deletions

View File

@ -98,6 +98,7 @@ func iteratePendingObjectsByKey(ctx context.Context, db *DB, opts IteratePending
recursive: true,
includeCustomMetadata: true,
includeSystemMetadata: true,
status: Pending,
curIndex: 0,
cursor: iterateCursor{
@ -285,7 +286,6 @@ func querySelectorFields(objectKeyColumn string, it *objectsIterator) string {
if it.includeSystemMetadata {
querySelectFields += `
,status
,created_at
,expires_at
,segment_count
@ -317,7 +317,7 @@ func doNextQueryStreamsByKey(ctx context.Context, it *objectsIterator) (_ tagsql
return it.db.db.QueryContext(ctx, `
SELECT
object_key, stream_id, version, encryption, status,
object_key, stream_id, version, encryption,
created_at, expires_at,
segment_count,
total_plain_size, total_encrypted_size, fixed_segment_size,
@ -340,6 +340,7 @@ func doNextQueryStreamsByKey(ctx context.Context, it *objectsIterator) (_ tagsql
// scanItem scans doNextQuery results into ObjectEntry.
func (it *objectsIterator) scanItem(item *ObjectEntry) (err error) {
item.IsPrefix = false
item.Status = it.status
fields := []interface{}{
&item.ObjectKey,
@ -350,7 +351,6 @@ func (it *objectsIterator) scanItem(item *ObjectEntry) (err error) {
if it.includeSystemMetadata {
fields = append(fields,
&item.Status,
&item.CreatedAt,
&item.ExpiresAt,
&item.SegmentCount,

View File

@ -802,11 +802,12 @@ func TestIterateObjectsWithStatus(t *testing.T) {
require.NotEmpty(t, entry.ObjectKey)
require.NotEmpty(t, entry.StreamID)
require.NotZero(t, entry.Version)
require.Equal(t, metabase.Committed, entry.Status)
require.False(t, entry.Encryption.IsZero())
require.True(t, entry.CreatedAt.IsZero())
require.Nil(t, entry.ExpiresAt)
require.Zero(t, entry.Status)
require.Zero(t, entry.SegmentCount)
require.Zero(t, entry.TotalPlainSize)
require.Zero(t, entry.TotalEncryptedSize)

View File

@ -814,14 +814,17 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
Limit: limit,
Status: status,
IncludeCustomMetadata: includeCustomMetadata,
IncludeSystemMetadata: includeSystemMetadata,
// because the multipart upload UploadID depends on some System metadata fields, we need
// to force reading them when listing pending objects, even when it's not included in the options.
// This is used by the libuplink ListUploads method.
IncludeSystemMetadata: status == metabase.Pending || includeSystemMetadata,
})
if err != nil {
return nil, endpoint.convertMetabaseErr(err)
}
for _, entry := range result.Objects {
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeCustomMetadata, placement)
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeSystemMetadata, includeCustomMetadata, placement)
if err != nil {
return nil, endpoint.convertMetabaseErr(err)
}
@ -843,11 +846,11 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
BatchSize: limit + 1,
Status: status,
IncludeCustomMetadata: includeCustomMetadata,
IncludeSystemMetadata: includeSystemMetadata,
IncludeSystemMetadata: status == metabase.Pending || includeSystemMetadata,
}, func(ctx context.Context, it metabase.ObjectsIterator) error {
entry := metabase.ObjectEntry{}
for len(resp.Items) < limit && it.Next(ctx, &entry) {
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeCustomMetadata, placement)
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, prefix, includeSystemMetadata, includeCustomMetadata, placement)
if err != nil {
return err
}
@ -930,7 +933,7 @@ func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.
}, func(ctx context.Context, it metabase.ObjectsIterator) error {
entry := metabase.ObjectEntry{}
for len(resp.Items) < limit && it.Next(ctx, &entry) {
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, "", true, placement)
item, err := endpoint.objectEntryToProtoListItem(ctx, req.Bucket, entry, "", true, true, placement)
if err != nil {
return err
}
@ -1288,20 +1291,23 @@ func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Obj
func (endpoint *Endpoint) objectEntryToProtoListItem(ctx context.Context, bucket []byte,
entry metabase.ObjectEntry, prefixToPrependInSatStreamID metabase.ObjectKey,
includeMetadata bool, placement storj.PlacementConstraint) (item *pb.ObjectListItem, err error) {
expires := time.Time{}
if entry.ExpiresAt != nil {
expires = *entry.ExpiresAt
}
includeSystem, includeMetadata bool, placement storj.PlacementConstraint) (item *pb.ObjectListItem, err error) {
item = &pb.ObjectListItem{
EncryptedPath: []byte(entry.ObjectKey),
Version: int32(entry.Version), // TODO incompatible types
Status: pb.Object_Status(entry.Status),
ExpiresAt: expires,
CreatedAt: entry.CreatedAt,
PlainSize: entry.TotalPlainSize,
}
expiresAt := time.Time{}
if entry.ExpiresAt != nil {
expiresAt = *entry.ExpiresAt
}
if includeSystem {
item.ExpiresAt = expiresAt
item.CreatedAt = entry.CreatedAt
item.PlainSize = entry.TotalPlainSize
}
if includeMetadata {
@ -1350,10 +1356,10 @@ func (endpoint *Endpoint) objectEntryToProtoListItem(ctx context.Context, bucket
if entry.Status == metabase.Pending {
satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
Bucket: bucket,
EncryptedObjectKey: append([]byte(prefixToPrependInSatStreamID), item.EncryptedPath...),
Version: int64(item.Version),
CreationDate: item.CreatedAt,
ExpirationDate: item.ExpiresAt,
EncryptedObjectKey: append([]byte(prefixToPrependInSatStreamID), []byte(entry.ObjectKey)...),
Version: int64(entry.Version),
CreationDate: entry.CreatedAt,
ExpirationDate: expiresAt,
StreamId: entry.StreamID[:],
MultipartObject: entry.FixedSegmentSize <= 0,
EncryptionParameters: &pb.EncryptionParameters{

View File

@ -602,8 +602,36 @@ func TestEndpoint_Object_No_StorageNodes(t *testing.T) {
require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
})
})
t.Run("UploadID check", func(t *testing.T) {
defer ctx.Check(deleteBucket)
project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
require.NoError(t, err)
defer ctx.Check(project.Close)
_, err = project.CreateBucket(ctx, bucketName)
require.NoError(t, err)
for _, options := range []uplink.ListUploadsOptions{
{System: false, Custom: false},
{System: true, Custom: false},
{System: true, Custom: true},
{System: false, Custom: true},
} {
t.Run(fmt.Sprintf("system:%v;custom:%v", options.System, options.Custom), func(t *testing.T) {
uploadInfo, err := project.BeginUpload(ctx, bucketName, "multipart-object", nil)
require.NoError(t, err)
iterator := project.ListUploads(ctx, bucketName, &options)
require.True(t, iterator.Next())
require.Equal(t, uploadInfo.UploadID, iterator.Item().UploadID)
err = project.AbortUpload(ctx, bucketName, "multipart-object", iterator.Item().UploadID)
require.NoError(t, err)
})
}
})
})
}
// TODO remove when listing query tests feature flag is removed.