// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package metabasetest

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/metabase"
)

// RandObjectStream returns a random object stream.
func RandObjectStream() metabase.ObjectStream {
	return metabase.ObjectStream{
		ProjectID:  testrand.UUID(),
		BucketName: testrand.BucketName(),
		ObjectKey:  metabase.ObjectKey(testrand.Bytes(16)),
		Version:    1,
		StreamID:   testrand.UUID(),
	}
}

// CreatePendingObject creates a new pending object with the specified number of segments.
func CreatePendingObject(ctx *testcontext.Context, t *testing.T, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}
}
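
// A minimal usage sketch for the helper above (ctx, t, and db are assumed to
// come from the surrounding metabase test harness):
//
//	obj := RandObjectStream()
//	CreatePendingObject(ctx, t, db, obj, 3)
//	// obj now has three committed segments but remains a pending object,
//	// since no CommitObject step is performed.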

// CreateObject creates a new committed object with the specified number of segments.
func CreateObject(ctx *testcontext.Context, t *testing.T, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) metabase.Object {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}

	return CommitObject{
		Opts: metabase.CommitObject{
			ObjectStream: obj,
		},
	}.Check(ctx, t, db)
}
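
// A minimal usage sketch (ctx, t, and db assumed from the surrounding test):
//
//	obj := RandObjectStream()
//	object := CreateObject(ctx, t, db, obj, 2)
//	// object is the committed metabase.Object backed by two segments.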

// CreateExpiredObject creates a new committed expired object with the specified number of segments.
func CreateExpiredObject(ctx *testcontext.Context, t *testing.T, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte, expiresAt time.Time) metabase.Object {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
			ExpiresAt:    &expiresAt,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				ExpiresAt:    &expiresAt,
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}

	return CommitObject{
		Opts: metabase.CommitObject{
			ObjectStream: obj,
		},
	}.Check(ctx, t, db)
}
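
// A minimal usage sketch (ctx, t, and db assumed from the surrounding test);
// the object and its segments carry the given expiration timestamp:
//
//	expiresAt := time.Now().Add(-time.Hour) // already expired, e.g. for cleanup tests
//	object := CreateExpiredObject(ctx, t, db, RandObjectStream(), 1, expiresAt)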

// CreateFullObjectsWithKeys creates multiple objects with the specified keys.
func CreateFullObjectsWithKeys(ctx *testcontext.Context, t *testing.T, db *metabase.DB, projectID uuid.UUID, bucketName string, keys []metabase.ObjectKey) map[metabase.ObjectKey]metabase.LoopObjectEntry {
	objects := make(map[metabase.ObjectKey]metabase.LoopObjectEntry, len(keys))
	for _, key := range keys {
		obj := RandObjectStream()
		obj.ProjectID = projectID
		obj.BucketName = bucketName
		obj.ObjectKey = key

		CreateObject(ctx, t, db, obj, 0)

		objects[key] = metabase.LoopObjectEntry{
			ObjectStream: obj,
			Status:       metabase.Committed,
			CreatedAt:    time.Now(),
		}
	}

	return objects
}
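
// A minimal usage sketch (ctx, t, and db assumed from the surrounding test);
// note that each object is committed with zero segments:
//
//	keys := []metabase.ObjectKey{"a", "b/c", "d"}
//	entries := CreateFullObjectsWithKeys(ctx, t, db, testrand.UUID(), "testbucket", keys)
//	_ = entries["a"] // metabase.LoopObjectEntry for key "a"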

// CreateTestObject is for creating a committed test object together with its
// segments, with optional overrides for the begin and commit steps.
type CreateTestObject struct {
	BeginObjectExactVersion *metabase.BeginObjectExactVersion
	CommitObject            *metabase.CommitObject
	// TODO add BeginSegment, CommitSegment
}

// Run creates the object and its segments and returns the committed object
// together with the expected segments.
func (co CreateTestObject) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) (metabase.Object, []metabase.Segment) {
	// Begin the object with default options unless the caller provided overrides.
	boeOpts := metabase.BeginObjectExactVersion{
		ObjectStream: obj,
		Encryption:   DefaultEncryption,
	}
	if co.BeginObjectExactVersion != nil {
		boeOpts = *co.BeginObjectExactVersion
	}

	BeginObjectExactVersion{
		Opts:    boeOpts,
		Version: obj.Version,
	}.Check(ctx, t, db)

	createdSegments := []metabase.Segment{}

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		commitSegmentOpts := metabase.CommitSegment{
			ObjectStream: obj,
			ExpiresAt:    boeOpts.ExpiresAt,
			Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
			RootPieceID:  storj.PieceID{1},
			Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

			EncryptedKey:      []byte{3},
			EncryptedKeyNonce: []byte{4},
			EncryptedETag:     []byte{5},

			EncryptedSize: 1060,
			PlainSize:     512,
			PlainOffset:   int64(i) * 512,
			Redundancy:    DefaultRedundancy,
		}

		CommitSegment{
			Opts: commitSegmentOpts,
		}.Check(ctx, t, db)

		// Read the stored segment back to pick up DB-assigned fields
		// (CreatedAt, Placement) for the expected segment.
		segment, err := db.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: commitSegmentOpts.StreamID,
			Position: commitSegmentOpts.Position,
		})
		require.NoError(t, err)

		createdSegments = append(createdSegments, metabase.Segment{
			StreamID: obj.StreamID,
			Position: commitSegmentOpts.Position,

			CreatedAt:  segment.CreatedAt,
			RepairedAt: nil,
			ExpiresAt:  nil,

			RootPieceID:       commitSegmentOpts.RootPieceID,
			EncryptedKeyNonce: commitSegmentOpts.EncryptedKeyNonce,
			EncryptedKey:      commitSegmentOpts.EncryptedKey,

			EncryptedSize: commitSegmentOpts.EncryptedSize,
			PlainSize:     commitSegmentOpts.PlainSize,
			PlainOffset:   commitSegmentOpts.PlainOffset,
			EncryptedETag: commitSegmentOpts.EncryptedETag,

			Redundancy: commitSegmentOpts.Redundancy,

			InlineData: nil,
			Pieces:     commitSegmentOpts.Pieces,

			Placement: segment.Placement,
		})
	}

	// Commit the object with default options unless the caller provided overrides.
	coOpts := metabase.CommitObject{
		ObjectStream: obj,
	}
	if co.CommitObject != nil {
		coOpts = *co.CommitObject
	}

	createdObject := CommitObject{
		Opts: coOpts,
	}.Check(ctx, t, db)

	return createdObject, createdSegments
}
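
// A minimal usage sketch (ctx, t, db, and obj assumed from the surrounding
// test). The zero value begins the object with DefaultEncryption and commits
// it plainly; either step can be overridden through the struct fields:
//
//	object, segments := CreateTestObject{}.Run(ctx, t, db, obj, 4)
//
//	custom := metabase.CommitObject{ObjectStream: obj /* plus any extra commit options */}
//	object, segments = CreateTestObject{CommitObject: &custom}.Run(ctx, t, db, obj, 4)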

// CreateObjectCopy is for testing object copy.
type CreateObjectCopy struct {
	OriginalObject metabase.Object
	// OriginalSegments: if empty, fake segments are generated as needed.
	OriginalSegments []metabase.Segment
	FinishObject     *metabase.FinishCopyObject
	CopyObjectStream *metabase.ObjectStream
}

// Run creates the copy and returns the copied object together with the
// expected raw segments of the copy.
func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB) (metabase.Object, []metabase.RawSegment) {
	// Copy into the provided destination stream, or a random one.
	var copyStream metabase.ObjectStream
	if cc.CopyObjectStream != nil {
		copyStream = *cc.CopyObjectStream
	} else {
		copyStream = RandObjectStream()
	}

	// Prepare re-encrypted per-segment keys and the segments expected after the copy.
	newEncryptedKeysNonces := make([]metabase.EncryptedKeyAndNonce, cc.OriginalObject.SegmentCount)
	newSegments := make([]metabase.RawSegment, cc.OriginalObject.SegmentCount)
	expectedEncryptedSize := 1060

	for i := 0; i < int(cc.OriginalObject.SegmentCount); i++ {
		newEncryptedKeysNonces[i] = metabase.EncryptedKeyAndNonce{
			Position:          metabase.SegmentPosition{Index: uint32(i)},
			EncryptedKeyNonce: testrand.Nonce().Bytes(),
			EncryptedKey:      testrand.Bytes(32),
		}

		var originalSegment metabase.RawSegment
		if len(cc.OriginalSegments) == 0 {
			originalSegment = DefaultRawSegment(cc.OriginalObject.ObjectStream, metabase.SegmentPosition{Index: uint32(i)})
			// TODO: place this calculation in metabasetest.
			originalSegment.PlainOffset = int64(i) * int64(originalSegment.PlainSize)
			// TODO: we should use the same value for encrypted size in both test methods.
			originalSegment.EncryptedSize = int32(expectedEncryptedSize)
		} else {
			originalSegment = metabase.RawSegment(cc.OriginalSegments[i])
		}

		newSegment := metabase.RawSegment{
			StreamID:          copyStream.StreamID,
			EncryptedKeyNonce: newEncryptedKeysNonces[i].EncryptedKeyNonce,
			EncryptedKey:      newEncryptedKeysNonces[i].EncryptedKey,
			EncryptedSize:     originalSegment.EncryptedSize,
			Position:          originalSegment.Position,
			RootPieceID:       originalSegment.RootPieceID,
			Redundancy:        originalSegment.Redundancy,
			PlainSize:         originalSegment.PlainSize,
			PlainOffset:       originalSegment.PlainOffset,
			CreatedAt:         time.Now().UTC(),
		}

		newSegments[i] = newSegment
	}

	// Use the provided FinishCopyObject options, or defaults targeting copyStream.
	opts := cc.FinishObject
	if opts == nil {
		opts = &metabase.FinishCopyObject{
			NewStreamID:                  copyStream.StreamID,
			NewBucket:                    copyStream.BucketName,
			ObjectStream:                 cc.OriginalObject.ObjectStream,
			NewSegmentKeys:               newEncryptedKeysNonces,
			NewEncryptedObjectKey:        []byte(copyStream.ObjectKey),
			NewEncryptedMetadataKeyNonce: testrand.Nonce().Bytes(),
			NewEncryptedMetadataKey:      testrand.Bytes(32),
		}
	}

	copyObj, err := db.FinishCopyObject(ctx, *opts)
	require.NoError(t, err)

	return copyObj, newSegments
}
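
// A minimal usage sketch (ctx, t, db, and obj assumed from the surrounding
// test); the original object is created first and then copied under a random
// destination stream unless CopyObjectStream is set:
//
//	originalObj, originalSegments := CreateTestObject{}.Run(ctx, t, db, obj, 2)
//	copyObj, copySegments := CreateObjectCopy{
//		OriginalObject:   originalObj,
//		OriginalSegments: originalSegments,
//	}.Run(ctx, t, db)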

// SegmentsToRaw converts a slice of Segment to a slice of RawSegment.
func SegmentsToRaw(segments []metabase.Segment) []metabase.RawSegment {
	rawSegments := []metabase.RawSegment{}

	for _, segment := range segments {
		rawSegments = append(rawSegments, metabase.RawSegment(segment))
	}

	return rawSegments
}