// Commit adf687aebb
// Add FullIterateObjects that iterates over all objects in the metabase.
// Change-Id: I4b8dbda32dfce2d7729e9574af79e4f20faed98a
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package metabase_test
|
|
|
|
import (
|
|
"strings"
|
|
"testing"
|
|
"time"
|
|
|
|
"storj.io/common/testcontext"
|
|
"storj.io/common/testrand"
|
|
"storj.io/common/uuid"
|
|
"storj.io/storj/satellite/metainfo/metabase"
|
|
)
|
|
|
|
func TestFullIterateObjects(t *testing.T) {
|
|
All(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
|
|
t.Run("Limit is negative", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: -1,
|
|
},
|
|
ErrClass: &metabase.ErrInvalidRequest,
|
|
ErrText: "BatchSize is negative",
|
|
}.Check(ctx, t, db)
|
|
Verify{}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("no data", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 0,
|
|
},
|
|
Result: nil,
|
|
}.Check(ctx, t, db)
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 10,
|
|
},
|
|
Result: nil,
|
|
}.Check(ctx, t, db)
|
|
|
|
Verify{}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("pending and committed", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
|
|
now := time.Now()
|
|
|
|
pending := randObjectStream()
|
|
committed := randObjectStream()
|
|
committed.ProjectID = pending.ProjectID
|
|
committed.BucketName = pending.BucketName + "z"
|
|
|
|
BeginObjectExactVersion{
|
|
Opts: metabase.BeginObjectExactVersion{
|
|
ObjectStream: pending,
|
|
Encryption: defaultTestEncryption,
|
|
},
|
|
Version: 1,
|
|
}.Check(ctx, t, db)
|
|
|
|
encryptedMetadata := testrand.Bytes(1024)
|
|
encryptedMetadataNonce := testrand.Nonce()
|
|
encryptedMetadataKey := testrand.Bytes(265)
|
|
|
|
BeginObjectExactVersion{
|
|
Opts: metabase.BeginObjectExactVersion{
|
|
ObjectStream: committed,
|
|
Encryption: defaultTestEncryption,
|
|
},
|
|
Version: 1,
|
|
}.Check(ctx, t, db)
|
|
CommitObject{
|
|
Opts: metabase.CommitObject{
|
|
ObjectStream: committed,
|
|
EncryptedMetadataNonce: encryptedMetadataNonce[:],
|
|
EncryptedMetadata: encryptedMetadata,
|
|
EncryptedMetadataEncryptedKey: encryptedMetadataKey,
|
|
},
|
|
}.Check(ctx, t, db)
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 1,
|
|
},
|
|
Result: []metabase.FullObjectEntry{
|
|
{
|
|
ObjectStream: pending,
|
|
CreatedAt: now,
|
|
Status: metabase.Pending,
|
|
Encryption: defaultTestEncryption,
|
|
},
|
|
{
|
|
ObjectStream: committed,
|
|
CreatedAt: now,
|
|
Status: metabase.Committed,
|
|
Encryption: defaultTestEncryption,
|
|
EncryptedMetadataNonce: encryptedMetadataNonce[:],
|
|
EncryptedMetadata: encryptedMetadata,
|
|
EncryptedMetadataEncryptedKey: encryptedMetadataKey,
|
|
},
|
|
},
|
|
}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("less objects than limit", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
numberOfObjects := 3
|
|
limit := 10
|
|
expected := make([]metabase.FullObjectEntry, numberOfObjects)
|
|
objects := createObjects(ctx, t, db, numberOfObjects, uuid.UUID{1}, "mybucket")
|
|
for i, obj := range objects {
|
|
expected[i] = fullObjectEntryFromRaw(obj)
|
|
}
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: limit,
|
|
},
|
|
Result: expected,
|
|
}.Check(ctx, t, db)
|
|
Verify{Objects: objects}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("more objects than limit", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
numberOfObjects := 10
|
|
limit := 3
|
|
expected := make([]metabase.FullObjectEntry, numberOfObjects)
|
|
objects := createObjects(ctx, t, db, numberOfObjects, uuid.UUID{1}, "mybucket")
|
|
for i, obj := range objects {
|
|
expected[i] = fullObjectEntryFromRaw(obj)
|
|
}
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: limit,
|
|
},
|
|
Result: expected,
|
|
}.Check(ctx, t, db)
|
|
Verify{Objects: objects}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("recursive", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
projectID, bucketName := uuid.UUID{1}, "bucky"
|
|
|
|
objects := createFullObjectsWithKeys(ctx, t, db, projectID, bucketName, []metabase.ObjectKey{
|
|
"a",
|
|
"b/1",
|
|
"b/2",
|
|
"b/3",
|
|
"c",
|
|
"c/",
|
|
"c//",
|
|
"c/1",
|
|
"g",
|
|
})
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 3,
|
|
},
|
|
Result: []metabase.FullObjectEntry{
|
|
objects["a"],
|
|
objects["b/1"],
|
|
objects["b/2"],
|
|
objects["b/3"],
|
|
objects["c"],
|
|
objects["c/"],
|
|
objects["c//"],
|
|
objects["c/1"],
|
|
objects["g"],
|
|
},
|
|
}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("multiple projects", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
|
|
projects := []uuid.UUID{}
|
|
for i := 0; i < 10; i++ {
|
|
p := testrand.UUID()
|
|
p[0] = byte(i)
|
|
projects = append(projects, p)
|
|
}
|
|
bucketNames := strings.Split("abcde", "")
|
|
|
|
expected := make([]metabase.FullObjectEntry, 0, len(projects)*len(bucketNames))
|
|
for _, projectID := range projects {
|
|
for _, bucketName := range bucketNames {
|
|
rawObjects := createObjects(ctx, t, db, 1, projectID, bucketName)
|
|
for _, obj := range rawObjects {
|
|
expected = append(expected, fullObjectEntryFromRaw(obj))
|
|
}
|
|
}
|
|
}
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 3,
|
|
},
|
|
Result: expected,
|
|
}.Check(ctx, t, db)
|
|
})
|
|
|
|
t.Run("multiple projects", func(t *testing.T) {
|
|
defer DeleteAll{}.Check(ctx, t, db)
|
|
|
|
projects := []uuid.UUID{}
|
|
for i := 0; i < 10; i++ {
|
|
p := testrand.UUID()
|
|
p[0] = byte(i)
|
|
projects = append(projects, p)
|
|
}
|
|
bucketNames := strings.Split("abcde", "")
|
|
|
|
expected := make([]metabase.FullObjectEntry, 0, len(projects)*len(bucketNames))
|
|
for _, projectID := range projects {
|
|
for _, bucketName := range bucketNames {
|
|
obj := randObjectStream()
|
|
obj.ProjectID = projectID
|
|
obj.BucketName = bucketName
|
|
for version := 1; version < 4; version++ {
|
|
obj.Version = metabase.Version(version)
|
|
rawObject := createObject(ctx, t, db, obj, 0)
|
|
expected = append(expected, fullObjectEntryFromRaw(metabase.RawObject(rawObject)))
|
|
}
|
|
}
|
|
}
|
|
|
|
FullIterateObjects{
|
|
Opts: metabase.FullIterateObjects{
|
|
BatchSize: 2,
|
|
},
|
|
Result: expected,
|
|
}.Check(ctx, t, db)
|
|
})
|
|
})
|
|
}
|
|
|
|
func createFullObjectsWithKeys(ctx *testcontext.Context, t *testing.T, db *metabase.DB, projectID uuid.UUID, bucketName string, keys []metabase.ObjectKey) map[metabase.ObjectKey]metabase.FullObjectEntry {
|
|
objects := make(map[metabase.ObjectKey]metabase.FullObjectEntry, len(keys))
|
|
for _, key := range keys {
|
|
obj := randObjectStream()
|
|
obj.ProjectID = projectID
|
|
obj.BucketName = bucketName
|
|
obj.ObjectKey = key
|
|
now := time.Now()
|
|
|
|
createObject(ctx, t, db, obj, 0)
|
|
|
|
objects[key] = metabase.FullObjectEntry{
|
|
ObjectStream: obj,
|
|
CreatedAt: now,
|
|
Status: metabase.Committed,
|
|
Encryption: defaultTestEncryption,
|
|
}
|
|
}
|
|
|
|
return objects
|
|
}
|
|
func fullObjectEntryFromRaw(m metabase.RawObject) metabase.FullObjectEntry {
|
|
return metabase.FullObjectEntry{
|
|
ObjectStream: m.ObjectStream,
|
|
CreatedAt: m.CreatedAt,
|
|
ExpiresAt: m.ExpiresAt,
|
|
Status: m.Status,
|
|
SegmentCount: m.SegmentCount,
|
|
EncryptedMetadataNonce: m.EncryptedMetadataNonce,
|
|
EncryptedMetadata: m.EncryptedMetadata,
|
|
EncryptedMetadataEncryptedKey: m.EncryptedMetadataEncryptedKey,
|
|
TotalEncryptedSize: m.TotalEncryptedSize,
|
|
FixedSegmentSize: m.FixedSegmentSize,
|
|
Encryption: m.Encryption,
|
|
ZombieDeletionDeadline: m.ZombieDeletionDeadline,
|
|
}
|
|
}
|