satellite/metainfo: remove ServerSideCopyDuplicateMetadata

https://github.com/storj/storj/issues/5891

Change-Id: Ib5440169107acca6e832c2280e1ad12dfd380f28
Michal Niewrzal 2023-08-04 11:27:08 +02:00 committed by Storj Robot
parent 9550b5f4a5
commit 7be844351d
22 changed files with 79 additions and 1479 deletions
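In short: server-side copy now always duplicates object and segment metadata (including the remote_alias_pieces list) onto the new stream, instead of recording an ancestor in segment_copies. A minimal sketch of the simplified endpoint call, using only names visible in the hunks below; the receiver fields and the elided options are assumptions, not the full call:

    object, err := endpoint.metabase.FinishCopyObject(ctx, metabase.FinishCopyObject{
        // ... object/stream identifiers and NewSegmentKeys elided ...
        NewEncryptedMetadata:         req.NewEncryptedMetadata,
        NewEncryptedMetadataKeyNonce: req.NewEncryptedMetadataKeyNonce,
        NewEncryptedMetadataKey:      req.NewEncryptedMetadataKey,
        // DuplicateMetadata is gone: duplication is now unconditional.
        VerifyLimits: func(encryptedObjectSize int64, nSegments int64) error {
            return endpoint.addStorageUsageUpToLimit(ctx, keyInfo.ProjectID, encryptedObjectSize, nSegments)
        },
    })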

View File

@@ -1161,8 +1161,6 @@ func TestProjectBandwidthUsageWithCopies(t *testing.T) {
// this effectively disable live accounting cache
config.LiveAccounting.BandwidthCacheTTL = -1
config.LiveAccounting.AsOfSystemInterval = 0
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/pb"
@@ -18,7 +17,6 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/audit"
)
@@ -84,11 +82,6 @@ func TestAuditOrderLimit(t *testing.T) {
func TestAuditSkipsRemoteCopies(t *testing.T) {
testWithRangedLoop(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, pauseQueueing pauseQueueingFunc, runQueueingOnce runQueueingOnceFunc) {
satellite := planet.Satellites[0]
audits := satellite.Audit
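With the config flag gone, tests like this one no longer reconfigure the satellite just to opt into metadata duplication; the Reconfigure hook shown above simply disappears. A sketch of the reduced setup, using the plain testplanet.Run harness seen elsewhere in this change (test body elided):

    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        // ... test body unchanged ...
    })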

View File

@@ -22,7 +22,6 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/gc/bloomfilter"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop"
@@ -306,12 +305,7 @@ func TestGarbageCollectionWithCopiesWithDuplicateMetadata(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 3, 4, 4),
func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
),
Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]

View File

@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"golang.org/x/exp/slices"
"storj.io/common/memory"
@@ -235,12 +234,7 @@ func TestGracefulExit_CopiedObjects(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 3, 4, 4),
func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
),
Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])

View File

@@ -52,10 +52,6 @@ type FinishCopyObject struct {
NewSegmentKeys []EncryptedKeyAndNonce
// If set, copy the object by duplicating the metadata and
// remote_alias_pieces list, rather than using segment_copies.
DuplicateMetadata bool
// VerifyLimits holds a callback by which the caller can interrupt the copy
// if it turns out completing the copy would exceed a limit.
// It will be called only once.
@@ -156,8 +152,7 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
redundancySchemes := make([]int64, sourceObject.SegmentCount)
if opts.DuplicateMetadata {
err = withRows(db.db.QueryContext(ctx, `
err = withRows(db.db.QueryContext(ctx, `
SELECT
position,
expires_at,
@@ -172,75 +167,35 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
ORDER BY position ASC
LIMIT $2
`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
index := 0
for rows.Next() {
err := rows.Scan(
&positions[index],
&expiresAts[index],
&rootPieceIDs[index],
&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
&redundancySchemes[index],
&remoteAliasPiecesLists[index],
&placementConstraints[index],
&inlineDatas[index],
)
if err != nil {
return err
}
index++
}
if err := rows.Err(); err != nil {
index := 0
for rows.Next() {
err := rows.Scan(
&positions[index],
&expiresAts[index],
&rootPieceIDs[index],
&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
&redundancySchemes[index],
&remoteAliasPiecesLists[index],
&placementConstraints[index],
&inlineDatas[index],
)
if err != nil {
return err
}
index++
}
if index != int(sourceObject.SegmentCount) {
return Error.New("could not load all of the segment information")
}
if err := rows.Err(); err != nil {
return err
}
return nil
})
} else {
err = withRows(db.db.QueryContext(ctx, `
SELECT
position,
expires_at,
root_piece_id,
encrypted_size, plain_offset, plain_size,
redundancy,
inline_data
FROM segments
WHERE stream_id = $1
ORDER BY position ASC
LIMIT $2
`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
index := 0
for rows.Next() {
err := rows.Scan(
&positions[index],
&expiresAts[index],
&rootPieceIDs[index],
&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
&redundancySchemes[index],
&inlineDatas[index],
)
if err != nil {
return err
}
index++
}
if index != int(sourceObject.SegmentCount) {
return Error.New("could not load all of the segment information")
}
if err := rows.Err(); err != nil {
return err
}
return nil
})
if index != int(sourceObject.SegmentCount) {
return Error.New("could not load all of the segment information")
}
return nil
})
}
if err != nil {
return Error.New("unable to copy object: %w", err)
}
@@ -351,19 +306,6 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
return nil
}
if !opts.DuplicateMetadata {
_, err = tx.ExecContext(ctx, `
INSERT INTO segment_copies (
stream_id, ancestor_stream_id
) VALUES (
$1, $2
)
`, opts.NewStreamID, ancestorStreamID)
if err != nil {
return Error.New("unable to copy object: %w", err)
}
}
return nil
})
@@ -387,7 +329,6 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
// Fetch the following in a single query:
// - object at copy source location (error if it's not there)
// - source ancestor stream id (if any)
// - next version available
// - object at copy destination location (if any).
func getObjectAtCopySourceAndDestination(
@@ -395,7 +336,6 @@ func getObjectAtCopySourceAndDestination(
) (sourceObject Object, ancestorStreamID uuid.UUID, destinationObject *Object, nextAvailableVersion Version, err error) {
defer mon.Task()(&ctx)(&err)
var ancestorStreamIDBytes []byte
var highestVersion Version
sourceObject.ProjectID = opts.ProjectID
@@ -422,11 +362,9 @@ func getObjectAtCopySourceAndDestination(
encrypted_metadata,
total_plain_size, total_encrypted_size, fixed_segment_size,
encryption,
segment_copies.ancestor_stream_id,
0,
coalesce((SELECT max(version) FROM destination_current_versions),0) AS highest_version
FROM objects
LEFT JOIN segment_copies ON objects.stream_id = segment_copies.stream_id
WHERE
project_id = $1 AND
bucket_name = $3 AND
@@ -441,7 +379,6 @@ func getObjectAtCopySourceAndDestination(
NULL,
total_plain_size, total_encrypted_size, fixed_segment_size,
encryption,
NULL,
version,
(SELECT max(version) FROM destination_current_versions) AS highest_version
FROM objects
@@ -472,7 +409,6 @@ func getObjectAtCopySourceAndDestination(
&sourceObject.EncryptedMetadata,
&sourceObject.TotalPlainSize, &sourceObject.TotalEncryptedSize, &sourceObject.FixedSegmentSize,
encryptionParameters{&sourceObject.Encryption},
&ancestorStreamIDBytes,
&highestVersion,
&highestVersion,
)
@@ -483,19 +419,7 @@ func getObjectAtCopySourceAndDestination(
return Object{}, uuid.UUID{}, nil, 0, ErrObjectNotFound.New("object was changed during copy")
}
if len(ancestorStreamIDBytes) != 0 {
// Source object already was a copy, the new copy becomes yet another copy of the existing ancestor
ancestorStreamID, err = uuid.FromBytes(ancestorStreamIDBytes)
if err != nil {
return Object{}, uuid.UUID{}, nil, 0, err
}
} else {
// Source object was not a copy, it will now become an ancestor (unless it has only inline segments)
ancestorStreamID = sourceObject.StreamID
}
if rows.Next() {
var _bogusBytes []byte
destinationObject = &Object{}
destinationObject.ProjectID = opts.ProjectID
destinationObject.BucketName = opts.NewBucket
@@ -509,7 +433,6 @@ func getObjectAtCopySourceAndDestination(
&destinationObject.EncryptedMetadata,
&destinationObject.TotalPlainSize, &destinationObject.TotalEncryptedSize, &destinationObject.FixedSegmentSize,
encryptionParameters{&destinationObject.Encryption},
&_bogusBytes,
&destinationObject.Version,
&highestVersion,
)
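The net effect in FinishCopyObject: the DuplicateMetadata branch is gone, the one remaining query always fetches the full segment rows for the source stream, and no segment_copies row is inserted for the new stream. A sketch reconstructed from the hunks above (the remote_alias_pieces and placement column names are inferred from the scan variables; the scan loop is elided):

    err = withRows(db.db.QueryContext(ctx, `
        SELECT
            position,
            expires_at,
            root_piece_id,
            encrypted_size, plain_offset, plain_size,
            redundancy,
            remote_alias_pieces,
            placement,
            inline_data
        FROM segments
        WHERE stream_id = $1
        ORDER BY position ASC
        LIMIT $2
    `, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
        // scan each row into positions, expiresAts, rootPieceIDs, encryptedSizes,
        // plainOffsets, plainSizes, redundancySchemes, remoteAliasPiecesLists,
        // placementConstraints and inlineDatas, exactly as in the hunk above
        return nil
    })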

File diff suppressed because it is too large

View File

@@ -297,7 +297,6 @@ func TestDeleteBucketObjectsCancel(t *testing.T) {
}
func TestDeleteBucketWithCopies(t *testing.T) {
duplicateMetadata := true
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
for _, numberOfSegments := range []int{0, 1, 3} {
t.Run(fmt.Sprintf("%d segments", numberOfSegments), func(t *testing.T) {
@@ -322,7 +321,7 @@ func TestDeleteBucketWithCopies(t *testing.T) {
metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyObjectStream,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: metabase.BucketLocation{
@@ -363,7 +362,7 @@ func TestDeleteBucketWithCopies(t *testing.T) {
copyObj, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyObjectStream,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: metabase.BucketLocation{
@@ -421,12 +420,12 @@ func TestDeleteBucketWithCopies(t *testing.T) {
metabasetest.CreateObjectCopy{
OriginalObject: originalObj1,
CopyObjectStream: &copyObjectStream1,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
copyObj2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj2,
CopyObjectStream: &copyObjectStream2,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
// done preparing, delete bucket 1
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
@@ -487,12 +486,12 @@ func TestDeleteBucketWithCopies(t *testing.T) {
metabasetest.CreateObjectCopy{
OriginalObject: originalObj1,
CopyObjectStream: &copyObjectStream1,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
copyObj2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj2,
CopyObjectStream: &copyObjectStream2,
}.Run(ctx, t, db, duplicateMetadata)
}.Run(ctx, t, db)
// done preparing, delete bucket 1
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{

View File

@@ -753,7 +753,7 @@ func TestDeleteCopyWithDuplicateMetadata(t *testing.T) {
copyObj, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
// check that copy went OK
metabasetest.Verify{
@@ -798,10 +798,10 @@ func TestDeleteCopyWithDuplicateMetadata(t *testing.T) {
copyObject1, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@@ -838,7 +838,7 @@ func TestDeleteCopyWithDuplicateMetadata(t *testing.T) {
copyObject, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@@ -878,10 +878,10 @@ func TestDeleteCopyWithDuplicateMetadata(t *testing.T) {
copyObject1, _, copySegments1 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
_, err := db.DeleteObjectExactVersion(ctx, metabase.DeleteObjectExactVersion{
Version: originalObj.Version,

View File

@@ -41,12 +41,6 @@ func (s Segment) Inline() bool {
return s.Redundancy.IsZero() && len(s.Pieces) == 0
}
// PiecesInAncestorSegment returns true if remote alias pieces are to be found in an ancestor segment.
// TODO we will remove this method and related to code when all metadata will be migrated to segment copies.
func (s Segment) PiecesInAncestorSegment() bool {
return s.EncryptedSize != 0 && len(s.InlineData) == 0 && len(s.Pieces) == 0
}
// Expired checks if segment is expired relative to now.
func (s Segment) Expired(now time.Time) bool {
return s.ExpiresAt != nil && s.ExpiresAt.Before(now)
@@ -271,13 +265,6 @@ func (db *DB) GetSegmentByPosition(ctx context.Context, opts GetSegmentByPositio
segment.StreamID = opts.StreamID
segment.Position = opts.Position
if db.config.ServerSideCopy {
err = db.updateWithAncestorSegment(ctx, &segment)
if err != nil {
return Segment{}, err
}
}
return segment, nil
}
@@ -342,52 +329,9 @@ func (db *DB) GetLatestObjectLastSegment(ctx context.Context, opts GetLatestObje
}
}
if db.config.ServerSideCopy {
err = db.updateWithAncestorSegment(ctx, &segment)
if err != nil {
return Segment{}, err
}
}
return segment, nil
}
func (db *DB) updateWithAncestorSegment(ctx context.Context, segment *Segment) (err error) {
if !segment.PiecesInAncestorSegment() {
return nil
}
var aliasPieces AliasPieces
err = db.db.QueryRowContext(ctx, `
SELECT
root_piece_id,
repaired_at,
remote_alias_pieces
FROM segments
WHERE
stream_id IN (SELECT ancestor_stream_id FROM segment_copies WHERE stream_id = $1)
AND position = $2
`, segment.StreamID, segment.Position.Encode()).Scan(
&segment.RootPieceID,
&segment.RepairedAt,
&aliasPieces,
)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrSegmentNotFound.New("segment missing")
}
return Error.New("unable to query segment: %w", err)
}
segment.Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
if err != nil {
return Error.New("unable to convert aliases to pieces: %w", err)
}
return nil
}
// BucketEmpty contains arguments necessary for checking if bucket is empty.
type BucketEmpty struct {
ProjectID uuid.UUID
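Because a copied segment now stores its own pieces, reads are direct: PiecesInAncestorSegment and updateWithAncestorSegment are deleted, and a copy resolves in a single query with no lookup against segment_copies. A sketch of a read against a copy, using the option struct from the tests below:

    segment, err := db.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
        StreamID: copyObjStream.StreamID,
        Position: metabase.SegmentPosition{Index: 0},
    })
    // segment.Pieces comes from the copy's own row; there is no second
    // query for an ancestor stream.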

View File

@@ -337,52 +337,6 @@ func TestGetObjectLastCommitted(t *testing.T) {
}}.Check(ctx, t, db)
})
t.Run("Get latest copied object version", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
copyObjStream := metabasetest.RandObjectStream()
originalObject := metabasetest.CreateObject(ctx, t, db, obj, 0)
copiedObj, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyObjStream,
}.Run(ctx, t, db, false)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
Version: 1,
ObjectLocation: obj.Location(),
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{originalObject},
},
}.Check(ctx, t, db)
metabasetest.GetObjectLastCommitted{
Opts: metabase.GetObjectLastCommitted{
ObjectLocation: copiedObj.Location(),
},
Result: copiedObj,
}.Check(ctx, t, db)
metabasetest.Verify{Objects: []metabase.RawObject{
{
ObjectStream: metabase.ObjectStream{
ProjectID: copiedObj.ProjectID,
BucketName: copiedObj.BucketName,
ObjectKey: copiedObj.ObjectKey,
Version: copiedObj.Version,
StreamID: copiedObj.StreamID,
},
CreatedAt: now,
Status: metabase.Committed,
Encryption: metabasetest.DefaultEncryption,
EncryptedMetadata: copiedObj.EncryptedMetadata,
EncryptedMetadataNonce: copiedObj.EncryptedMetadataNonce,
EncryptedMetadataEncryptedKey: copiedObj.EncryptedMetadataEncryptedKey,
},
}}.Check(ctx, t, db)
})
t.Run("Get latest copied object version with duplicate metadata", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
copyObjStream := metabasetest.RandObjectStream()
@@ -391,7 +345,7 @@ func TestGetObjectLastCommitted(t *testing.T) {
copiedObj, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyObjStream,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@@ -621,33 +575,12 @@ func TestGetSegmentByPosition(t *testing.T) {
Redundancy: metabasetest.DefaultRedundancy,
}
expectedCopiedSegmentRaw := metabase.Segment{
StreamID: copyObjStream.StreamID,
Position: metabase.SegmentPosition{
Index: 0,
},
CreatedAt: obj.CreatedAt,
ExpiresAt: obj.ExpiresAt,
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{},
EncryptedKey: newEncryptedKeyNonces[0].EncryptedKey,
EncryptedKeyNonce: newEncryptedKeyNonces[0].EncryptedKeyNonce,
EncryptedSize: 1024,
PlainSize: 512,
Redundancy: metabasetest.DefaultRedundancy,
InlineData: []byte{},
}
expectedCopiedSegmentGet := expectedSegment
expectedCopiedSegmentGet.EncryptedETag = nil
expectedCopiedSegmentGet.StreamID = copyObjStream.StreamID
expectedCopiedSegmentGet.EncryptedKey = newEncryptedKeyNonces[0].EncryptedKey
expectedCopiedSegmentGet.EncryptedKeyNonce = newEncryptedKeyNonces[0].EncryptedKeyNonce
expectedCopiedSegmentGet.InlineData = []byte{}
expectedCopiedSegment := expectedSegment
expectedCopiedSegment.StreamID = copyObjStream.StreamID
expectedCopiedSegment.EncryptedETag = nil
expectedCopiedSegment.EncryptedKey = newEncryptedKeyNonces[0].EncryptedKey
expectedCopiedSegment.EncryptedKeyNonce = newEncryptedKeyNonces[0].EncryptedKeyNonce
expectedCopiedSegment.InlineData = []byte{}
metabasetest.GetSegmentByPosition{
Opts: metabase.GetSegmentByPosition{
@@ -666,7 +599,7 @@ func TestGetSegmentByPosition(t *testing.T) {
Index: 0,
},
},
Result: expectedCopiedSegmentGet,
Result: expectedCopiedSegment,
}.Check(ctx, t, db)
metabasetest.Verify{
@@ -701,13 +634,8 @@ func TestGetSegmentByPosition(t *testing.T) {
},
Segments: []metabase.RawSegment{
metabase.RawSegment(expectedSegment),
metabase.RawSegment(expectedCopiedSegmentRaw),
metabase.RawSegment(expectedCopiedSegment),
},
Copies: []metabase.RawCopy{
{
StreamID: copyObjStream.StreamID,
AncestorStreamID: objStream.StreamID,
}},
}.Check(ctx, t, db)
})
@@ -871,7 +799,6 @@ func TestGetSegmentByPosition(t *testing.T) {
metabase.RawSegment(expectedSegment),
metabase.RawSegment(expectedCopiedSegmentRaw),
},
Copies: nil,
}.Check(ctx, t, db)
})
@@ -1047,7 +974,6 @@ func TestGetSegmentByPosition(t *testing.T) {
metabase.RawSegment(expectedSegment),
metabase.RawSegment(expectedCopiedSegmentRaw),
},
Copies: nil,
}.Check(ctx, t, db)
})
})
@@ -1160,7 +1086,7 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
copyObj, _, newSegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
}.Run(ctx, t, db)
metabasetest.GetLatestObjectLastSegment{
Opts: metabase.GetLatestObjectLastSegment{
@@ -1189,10 +1115,6 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
metabase.RawObject(copyObj),
},
Segments: append(metabasetest.SegmentsToRaw(originalSegments), newSegments...),
Copies: []metabase.RawCopy{{
StreamID: copyObj.StreamID,
AncestorStreamID: originalObj.StreamID,
}},
}.Check(ctx, t, db)
})
@@ -1212,7 +1134,7 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
copyObj, _, newSegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
metabasetest.GetLatestObjectLastSegment{
Opts: metabase.GetLatestObjectLastSegment{
@@ -1401,7 +1323,6 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
metabase.RawSegment(expectedSegment),
metabase.RawSegment(expectedCopiedSegmentRaw),
},
Copies: nil,
}.Check(ctx, t, db)
})
@@ -1573,7 +1494,6 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
metabase.RawSegment(expectedSegment),
metabase.RawSegment(expectedCopiedSegmentRaw),
},
Copies: nil,
}.Check(ctx, t, db)
})
})

View File

@@ -20,10 +20,6 @@ type ListSegments struct {
Limit int
Range *StreamRange
// This causes ListSegments to update the first Segment in the response
// with the ancestor info if it exists and server side copy is enabled.
UpdateFirstWithAncestor bool
}
// ListSegmentsResult result of listing segments.
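ListSegments drops UpdateFirstWithAncestor for the same reason: the first segment of a copy already carries its own pieces. A sketch, mirroring the updated test in the next file and assuming the usual metabase.DB method:

    result, err := db.ListSegments(ctx, metabase.ListSegments{
        StreamID: copyStream.StreamID,
    })
    // result.Segments holds the copy's segments as stored, no ancestor fix-up.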

View File

@@ -282,7 +282,7 @@ func TestListSegments(t *testing.T) {
_, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyStream,
}.Run(ctx, t, db, true)
}.Run(ctx, t, db)
expectedSegments := []metabase.Segment{}
for _, segment := range copySegments {
@ -304,8 +304,7 @@ func TestListSegments(t *testing.T) {
metabasetest.ListSegments{
Opts: metabase.ListSegments{
StreamID: copyStream.StreamID,
UpdateFirstWithAncestor: true,
StreamID: copyStream.StreamID,
},
Result: metabase.ListSegmentsResult{
Segments: expectedSegments,

View File

@@ -40,8 +40,6 @@ func (step Verify) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB
sortRawPendingObjects(step.PendingObjects)
sortRawSegments(state.Segments)
sortRawSegments(step.Segments)
sortRawCopies(state.Copies)
sortRawCopies(step.Copies)
diff := cmp.Diff(metabase.RawState(step), *state,
DefaultTimeDiff(),
@@ -85,12 +83,6 @@ func sortRawSegments(segments []metabase.RawSegment) {
})
}
func sortRawCopies(copies []metabase.RawCopy) {
sort.Slice(copies, func(i, j int) bool {
return copies[i].StreamID.Less(copies[j].StreamID)
})
}
func checkError(t require.TestingT, err error, errClass *errs.Class, errText string) {
if errClass != nil {
require.True(t, errClass.Has(err), "expected an error %v got %v", *errClass, err)

View File

@@ -321,10 +321,7 @@ type CreateObjectCopy struct {
}
// Run creates the copy.
//
// The duplicateMetadata argument is a hack and it will be great to get rid of it once
// duplicateMetadata is no longer an option.
func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB, duplicateMetadata bool) (copyObj metabase.Object, expectedOriginalSegments []metabase.RawSegment, expectedCopySegments []metabase.RawSegment) {
func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB) (copyObj metabase.Object, expectedOriginalSegments []metabase.RawSegment, expectedCopySegments []metabase.RawSegment) {
var copyStream metabase.ObjectStream
if cc.CopyObjectStream != nil {
copyStream = *cc.CopyObjectStream
@@ -364,10 +361,8 @@ func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metab
expectedCopySegments[i].InlineData = []byte{}
}
if duplicateMetadata {
expectedCopySegments[i].Pieces = make(metabase.Pieces, len(expectedOriginalSegments[i].Pieces))
copy(expectedCopySegments[i].Pieces, expectedOriginalSegments[i].Pieces)
}
expectedCopySegments[i].Pieces = make(metabase.Pieces, len(expectedOriginalSegments[i].Pieces))
copy(expectedCopySegments[i].Pieces, expectedOriginalSegments[i].Pieces)
}
opts := cc.FinishObject
@@ -382,7 +377,6 @@ func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metab
NewEncryptedMetadataKey: testrand.Bytes(32),
}
}
opts.DuplicateMetadata = duplicateMetadata
copyObj, err := db.FinishCopyObject(ctx, *opts)
require.NoError(t, err)
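With duplication now the only behavior, the helper loses the duplicateMetadata parameter its doc comment called a hack, and expected copy segments always get their own Pieces. Creating a copy in metabase tests reduces to:

    copyObj, originalSegments, copySegments := metabasetest.CreateObjectCopy{
        OriginalObject:   originalObj,
        CopyObjectStream: &copyObjectStream, // optional
    }.Run(ctx, t, db)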

View File

@@ -99,7 +99,6 @@ type RawState struct {
Objects []RawObject
PendingObjects []RawPendingObject
Segments []RawSegment
Copies []RawCopy
}
// TestingGetState returns the state of the database.
@@ -121,11 +120,6 @@ func (db *DB) TestingGetState(ctx context.Context) (_ *RawState, err error) {
return nil, Error.New("GetState: %w", err)
}
state.Copies, err = db.testingGetAllCopies(ctx)
if err != nil {
return nil, Error.New("GetState: %w", err)
}
return state, nil
}
@@ -135,7 +129,6 @@ func (db *DB) TestingDeleteAll(ctx context.Context) (err error) {
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM objects;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM pending_objects;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM segments;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM segment_copies;
WITH ignore_full_scan_for_test AS (SELECT 1) DELETE FROM node_aliases;
WITH ignore_full_scan_for_test AS (SELECT 1) SELECT setval('node_alias_seq', 1, false);
`)
@@ -325,39 +318,3 @@ func (db *DB) testingGetAllSegments(ctx context.Context) (_ []RawSegment, err er
}
return segs, nil
}
// testingGetAllCopies returns the state of the database.
func (db *DB) testingGetAllCopies(ctx context.Context) (_ []RawCopy, err error) {
copies := []RawCopy{}
rows, err := db.db.QueryContext(ctx, `
WITH ignore_full_scan_for_test AS (SELECT 1)
SELECT
stream_id, ancestor_stream_id
FROM segment_copies
ORDER BY stream_id ASC, ancestor_stream_id ASC
`)
if err != nil {
return nil, Error.New("testingGetAllCopies query: %w", err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var copy RawCopy
err := rows.Scan(
&copy.StreamID,
&copy.AncestorStreamID,
)
if err != nil {
return nil, Error.New("testingGetAllCopies scan failed: %w", err)
}
copies = append(copies, copy)
}
if err := rows.Err(); err != nil {
return nil, Error.New("testingGetAllCopies scan failed: %w", err)
}
if len(copies) == 0 {
return nil, nil
}
return copies, nil
}

View File

@@ -31,7 +31,7 @@ func (db *DB) GetStreamPieceCountByNodeID(ctx context.Context, opts GetStreamPie
err = withRows(db.db.QueryContext(ctx, `
SELECT remote_alias_pieces
FROM segments
WHERE stream_id IN (SELECT $1 UNION SELECT ancestor_stream_id FROM segment_copies WHERE stream_id = $1) AND remote_alias_pieces IS NOT null
WHERE stream_id = $1 AND remote_alias_pieces IS NOT null
`, opts.StreamID))(func(rows tagsql.Rows) error {
for rows.Next() {
var aliasPieces AliasPieces

View File

@@ -193,7 +193,7 @@ func TestGetStreamPieceCountByNodeID(t *testing.T) {
_, _, _ = metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyStream,
}.Run(ctx, t, db, false)
}.Run(ctx, t, db)
metabasetest.GetStreamPieceCountByNodeID{
Opts: metabase.GetStreamPieceCountByNodeID{

View File

@@ -143,9 +143,8 @@ type Config struct {
ProjectLimits ProjectLimitConfig `help:"project limit configuration"`
// TODO remove this flag when server-side copy implementation will be finished
ServerSideCopy bool `help:"enable code for server-side copy, deprecated. please leave this to true." default:"true"`
ServerSideCopyDisabled bool `help:"disable already enabled server-side copy. this is because once server side copy is enabled, delete code should stay changed, even if you want to disable server side copy" default:"false"`
ServerSideCopyDuplicateMetadata bool `help:"perform server-side copy by duplicating metadata, instead of using segment_copies" default:"false"`
ServerSideCopy bool `help:"enable code for server-side copy, deprecated. please leave this to true." default:"true"`
ServerSideCopyDisabled bool `help:"disable already enabled server-side copy. this is because once server side copy is enabled, delete code should stay changed, even if you want to disable server side copy" default:"false"`
UsePendingObjectsTable bool `help:"enable new flow for upload which is using pending_objects table" default:"false"`

View File

@@ -477,8 +477,6 @@ func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDown
StreamID: object.StreamID,
Range: streamRange,
Limit: int(req.Limit),
UpdateFirstWithAncestor: true,
})
if err != nil {
return nil, endpoint.convertMetabaseErr(err)
@@ -1958,7 +1956,7 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
NewEncryptedMetadata: req.NewEncryptedMetadata,
NewEncryptedMetadataKeyNonce: req.NewEncryptedMetadataKeyNonce,
NewEncryptedMetadataKey: req.NewEncryptedMetadataKey,
DuplicateMetadata: endpoint.config.ServerSideCopyDuplicateMetadata,
VerifyLimits: func(encryptedObjectSize int64, nSegments int64) error {
return endpoint.addStorageUsageUpToLimit(ctx, keyInfo.ProjectID, encryptedObjectSize, nSegments)
},

View File

@@ -841,7 +841,6 @@ func TestEndpoint_Object_With_StorageNodes(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
config.Overlay.GeoIP.MockCountries = []string{"DE"}
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
@@ -2197,11 +2196,6 @@ func TestEndpoint_UpdateObjectMetadata(t *testing.T) {
func TestEndpoint_Object_CopyObject(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
checkDownload := func(objectKey string, expectedData []byte) {
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "multipleversions", objectKey)

View File

@@ -191,7 +191,7 @@ func TestIdentifyIrreparableSegmentsObserver(t *testing.T) {
})
}
func TestIgnoringCopiedSegmentsObserver(t *testing.T) {
func TestObserver_CheckSegmentCopy(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
@@ -241,15 +241,16 @@ func TestIgnoringCopiedSegmentsObserver(t *testing.T) {
_, err = rangedLoopService.RunOnce(ctx)
require.NoError(t, err)
// check that injured segment in repair queue streamID is same that in original segment.
injuredSegment, err := repairQueue.Select(ctx)
require.NoError(t, err)
require.Equal(t, segments[0].StreamID, injuredSegment.StreamID)
// check that repair queue has original segment and copied one as it has exactly the same metadata
for _, segment := range segmentsAfterCopy {
injuredSegment, err := repairQueue.Select(ctx)
require.NoError(t, err)
require.Equal(t, segment.StreamID, injuredSegment.StreamID)
}
// check that repair queue has only original segment, and not copied one.
injuredSegments, err := repairQueue.Count(ctx)
require.NoError(t, err)
require.Equal(t, 1, injuredSegments)
require.Equal(t, 2, injuredSegments)
})
}

View File

@@ -691,9 +691,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# disable already enabled server-side copy. this is because once server side copy is enabled, delete code should stay changed, even if you want to disable server side copy
# metainfo.server-side-copy-disabled: false
# perform server-side copy by duplicating metadata, instead of using segment_copies
# metainfo.server-side-copy-duplicate-metadata: false
# test the new query for non-recursive listing
# metainfo.test-listing-query: false