satellite/metabase: remove segment_copies support from ListSegments
We no longer need to support segment copies with references. We migrated to copies where all metadata is copied from the original segment to the copy. https://github.com/storj/storj/issues/5891 Change-Id: Ic91dc21b0386ddf5c51aea45530024cd463e8ba9
This commit is contained in:
parent
6c3300c522
commit
03f8ad323d
@ -1161,6 +1161,8 @@ func TestProjectBandwidthUsageWithCopies(t *testing.T) {
|
||||
// this effectively disables the live accounting cache
|
||||
config.LiveAccounting.BandwidthCacheTTL = -1
|
||||
config.LiveAccounting.AsOfSystemInterval = 0
|
||||
|
||||
config.Metainfo.ServerSideCopyDuplicateMetadata = true
|
||||
},
|
||||
},
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
"time"
|
||||
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/private/dbutil/pgutil"
|
||||
"storj.io/private/tagsql"
|
||||
)
|
||||
|
||||
@ -121,69 +120,6 @@ func (db *DB) ListSegments(ctx context.Context, opts ListSegments) (result ListS
|
||||
return ListSegmentsResult{}, Error.New("unable to fetch object segments: %w", err)
|
||||
}
|
||||
|
||||
if db.config.ServerSideCopy {
|
||||
copies := make([]Segment, 0, len(result.Segments))
|
||||
copiesPositions := make([]int64, 0, len(result.Segments))
|
||||
for _, segment := range result.Segments {
|
||||
if segment.PiecesInAncestorSegment() {
|
||||
copies = append(copies, segment)
|
||||
copiesPositions = append(copiesPositions, int64(segment.Position.Encode()))
|
||||
}
|
||||
}
|
||||
|
||||
if len(copies) > 0 {
|
||||
index := 0
|
||||
err = withRows(db.db.QueryContext(ctx, `
|
||||
SELECT
|
||||
root_piece_id,
|
||||
remote_alias_pieces
|
||||
FROM segments as segments
|
||||
LEFT JOIN segment_copies as copies
|
||||
ON copies.ancestor_stream_id = segments.stream_id
|
||||
WHERE
|
||||
copies.stream_id = $1 AND segments.position IN (SELECT position FROM UNNEST($2::INT8[]) as position)
|
||||
ORDER BY segments.stream_id, segments.position ASC
|
||||
`, opts.StreamID, pgutil.Int8Array(copiesPositions)))(func(rows tagsql.Rows) error {
|
||||
|
||||
for rows.Next() {
|
||||
var aliasPieces AliasPieces
|
||||
err = rows.Scan(
|
||||
&copies[index].RootPieceID,
|
||||
&aliasPieces,
|
||||
)
|
||||
if err != nil {
|
||||
return Error.New("failed to scan segments: %w", err)
|
||||
}
|
||||
|
||||
copies[index].Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
|
||||
if err != nil {
|
||||
return Error.New("failed to convert aliases to pieces: %w", err)
|
||||
}
|
||||
index++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return ListSegmentsResult{}, Error.New("unable to fetch object segments: %w", err)
|
||||
}
|
||||
|
||||
if index != len(copies) {
|
||||
return ListSegmentsResult{}, Error.New("number of ancestor segments is different than copies: want %d got %d",
|
||||
len(copies), index)
|
||||
}
|
||||
}
|
||||
|
||||
// we have to update the first segment because DownloadObject uses this call
|
||||
// and we only need to do the first segment because it only uses the extra
|
||||
// information for the first segment.
|
||||
if len(result.Segments) > 0 && opts.UpdateFirstWithAncestor {
|
||||
err = db.updateWithAncestorSegment(ctx, &result.Segments[0])
|
||||
if err != nil {
|
||||
return ListSegmentsResult{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(result.Segments) > opts.Limit {
|
||||
result.More = true
|
||||
result.Segments = result.Segments[:len(result.Segments)-1]
|
||||
|
@ -270,50 +270,6 @@ func TestListSegments(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("segments from copy", func(t *testing.T) {
|
||||
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
|
||||
|
||||
for _, numberOfSegments := range []byte{0, 1, 2, 10} {
|
||||
originalObjectStream := metabasetest.RandObjectStream()
|
||||
originalObject, originalSegments := metabasetest.CreateTestObject{}.
|
||||
Run(ctx, t, db, originalObjectStream, numberOfSegments)
|
||||
|
||||
copyStream := metabasetest.RandObjectStream()
|
||||
_, _, copySegments := metabasetest.CreateObjectCopy{
|
||||
OriginalObject: originalObject,
|
||||
CopyObjectStream: ©Stream,
|
||||
}.Run(ctx, t, db, false)
|
||||
|
||||
expectedSegments := []metabase.Segment{}
|
||||
for _, segment := range copySegments {
|
||||
expectedSegments = append(expectedSegments, metabase.Segment(segment))
|
||||
}
|
||||
|
||||
metabasetest.ListSegments{
|
||||
Opts: metabase.ListSegments{
|
||||
StreamID: copyStream.StreamID,
|
||||
},
|
||||
Result: metabase.ListSegmentsResult{
|
||||
Segments: expectedSegments,
|
||||
},
|
||||
}.Check(ctx, t, db)
|
||||
|
||||
if numberOfSegments > 0 {
|
||||
expectedSegments[0].Pieces = originalSegments[0].Pieces
|
||||
}
|
||||
|
||||
metabasetest.ListSegments{
|
||||
Opts: metabase.ListSegments{
|
||||
StreamID: copyStream.StreamID,
|
||||
UpdateFirstWithAncestor: true,
|
||||
},
|
||||
Result: metabase.ListSegmentsResult{
|
||||
Segments: expectedSegments,
|
||||
},
|
||||
}.Check(ctx, t, db)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("segments from copy with duplicate metadata", func(t *testing.T) {
|
||||
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
|
||||
|
||||
|
@ -841,6 +841,7 @@ func TestEndpoint_Object_With_StorageNodes(t *testing.T) {
|
||||
Reconfigure: testplanet.Reconfigure{
|
||||
Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
|
||||
config.Overlay.GeoIP.MockCountries = []string{"DE"}
|
||||
config.Metainfo.ServerSideCopyDuplicateMetadata = true
|
||||
},
|
||||
},
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
@ -2196,6 +2197,11 @@ func TestEndpoint_UpdateObjectMetadata(t *testing.T) {
|
||||
func TestEndpoint_Object_CopyObject(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
Reconfigure: testplanet.Reconfigure{
|
||||
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
|
||||
config.Metainfo.ServerSideCopyDuplicateMetadata = true
|
||||
},
|
||||
},
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
checkDownload := func(objectKey string, expectedData []byte) {
|
||||
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "multipleversions", objectKey)
|
||||
|
@ -25,6 +25,9 @@ export STORJ_NETWORK_DIR=$TMP
|
||||
STORJ_NETWORK_HOST4=${STORJ_NETWORK_HOST4:-127.0.0.1}
|
||||
STORJ_SIM_POSTGRES=${STORJ_SIM_POSTGRES:-""}
|
||||
|
||||
# TODO remove when metainfo.server-side-copy-duplicate-metadata is dropped
|
||||
export STORJ_METAINFO_SERVER_SIDE_COPY_DUPLICATE_METADATA=true
|
||||
|
||||
# setup the network
|
||||
# if postgres connection string is set as STORJ_SIM_POSTGRES then use that for testing
|
||||
if [ -z ${STORJ_SIM_POSTGRES} ]; then
|
||||
|
@ -55,6 +55,9 @@ fi
|
||||
storj-up env set satellite-api STORJ_DATABASE_OPTIONS_MIGRATION_UNSAFE="full"
|
||||
storj-up local satellite-api,storagenode -d "$TMP_BIN"
|
||||
|
||||
# TODO remove when metainfo.server-side-copy-duplicate-metadata is dropped
|
||||
storj-up env set satellite-api STORJ_METAINFO_SERVER_SIDE_COPY_DUPLICATE_METADATA="true"
|
||||
|
||||
# start the services
|
||||
docker compose up -d
|
||||
if [ "$DB" == "cockroach" ]
|
||||
|
@ -44,6 +44,9 @@ export STORJ_NETWORK_DIR="${TMP_DIR}"
|
||||
STORJ_NETWORK_HOST4=${STORJ_NETWORK_HOST4:-127.0.0.1}
|
||||
export STORJ_REDIS_HOST=${STORJ_NETWORK_HOST4}
|
||||
|
||||
# TODO remove when metainfo.server-side-copy-duplicate-metadata is dropped
|
||||
export STORJ_METAINFO_SERVER_SIDE_COPY_DUPLICATE_METADATA=true
|
||||
|
||||
# setup the network
|
||||
"${SCRIPT_DIR}/redis-server.sh" start
|
||||
storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \
|
||||
|
Loading…
Reference in New Issue
Block a user