storj/satellite/metainfo/endpoint_test.go
Michal Niewrzal 6502454947 satellite/metainfo: move RS configuration to satellite
With this change the RS configuration will be set on the satellite. The uplink
will get the RS values with the BeginObject request and will use them. For
backward compatibility, and to avoid a very large change, the redundancy scheme
stored with the bucket is not touched. This can be done in the future.

Change-Id: Ia5f76fc10c37e2c44e4f7b8754f28eafe1f97eff
2020-01-22 09:33:53 +00:00
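The tests in this file exercise that satellite-side configuration through testplanet. Below is a minimal sketch of the pattern, with a hypothetical test name; reading the four ReconfigureRS arguments as (min, repair, success, total) share counts is an assumption inferred from how the tests in this file use it.

// Sketch only (hypothetical standalone test): override the satellite RS values
// so that success == total == StorageNodeCount, avoiding long-tail cancellations.
// The (min, repair, success, total) reading of the ReconfigureRS arguments is an
// assumption taken from how this file uses it.
func TestRSConfigFromSatellite_Sketch(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// Uploads made through planet.Uplinks[0] now use the RS values the
		// satellite hands back in the BeginObject response, not uplink-side ones.
	})
}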


// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"context"
	"strconv"
	"testing"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/require"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/cmd/uplink/cmd"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/storage"
)

func TestEndpoint_DeleteObjectPieces(t *testing.T) {
	t.Run("all nodes up", func(t *testing.T) {
		t.Parallel()

		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
			Reconfigure: testplanet.Reconfigure{
				Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
			},
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			var (
				uplnk        = planet.Uplinks[0]
				satelliteSys = planet.Satellites[0]
			)

			var testCases = []struct {
				caseDescription string
				objData         []byte
			}{
				{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
				{caseDescription: "one inline segment", objData: testrand.Bytes(3 * memory.KiB)},
				{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
				{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
			}
			for i, tc := range testCases {
				i := i
				tc := tc
				t.Run(tc.caseDescription, func(t *testing.T) {
					var (
						bucketName = "a-bucket"
						objectName = "object-filename" + strconv.Itoa(i)
					)

					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					},
						bucketName, objectName, tc.objData,
					)
					require.NoError(t, err)

					// calculate the SNs total used space after data upload
					var totalUsedSpace int64
					for _, sn := range planet.StorageNodes {
						usedSpace, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// calculate the SNs used space after deleting the pieces
					var totalUsedSpaceAfterDelete int64
					for _, sn := range planet.StorageNodes {
						usedSpace, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpaceAfterDelete += usedSpace
					}

					// At this point we can only guarantee that 75% of the SNs' pieces
					// are deleted due to the success threshold
					deletedUsedSpace := float64(totalUsedSpace-totalUsedSpaceAfterDelete) / float64(totalUsedSpace)
					if deletedUsedSpace < 0.75 {
						t.Fatalf("deleted used space fraction is less than 0.75. Got %f", deletedUsedSpace)
					}
				})
			}
		})
	})
t.Run("some nodes down", func(t *testing.T) {
t.Parallel()
var testCases = []struct {
caseDescription string
objData []byte
}{
{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
		for i, tc := range testCases {
			i := i
			tc := tc
			t.Run(tc.caseDescription, func(t *testing.T) {
				var (
					bucketName = "a-bucket"
					objectName = "object-filename" + strconv.Itoa(i)
				)

				testplanet.Run(t, testplanet.Config{
					SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
					Reconfigure: testplanet.Reconfigure{
						Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
					},
				}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
					var (
						uplnk        = planet.Uplinks[0]
						satelliteSys = planet.Satellites[0]
					)

					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					}, bucketName, objectName, tc.objData)
					require.NoError(t, err)

					// Shut down the first 2 storage nodes before we delete the pieces
					require.NoError(t, planet.StopPeer(planet.StorageNodes[0]))
					require.NoError(t, planet.StopPeer(planet.StorageNodes[1]))

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// Check that the storage nodes that were offline when deleting the
					// pieces are still holding data
					var totalUsedSpace int64
					for i := 0; i < 2; i++ {
						usedSpace, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}
					require.NotZero(t, totalUsedSpace, "totalUsedSpace offline nodes")

					// Check that the storage nodes that were online when deleting the
					// pieces don't hold any piece
					totalUsedSpace = 0
					for i := 2; i < len(planet.StorageNodes); i++ {
						usedSpace, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}
					require.Zero(t, totalUsedSpace, "totalUsedSpace online nodes")
				})
			})
		}
	})
t.Run("all nodes down", func(t *testing.T) {
t.Parallel()
var testCases = []struct {
caseDescription string
objData []byte
}{
{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
		for i, tc := range testCases {
			i := i
			tc := tc
			t.Run(tc.caseDescription, func(t *testing.T) {
				var (
					bucketName = "a-bucket"
					objectName = "object-filename" + strconv.Itoa(i)
				)

				testplanet.Run(t, testplanet.Config{
					SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
					Reconfigure: testplanet.Reconfigure{
						Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
					},
				}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
					var (
						uplnk        = planet.Uplinks[0]
						satelliteSys = planet.Satellites[0]
					)

					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					}, bucketName, objectName, tc.objData)
					require.NoError(t, err)

					// Shut down all the storage nodes before we delete the pieces
					for _, sn := range planet.StorageNodes {
						require.NoError(t, planet.StopPeer(sn))
					}

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// Check that the storage nodes that were offline when deleting the
					// pieces are still holding data
					var totalUsedSpace int64
					for _, sn := range planet.StorageNodes {
						usedSpace, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}
					require.NotZero(t, totalUsedSpace, "totalUsedSpace")
				})
			})
		}
	})
}

func getProjectIDAndEncPathFirstObject(
	ctx context.Context, t *testing.T, satellite *testplanet.SatelliteSystem,
) (projectID *uuid.UUID, encryptedPath []byte) {
	t.Helper()

	keys, err := satellite.Metainfo.Database.List(ctx, storage.Key{}, 1)
	require.NoError(t, err)

	keyParts := storj.SplitPath(keys[0].String())
	require.Len(t, keyParts, 4)
	projectID, err = uuid.Parse(keyParts[0])
	require.NoError(t, err)
	encryptedPath = []byte(keyParts[3])

	return projectID, encryptedPath
}