14fd6a9ef0
This change updates the storagenode piecestore APIs to expose the full piece size stored on disk. Previously we only had access to (and only cached) the content size of each piece, which made the reported disk space used by nodes inaccurate. We now have access to both the total content size and the total disk usage of all pieces, and the pieces cache keeps the total piece size alongside the content size.

Change-Id: I4fffe7e1257e04c46021a2e37c5adc6fe69bee55
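To make the size distinction concrete: what a piece occupies on disk is its content plus the serialized piece header, so a cache that tracks only content size under-reports a node's real disk usage. The following is a minimal, self-contained sketch of keeping both totals; the pieceSpaceUsed type, its fields, and its add method are hypothetical illustrations, not the actual storagenode piecestore API.

package main

import "fmt"

// pieceSpaceUsed is a hypothetical cache entry, not the real piecestore type:
// it tracks both the piece content size and the total size the piece occupies
// on disk (content plus the serialized piece header).
type pieceSpaceUsed struct {
	ContentSize int64 // bytes of piece content only
	TotalSize   int64 // bytes on disk, including the piece header
}

// add accumulates one stored piece into the cached totals.
func (p *pieceSpaceUsed) add(contentSize, headerSize int64) {
	p.ContentSize += contentSize
	p.TotalSize += contentSize + headerSize
}

func main() {
	var cache pieceSpaceUsed

	// Two pieces with 2 KiB of content each and a 512-byte header apiece.
	cache.add(2048, 512)
	cache.add(2048, 512)

	// Reporting only ContentSize (4096) under-counts the real usage (5120).
	fmt.Printf("content: %d B, on disk: %d B\n", cache.ContentSize, cache.TotalSize)
}

In the test file below, SpaceUsedForPieces returns more than one value (the second is discarded as _); the tests sum the first to check each node's used space before and after deletion.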
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"context"
	"strconv"
	"testing"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/require"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/cmd/uplink/cmd"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/storage"
)

func TestEndpoint_DeleteObjectPieces(t *testing.T) {
	t.Run("all nodes up", func(t *testing.T) {
		t.Parallel()

		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
			Reconfigure: testplanet.Reconfigure{
				Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
			},
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			var (
				uplnk        = planet.Uplinks[0]
				satelliteSys = planet.Satellites[0]
			)

			var testCases = []struct {
				caseDescription string
				objData         []byte
			}{
				{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
				{caseDescription: "one inline segment", objData: testrand.Bytes(3 * memory.KiB)},
				{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
				{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
			}

			for i, tc := range testCases {
				i := i
				tc := tc
				t.Run(tc.caseDescription, func(t *testing.T) {
					var (
						bucketName = "a-bucket"
						objectName = "object-filename" + strconv.Itoa(i)
					)

					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs.
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					},
						bucketName, objectName, tc.objData,
					)
					require.NoError(t, err)

					// Calculate the SNs' total used space after the data upload.
					var totalUsedSpace int64
					for _, sn := range planet.StorageNodes {
						usedSpace, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// Calculate the SNs' used space after deleting the pieces.
					var totalUsedSpaceAfterDelete int64
					for _, sn := range planet.StorageNodes {
						usedSpace, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpaceAfterDelete += usedSpace
					}

					// At this point we can only guarantee that 75% of the SNs' pieces
					// are deleted due to the success threshold.
					deletedUsedSpace := float64(totalUsedSpace-totalUsedSpaceAfterDelete) / float64(totalUsedSpace)
					if deletedUsedSpace < 0.75 {
						t.Fatalf("deleted used space is less than 75%%. Got %f", deletedUsedSpace)
					}
				})
			}
		})
	})

	t.Run("some nodes down", func(t *testing.T) {
		t.Parallel()

		var testCases = []struct {
			caseDescription string
			objData         []byte
		}{
			{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
			{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
			{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
		}

		for i, tc := range testCases {
			i := i
			tc := tc
			t.Run(tc.caseDescription, func(t *testing.T) {
				var (
					bucketName = "a-bucket"
					objectName = "object-filename" + strconv.Itoa(i)
				)

				testplanet.Run(t, testplanet.Config{
					SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
					Reconfigure: testplanet.Reconfigure{
						Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
					},
				}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
					var (
						uplnk        = planet.Uplinks[0]
						satelliteSys = planet.Satellites[0]
					)
					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs.
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					}, bucketName, objectName, tc.objData)
					require.NoError(t, err)

					// Shut down the first 2 storage nodes before we delete the pieces.
					require.NoError(t, planet.StopPeer(planet.StorageNodes[0]))
					require.NoError(t, planet.StopPeer(planet.StorageNodes[1]))

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// Check that the storage nodes that were offline when the pieces were
					// deleted are still holding data.
					var totalUsedSpace int64
					for i := 0; i < 2; i++ {
						usedSpace, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}

					require.NotZero(t, totalUsedSpace, "totalUsedSpace offline nodes")

					// Check that the storage nodes that were online when the pieces were
					// deleted don't hold any pieces.
					totalUsedSpace = 0
					for i := 2; i < len(planet.StorageNodes); i++ {
						usedSpace, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}

					require.Zero(t, totalUsedSpace, "totalUsedSpace online nodes")
				})
			})
		}
	})

	t.Run("all nodes down", func(t *testing.T) {
		t.Parallel()

		var testCases = []struct {
			caseDescription string
			objData         []byte
		}{
			{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
			{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
			{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
		}

		for i, tc := range testCases {
			i := i
			tc := tc
			t.Run(tc.caseDescription, func(t *testing.T) {
				var (
					bucketName = "a-bucket"
					objectName = "object-filename" + strconv.Itoa(i)
				)
				testplanet.Run(t, testplanet.Config{
					SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
					Reconfigure: testplanet.Reconfigure{
						Satellite: testplanet.ReconfigureRS(2, 2, 4, 4),
					},
				}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
					var (
						uplnk        = planet.Uplinks[0]
						satelliteSys = planet.Satellites[0]
					)

					// Use RSConfig to ensure that we don't have long-tail cancellations
					// and that the upload doesn't leave garbage in the SNs.
					err := uplnk.UploadWithClientConfig(ctx, satelliteSys, cmd.Config{
						Client: cmd.ClientConfig{
							SegmentSize: 10 * memory.KiB,
						},
					}, bucketName, objectName, tc.objData)
					require.NoError(t, err)

					// Shut down all the storage nodes before we delete the pieces.
					for _, sn := range planet.StorageNodes {
						require.NoError(t, planet.StopPeer(sn))
					}

					projectID, encryptedPath := getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
					err = satelliteSys.Metainfo.Endpoint2.DeleteObjectPieces(
						ctx, *projectID, []byte(bucketName), encryptedPath,
					)
					require.NoError(t, err)

					// Check that the storage nodes, which were all offline when the pieces
					// were deleted, are still holding data.
					var totalUsedSpace int64
					for _, sn := range planet.StorageNodes {
						usedSpace, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += usedSpace
					}

					require.NotZero(t, totalUsedSpace, "totalUsedSpace")
				})
			})
		}
	})
}

func getProjectIDAndEncPathFirstObject(
	ctx context.Context, t *testing.T, satellite *testplanet.SatelliteSystem,
) (projectID *uuid.UUID, encryptedPath []byte) {
	t.Helper()

	keys, err := satellite.Metainfo.Database.List(ctx, storage.Key{}, 1)
	require.NoError(t, err)
	keyParts := storj.SplitPath(keys[0].String())
	require.Len(t, keyParts, 4)
	projectID, err = uuid.Parse(keyParts[0])
	require.NoError(t, err)
	encryptedPath = []byte(keyParts[3])

	return projectID, encryptedPath
}