satellite/metainfo: simplify metainfo delete test

Simplifies the delete tests for pending and committed objects by extracting createObject/deleteObject helpers and running both object types through a shared testDeleteObject scenario runner.

Change-Id: I0c683198f8fce62898142c6c343aca985d91a77f
Author: Michał Niewrzał, 2021-01-11 13:12:54 +01:00 (committed by Michal Niewrzal)
Parent: 6dff40f5c5
Commit: 95320912b4
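Both tests now only define how an object is created and deleted; the node-up/node-down scenarios live in the shared testDeleteObject runner. A condensed sketch of the resulting committed-object test, assembled from the added lines in the diff below:

func TestEndpoint_DeleteCommittedObject(t *testing.T) {
	bucketName := "a-bucket"

	// createObject uploads one committed object through the regular uplink path.
	createObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, data []byte) {
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "object-filename", data)
		require.NoError(t, err)
	}

	// deleteObject finds the single committed object, deletes it through the
	// endpoint, and verifies the metabase no longer lists it.
	deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
		projectID := planet.Uplinks[0].Projects[0].ID

		items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
		require.NoError(t, err)
		require.Len(t, items, 1)

		_, err = planet.Satellites[0].Metainfo.Endpoint2.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
		require.NoError(t, err)

		items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
		require.NoError(t, err)
		require.Len(t, items, 0)
	}

	// The shared runner exercises the "all nodes up", "some nodes down" and
	// "all nodes down" scenarios against the injected create/delete behaviour.
	testDeleteObject(t, createObject, deleteObject)
}

The pending-object test injects the same two hooks, swapping the upload for NewMultipartUpload/PutObjectPart and the endpoint call for DeletePendingObject, as shown further down in the diff.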


@@ -5,7 +5,7 @@ package metainfo_test
import (
"bytes"
"strconv"
"context"
"testing"
"github.com/stretchr/testify/require"
@@ -20,238 +20,62 @@ import (
)
func TestEndpoint_DeleteCommittedObject(t *testing.T) {
t.Run("all nodes up", func(t *testing.T) {
t.Parallel()
bucketName := "a-bucket"
createObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, data []byte) {
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "object-filename", data)
require.NoError(t, err)
}
deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
projectID := planet.Uplinks[0].Projects[0].ID
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
var testCases = []struct {
caseDescription string
objData []byte
hasRemote bool
}{
{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
{caseDescription: "one inline segment", objData: testrand.Bytes(3 * memory.KiB)},
{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
_, err = planet.Satellites[0].Metainfo.Endpoint2.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
require.NoError(t, err)
for i, tc := range testCases {
i := i
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS to ensure that we don't have long-tail cancellations
// and that the upload doesn't leave garbage in the SNs
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
var (
bucketName = "a-bucket"
objectName = "object-filename" + strconv.Itoa(i)
percentExp = 0.75
)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)
// calculate the SNs total used space after data upload
var totalUsedSpace int64
for _, sn := range planet.StorageNodes {
piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
require.NoError(t, err)
totalUsedSpace += piecesTotal
}
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
_, err = satelliteSys.Metainfo.Endpoint2.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
require.NoError(t, err)
planet.WaitForStorageNodeDeleters(ctx)
// calculate the SNs used space after deleting the pieces
var totalUsedSpaceAfterDelete int64
for _, sn := range planet.StorageNodes {
piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
require.NoError(t, err)
totalUsedSpaceAfterDelete += piecesTotal
}
// At this point we can only guarantee that 75% of the SNs' pieces
// are deleted due to the success threshold
deletedUsedSpace := float64(totalUsedSpace-totalUsedSpaceAfterDelete) / float64(totalUsedSpace)
if deletedUsedSpace < percentExp {
t.Fatalf("deleted used space is less than %f%%. Got %f", percentExp, deletedUsedSpace)
}
})
})
}
})
t.Run("some nodes down", func(t *testing.T) {
t.Parallel()
var testCases = []struct {
caseDescription string
objData []byte
}{
{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
const (
bucketName = "a-bucket"
objectName = "object-filename"
)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS to ensure that we don't have long-tail cancellations
// and that the upload doesn't leave garbage in the SNs
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
numToShutdown := 2
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)
// Shutdown the first numToShutdown storage nodes before we delete the pieces
require.NoError(t, planet.StopPeer(planet.StorageNodes[0]))
require.NoError(t, planet.StopPeer(planet.StorageNodes[1]))
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
_, err = satelliteSys.Metainfo.Endpoint2.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
require.NoError(t, err)
planet.WaitForStorageNodeDeleters(ctx)
// Check that storage nodes that were offline when deleting the pieces
// are still holding data
var totalUsedSpace int64
for i := 0; i < numToShutdown; i++ {
piecesTotal, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
require.NoError(t, err)
totalUsedSpace += piecesTotal
}
require.NotZero(t, totalUsedSpace, "totalUsedSpace offline nodes")
// Check that storage nodes which are online when deleting pieces don't
// hold any piece
totalUsedSpace = 0
for i := numToShutdown; i < len(planet.StorageNodes); i++ {
piecesTotal, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
require.NoError(t, err)
totalUsedSpace += piecesTotal
}
require.Zero(t, totalUsedSpace, "totalUsedSpace online nodes")
})
})
}
})
t.Run("all nodes down", func(t *testing.T) {
t.Parallel()
var testCases = []struct {
caseDescription string
objData []byte
}{
{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
const (
bucketName = "a-bucket"
objectName = "object-filename"
)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
// Reconfigure RS to ensure that we don't have long-tail cancellations
// and that the upload doesn't leave garbage in the SNs
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(13*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectName, tc.objData)
require.NoError(t, err)
// Shutdown all the storage nodes before we delete the pieces
for _, sn := range planet.StorageNodes {
require.NoError(t, planet.StopPeer(sn))
}
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
_, err = satelliteSys.Metainfo.Endpoint2.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
require.NoError(t, err)
// Check that storage nodes that were offline when deleting the pieces
// are still holding data
var totalUsedSpace int64
for _, sn := range planet.StorageNodes {
piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
require.NoError(t, err)
totalUsedSpace += piecesTotal
}
require.NotZero(t, totalUsedSpace, "totalUsedSpace")
})
})
}
})
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 0)
}
testDeleteObject(t, createObject, deleteObject)
}
func TestEndpoint_DeletePendingObject(t *testing.T) {
bucketName := "a-bucket"
createObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, data []byte) {
// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
project, err := planet.Uplinks[0].GetProject(ctx, planet.Satellites[0])
require.NoError(t, err, "failed to retrieve project")
_, err = project.CreateBucket(ctx, bucketName)
require.NoError(t, err, "failed to create bucket")
info, err := project.NewMultipartUpload(ctx, bucketName, "object-filename", &uplink.MultipartUploadOptions{})
require.NoError(t, err, "failed to start multipart upload")
_, err = project.PutObjectPart(ctx, bucketName, bucketName, info.StreamID, 1, bytes.NewReader(data))
require.NoError(t, err, "failed to put object part")
}
deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
projectID := planet.Uplinks[0].Projects[0].ID
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
deletedObjects, err := planet.Satellites[0].Metainfo.Endpoint2.DeletePendingObject(ctx, projectID, bucketName, items[0].ObjectKey, 1, items[0].StreamID)
require.NoError(t, err)
require.Len(t, deletedObjects, 1)
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 0)
}
testDeleteObject(t, createObject, deleteObject)
}
func testDeleteObject(t *testing.T, createObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet,
data []byte), deleteObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet)) {
t.Run("all nodes up", func(t *testing.T) {
t.Parallel()
@@ -266,8 +90,7 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
}
for i, tc := range testCases {
i := i
for _, tc := range testCases {
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
testplanet.Run(t, testplanet.Config{
@@ -282,28 +105,10 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
var (
bucketName = "a-bucket"
objectName = "object-filename" + strconv.Itoa(i)
percentExp = 0.75
)
// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
project, err := uplnk.GetProject(ctx, satelliteSys)
require.NoError(t, err, "failed to retrieve project")
_, err = project.CreateBucket(ctx, bucketName)
require.NoError(t, err, "failed to create bucket")
info, err := project.NewMultipartUpload(ctx, bucketName, objectName, &uplink.MultipartUploadOptions{})
require.NoError(t, err, "failed to start multipart upload")
_, err = project.PutObjectPart(ctx, bucketName, bucketName, info.StreamID, 1, bytes.NewReader(tc.objData))
require.NoError(t, err, "failed to put object part")
createObject(ctx, t, planet, tc.objData)
// calculate the SNs total used space after data upload
var totalUsedSpace int64
@@ -313,13 +118,7 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
totalUsedSpace += piecesTotal
}
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
_, err = satelliteSys.Metainfo.Endpoint2.DeletePendingObject(ctx, projectID, bucketName, items[0].ObjectKey, 1, items[0].StreamID)
require.NoError(t, err)
deleteObject(ctx, t, planet)
planet.WaitForStorageNodeDeleters(ctx)
@@ -359,12 +158,6 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
const (
bucketName = "a-bucket"
objectName = "object-filename"
)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
@@ -378,43 +171,16 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
numToShutdown := 2
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
project, err := uplnk.GetProject(ctx, satelliteSys)
require.NoError(t, err, "failed to retrieve project")
// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
_, err = project.CreateBucket(ctx, bucketName)
require.NoError(t, err, "failed to create bucket")
info, err := project.NewMultipartUpload(ctx, bucketName, objectName, &uplink.MultipartUploadOptions{})
require.NoError(t, err, "failed to start multipart upload")
_, err = project.PutObjectPart(ctx, bucketName, bucketName, info.StreamID, 1, bytes.NewReader(tc.objData))
require.NoError(t, err, "failed to put object part")
createObject(ctx, t, planet, tc.objData)
// Shutdown the first numToShutdown storage nodes before we delete the pieces
require.NoError(t, planet.StopPeer(planet.StorageNodes[0]))
require.NoError(t, planet.StopPeer(planet.StorageNodes[1]))
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
deletedObjects, err := satelliteSys.Metainfo.Endpoint2.DeletePendingObject(ctx, projectID, bucketName, items[0].ObjectKey, 1, items[0].StreamID)
require.NoError(t, err)
require.Len(t, deletedObjects, 1)
deleteObject(ctx, t, planet)
planet.WaitForStorageNodeDeleters(ctx)
items, err = satelliteSys.Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 0)
// Check that storage nodes that were offline when deleting the pieces
// are still holding data
var totalUsedSpace int64
@@ -456,10 +222,6 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.caseDescription, func(t *testing.T) {
const (
bucketName = "a-bucket"
objectName = "object-filename"
)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
@@ -471,36 +233,14 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
uplnk = planet.Uplinks[0]
satelliteSys = planet.Satellites[0]
)
project, err := uplnk.GetProject(ctx, satelliteSys)
require.NoError(t, err, "failed to retrieve project")
// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
_, err = project.CreateBucket(ctx, bucketName)
require.NoError(t, err, "failed to create bucket")
info, err := project.NewMultipartUpload(ctx, bucketName, objectName, &uplink.MultipartUploadOptions{})
require.NoError(t, err, "failed to start multipart upload")
_, err = project.PutObjectPart(ctx, bucketName, bucketName, info.StreamID, 1, bytes.NewReader(tc.objData))
require.NoError(t, err, "failed to put object part")
createObject(ctx, t, planet, tc.objData)
// Shutdown all the storage nodes before we delete the pieces
for _, sn := range planet.StorageNodes {
require.NoError(t, planet.StopPeer(sn))
}
projectID := uplnk.Projects[0].ID
items, err := satelliteSys.Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
require.NoError(t, err)
require.Len(t, items, 1)
_, err = satelliteSys.Metainfo.Endpoint2.DeletePendingObject(ctx, projectID, bucketName, items[0].ObjectKey, 1, items[0].StreamID)
require.NoError(t, err)
deleteObject(ctx, t, planet)
// Check that storage nodes that were offline when deleting the pieces
// are still holding data
@@ -517,6 +257,7 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
}
})
}
func TestDeleteBucket(t *testing.T) {
testplanet.Run(t, testplanet.Config{
Reconfigure: testplanet.Reconfigure{