satellite/metainfo: speedup deletion tests
The speedup comes from reducing the number of testplanet instances the tests start, without changing the main test logic.

Change-Id: Ic3849485d37b8ca55c013a45b7191dce65b88b04
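In outline, this is the usual table-test consolidation: instead of spinning up a fresh testplanet (a satellite, several storage nodes, and an uplink) inside every t.Run subtest, the test starts one planet and iterates over the test cases inside it. Below is a minimal sketch of that pattern, not code from this commit: it assumes the private/testplanet and private/testcontext import paths of this era of the repository, and testCases and runCase are hypothetical stand-ins for the real table and the create/delete/verify helpers.

package metainfo_test

import (
	"context"
	"testing"

	"storj.io/storj/private/testcontext"
	"storj.io/storj/private/testplanet"
)

// Illustrative test table; the real one lives in the test file below.
var testCases = []struct {
	caseDescription string
	objData         []byte
}{
	{caseDescription: "one inline segment", objData: make([]byte, 3*1024)},
	{caseDescription: "several segments", objData: make([]byte, 33*1024)},
}

// runCase is a hypothetical stand-in for the create/delete/verify logic.
func runCase(ctx context.Context, t *testing.T, planet *testplanet.Planet, data []byte) {}

func TestDeleteObjectsSketch(t *testing.T) {
	// Starting the planet is the expensive part; doing it once instead of
	// once per test case is the whole speedup.
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		for _, tc := range testCases {
			runCase(ctx, t, planet, tc.objData)
		}
	})
}

Because all cases now share one planet, the real test creates the objects for every case up front and compares exact per-node used-space values recorded before shutdown, which is what the hunks below change.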
parent 187941ff86
commit b32fbc0f93
@@ -244,9 +244,6 @@ func testDeleteObject(t *testing.T, createObject func(ctx context.Context, t *te
 		{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
 	}
 
-	for _, tc := range testCases {
-		tc := tc
-		t.Run(tc.caseDescription, func(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
@@ -260,11 +257,22 @@ func testDeleteObject(t *testing.T, createObject func(ctx context.Context, t *te
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		numToShutdown := 2
 
+		for _, tc := range testCases {
 			createObject(ctx, t, planet, tc.objData)
+		}
+
 		require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
 
 		// Shutdown the first numToShutdown storage nodes before we delete the pieces
-		require.NoError(t, planet.StopPeer(planet.StorageNodes[0]))
-		require.NoError(t, planet.StopPeer(planet.StorageNodes[1]))
+		// and collect used space values for those nodes
+		snUsedSpace := make([]int64, len(planet.StorageNodes))
+		for i := 0; i < numToShutdown; i++ {
+			var err error
+			snUsedSpace[i], _, err = planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
+			require.NoError(t, err)
+
+			require.NoError(t, planet.StopPeer(planet.StorageNodes[i]))
+		}
 
 		deleteAllObjects(ctx, t, planet)
@@ -272,29 +280,18 @@ func testDeleteObject(t *testing.T, createObject func(ctx context.Context, t *te
 
-		// Check that storage nodes that were offline when deleting the pieces
-		// they are still holding data
-		var totalUsedSpace int64
-		for i := 0; i < numToShutdown; i++ {
-			piecesTotal, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
-			require.NoError(t, err)
-			totalUsedSpace += piecesTotal
-		}
-
-		require.NotZero(t, totalUsedSpace, "totalUsedSpace offline nodes")
-
-		// Check that storage nodes which are online when deleting pieces don't
-		// hold any piece
-		totalUsedSpace = 0
-		for i := numToShutdown; i < len(planet.StorageNodes); i++ {
-			piecesTotal, _, err := planet.StorageNodes[i].Storage2.Store.SpaceUsedForPieces(ctx)
+		// We are comparing used space from before deletion for nodes that were
+		// offline, values for available nodes are 0
+		for i, sn := range planet.StorageNodes {
+			usedSpace, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
 			require.NoError(t, err)
-			totalUsedSpace += piecesTotal
-		}
-
-		require.Zero(t, totalUsedSpace, "totalUsedSpace online nodes")
-		})
-	})
+
+			require.Equal(t, snUsedSpace[i], usedSpace, "StorageNode #%d", i)
+		}
+	})
+	})
 
 	t.Run("all nodes down", func(t *testing.T) {
 		t.Parallel()
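The single assertion loop in the new code works because snUsedSpace is allocated with an entry for every storage node, but only the first numToShutdown entries are filled in before those nodes are stopped. For nodes that stayed online the recorded value is the int64 zero value, which is exactly the used space they should report once their pieces are deleted, so one require.Equal per node replaces the earlier NotZero/Zero checks for the offline and online groups.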