storj/satellite/gc/gc_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package gc_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/paths"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
	"storj.io/storj/storage"
)

// TestGarbageCollection does the following:
// * Set up a network with one storagenode
// * Upload two objects
// * Delete one object from the metainfo service on the satellite
// * Wait for bloom filter generation
// * Check that pieces of the deleted object are deleted on the storagenode
// * Check that pieces of the kept object are not deleted on the storagenode
func TestGarbageCollection(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.GarbageCollection.FalsePositiveRate = 0.000000001
				config.GarbageCollection.Interval = 500 * time.Millisecond
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		upl := planet.Uplinks[0]
		targetNode := planet.StorageNodes[0]
		gcService := satellite.GarbageCollection.Service

		// Pause the GC loop so the test controls exactly when garbage collection runs.
		gcService.Loop.Pause()
		// Upload two objects
		testData1 := testrand.Bytes(8 * memory.KiB)
		testData2 := testrand.Bytes(8 * memory.KiB)

		err := upl.Upload(ctx, satellite, "testbucket", "test/path/1", testData1)
		require.NoError(t, err)

		deletedEncPath, pointerToDelete := getPointer(ctx, t, satellite, upl, "testbucket", "test/path/1")
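		// Find the piece of the first object stored on the target node; this object will be
		// deleted later, so its piece should be removed by garbage collection.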
		var deletedPieceID storj.PieceID
		for _, p := range pointerToDelete.GetRemote().GetRemotePieces() {
			if p.NodeId == targetNode.ID() {
				deletedPieceID = pointerToDelete.GetRemote().RootPieceId.Derive(p.NodeId, p.PieceNum)
				break
			}
		}
		require.NotZero(t, deletedPieceID)

		err = upl.Upload(ctx, satellite, "testbucket", "test/path/2", testData2)
		require.NoError(t, err)

		_, pointerToKeep := getPointer(ctx, t, satellite, upl, "testbucket", "test/path/2")
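		// Find the piece of the second object stored on the target node; this object is kept,
		// so its piece should survive garbage collection.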
		var keptPieceID storj.PieceID
		for _, p := range pointerToKeep.GetRemote().GetRemotePieces() {
			if p.NodeId == targetNode.ID() {
				keptPieceID = pointerToKeep.GetRemote().RootPieceId.Derive(p.NodeId, p.PieceNum)
				break
			}
		}
		require.NotZero(t, keptPieceID)

		// Delete one object from the metainfo service on the satellite
		err = satellite.Metainfo.Service.Delete(ctx, deletedEncPath)
		require.NoError(t, err)

		// Check that the piece of the deleted object is still on the storagenode
		pieceAccess, err := targetNode.DB.Pieces().Stat(ctx, storage.BlobRef{
			Namespace: satellite.ID().Bytes(),
			Key:       deletedPieceID.Bytes(),
		})
		require.NoError(t, err)
		require.NotNil(t, pieceAccess)

		// The pieceInfo.GetPieceIDs query converts the piece creation and filter creation timestamps
		// to datetime in sql, which chops off all precision beyond seconds.
		// In this test, less than a second elapses between the piece uploads and the gc loop, so
		// datetime(piece_creation) < datetime(filter_creation) is false unless we sleep for a second.
		time.Sleep(1 * time.Second)

		// Wait for the next iteration of garbage collection to finish
		gcService.Loop.Restart()
		gcService.Loop.TriggerWait()

		// Wait for the storagenode's RetainService queue to be empty
		targetNode.Storage2.RetainService.TestWaitUntilEmpty()

		// Check that the piece of the deleted object is no longer on the storagenode
		pieceAccess, err = targetNode.DB.Pieces().Stat(ctx, storage.BlobRef{
			Namespace: satellite.ID().Bytes(),
			Key:       deletedPieceID.Bytes(),
		})
		require.Error(t, err)
		require.Nil(t, pieceAccess)

		// Check that the piece of the kept object is still on the storagenode
		pieceAccess, err = targetNode.DB.Pieces().Stat(ctx, storage.BlobRef{
			Namespace: satellite.ID().Bytes(),
			Key:       keptPieceID.Bytes(),
		})
		require.NoError(t, err)
		require.NotNil(t, pieceAccess)
	})
}
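
// getPointer builds the metainfo key for the last segment of the given object and returns it
// together with the pointer stored under that key on the satellite.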
func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.SatelliteSystem, upl *testplanet.Uplink, bucket, path string) (lastSegPath string, pointer *pb.Pointer) {
	projects, err := satellite.DB.Console().Projects().GetAll(ctx)
	require.NoError(t, err)
	require.Len(t, projects, 1)

	encParameters := upl.GetConfig(satellite).GetEncryptionParameters()
	cipherSuite := encParameters.CipherSuite
	store := encryption.NewStore()
	store.SetDefaultKey(new(storj.Key))
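	// Encrypt the path with the store's default key so the result matches the encrypted
	// path under which the uplink stored the object.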
	encryptedPath, err := encryption.EncryptPath(bucket, paths.NewUnencrypted(path), cipherSuite, store)
	require.NoError(t, err)
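
	// The pointer for an object's last segment is stored under projectID/"l"/bucket/encryptedPath.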
	lastSegPath = storj.JoinPaths(projects[0].ID.String(), "l", bucket, encryptedPath.Raw())
	pointer, err = satellite.Metainfo.Service.Get(ctx, lastSegPath)
	require.NoError(t, err)

	return lastSegPath, pointer
}