// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information

package retain_test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
	"golang.org/x/sync/errgroup"

	"storj.io/common/bloomfilter"
	"storj.io/common/errs2"
	"storj.io/common/identity/testidentity"
	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/cmd/storagenode/internalcmd"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/blobstore"
	"storj.io/storj/storagenode/blobstore/filestore"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/retain"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)

func TestRetainPieces(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		log := zaptest.NewLogger(t)
		blobs := db.Pieces()
		v0PieceInfo := db.V0PieceInfo()
		fw := pieces.NewFileWalker(log, blobs, v0PieceInfo)
		store := pieces.NewStore(log, fw, nil, blobs, v0PieceInfo, db.PieceExpirationDB(), db.PieceSpaceUsedDB(), pieces.DefaultConfig)
		testStore := pieces.StoreForTest{Store: store}
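		// StoreForTest exposes test-only helpers such as WriterForFormatVersion,
		// which is used below to write pieces in a specific storage format version.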

		const numPieces = 100
		const numPiecesToKeep = 95
		// pieces from numPiecesToKeep + numOldPieces to numPieces will
		// have a recent timestamp and thus should not be deleted
		const numOldPieces = 5

		// for this test, we set the false positive rate very low, so we can
		// test which pieces should be deleted with precision
		filter := bloomfilter.NewOptimal(numPieces, 0.000000001)

		pieceIDs := generateTestIDs(numPieces)

		satellite0 := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
		satellite1 := testidentity.MustPregeneratedSignedIdentity(2, storj.LatestIDVersion())

		uplink := testidentity.MustPregeneratedSignedIdentity(3, storj.LatestIDVersion())

		// keep pieceIDs[0 : numPiecesToKeep] (old + in filter)
		// delete pieceIDs[numPiecesToKeep : numPiecesToKeep+numOldPieces] (old + not in filter)
		// keep pieceIDs[numPiecesToKeep+numOldPieces : numPieces] (recent + not in filter)
		// add all pieces to the node pieces info DB - but only count piece ids in filter
		for index, id := range pieceIDs {
			var formatVer blobstore.FormatVersion
			if index%2 == 0 {
				formatVer = filestore.FormatV0
			} else {
				formatVer = filestore.FormatV1
			}

			if index < numPiecesToKeep {
				filter.Add(id)
			}

			const size = 100 * memory.B

			// Write file for all satellites
			for _, satelliteID := range []storj.NodeID{satellite0.ID, satellite1.ID} {
				now := time.Now()
				w, err := testStore.WriterForFormatVersion(ctx, satelliteID, id, formatVer, pb.PieceHashAlgorithm_SHA256)
				require.NoError(t, err)

				_, err = w.Write(testrand.Bytes(size))
				require.NoError(t, err)

				require.NoError(t, w.Commit(ctx, &pb.PieceHeader{
					CreationTime: now,
				}))

				piecehash, err := signing.SignPieceHash(ctx,
					signing.SignerFromFullIdentity(uplink),
					&pb.PieceHash{
						PieceId: id,
						Hash:    []byte{0, 2, 3, 4, 5},
					})
				require.NoError(t, err)

				if formatVer == filestore.FormatV0 {
					v0db := testStore.GetV0PieceInfoDBForTest()
					err = v0db.Add(ctx, &pieces.Info{
						SatelliteID:     satelliteID,
						PieceSize:       4,
						PieceID:         id,
						PieceCreation:   now,
						UplinkPieceHash: piecehash,
						OrderLimit:      &pb.OrderLimit{},
					})
					require.NoError(t, err)
				}
			}
		}

		retainEnabled := retain.NewService(zaptest.NewLogger(t), store, retain.Config{
			Status:      retain.Enabled,
			Concurrency: 1,
			MaxTimeSkew: 0,
		})

		retainDisabled := retain.NewService(zaptest.NewLogger(t), store, retain.Config{
			Status:      retain.Disabled,
			Concurrency: 1,
			MaxTimeSkew: 0,
		})

		retainDebug := retain.NewService(zaptest.NewLogger(t), store, retain.Config{
			Status:      retain.Debug,
			Concurrency: 1,
			MaxTimeSkew: 0,
		})
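		// Only the Enabled service is expected to delete anything; the Disabled
		// and Debug services must accept requests but leave every piece in place.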

		// start the retain services
		runCtx, cancel := context.WithCancel(ctx)
		defer cancel()

		var group errgroup.Group
		group.Go(func() error {
			return retainEnabled.Run(runCtx)
		})
		group.Go(func() error {
			return retainDisabled.Run(runCtx)
		})
		group.Go(func() error {
			return retainDebug.Run(runCtx)
		})

		// expect that disabled and debug endpoints do not delete any pieces
		req := retain.Request{
			SatelliteID:   satellite0.ID,
			CreatedBefore: time.Now(),
			Filter:        filter,
		}
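		// Pieces for satellite0 that were created before CreatedBefore and are
		// absent from the bloom filter are garbage; pieces in the filter must survive.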
		queued := retainDisabled.Queue(req)
		require.True(t, queued)
		retainDisabled.TestWaitUntilEmpty()

		queued = retainDebug.Queue(req)
		require.True(t, queued)
		retainDebug.TestWaitUntilEmpty()

		satellite1Pieces, err := getAllPieceIDs(ctx, store, satellite1.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces, len(satellite1Pieces))

		satellite0Pieces, err := getAllPieceIDs(ctx, store, satellite0.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces, len(satellite0Pieces))

		// expect that enabled endpoint deletes the correct pieces
		queued = retainEnabled.Queue(req)
		require.True(t, queued)
		retainEnabled.TestWaitUntilEmpty()

		// check we have deleted nothing for satellite1
		satellite1Pieces, err = getAllPieceIDs(ctx, store, satellite1.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces, len(satellite1Pieces))

		// check we did not delete recent pieces or retained pieces for satellite0
		// also check that we deleted the correct pieces for satellite0
		satellite0Pieces, err = getAllPieceIDs(ctx, store, satellite0.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces-numOldPieces, len(satellite0Pieces))

		for _, id := range pieceIDs[:numPiecesToKeep] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (in bloom filter)")
		}

		for _, id := range pieceIDs[numPiecesToKeep+numOldPieces:] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (recent piece)")
		}

		for _, id := range pieceIDs[numPiecesToKeep : numPiecesToKeep+numOldPieces] {
			require.NotContains(t, satellite0Pieces, id, "piece should have been deleted")
		}

		// shut down retain services
		cancel()
		err = group.Wait()
		require.True(t, errs2.IsCanceled(err))
	})
}

func TestRetainPieces_lazyFilewalker(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		log := zaptest.NewLogger(t)
		blobs := db.Pieces()
		v0PieceInfo := db.V0PieceInfo()
		fw := pieces.NewFileWalker(log, blobs, v0PieceInfo)
		cfg := pieces.DefaultConfig
		cfg.EnableLazyFilewalker = true
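		// Same scenario as TestRetainPieces, but with the lazy filewalker enabled
		// so garbage collection walks pieces through the filewalker supervisor below.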

		lazyFwCfg := db.Config().LazyFilewalkerConfig()
		lazyFw := lazyfilewalker.NewSupervisor(log, lazyFwCfg, "")
		cmd := internalcmd.NewGCFilewalkerCmd()
		cmd.Logger = log.Named("gc-filewalker")
		cmd.Ctx = ctx
		lazyFw.TestingSetGCCmd(cmd)
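		// TestingSetGCCmd wires the GC filewalker command into the supervisor,
		// so the lazy path can be exercised without spawning a separate storagenode executable.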

		store := pieces.NewStore(log, fw, lazyFw, blobs, v0PieceInfo, db.PieceExpirationDB(), db.PieceSpaceUsedDB(), cfg)
		testStore := pieces.StoreForTest{Store: store}

		const numPieces = 100
		const numPiecesToKeep = 95
		// pieces from numPiecesToKeep + numOldPieces to numPieces will
		// have a recent timestamp and thus should not be deleted
		const numOldPieces = 5

		// for this test, we set the false positive rate very low, so we can
		// test which pieces should be deleted with precision
		filter := bloomfilter.NewOptimal(numPieces, 0.000000001)

		pieceIDs := generateTestIDs(numPieces)

		satellite0 := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
		satellite1 := testidentity.MustPregeneratedSignedIdentity(2, storj.LatestIDVersion())

		uplink := testidentity.MustPregeneratedSignedIdentity(3, storj.LatestIDVersion())

		// keep pieceIDs[0 : numPiecesToKeep] (old + in filter)
		// delete pieceIDs[numPiecesToKeep : numPiecesToKeep+numOldPieces] (old + not in filter)
		// keep pieceIDs[numPiecesToKeep+numOldPieces : numPieces] (recent + not in filter)
		// add all pieces to the node pieces info DB - but only count piece ids in filter
		for index, id := range pieceIDs {
			var formatVer blobstore.FormatVersion
			if index%2 == 0 {
				formatVer = filestore.FormatV0
			} else {
				formatVer = filestore.FormatV1
			}

			if index < numPiecesToKeep {
				filter.Add(id)
			}

			const size = 100 * memory.B

			// Write file for all satellites
			for _, satelliteID := range []storj.NodeID{satellite0.ID, satellite1.ID} {
				now := time.Now()
				w, err := testStore.WriterForFormatVersion(ctx, satelliteID, id, formatVer, pb.PieceHashAlgorithm_SHA256)
				require.NoError(t, err)

				_, err = w.Write(testrand.Bytes(size))
				require.NoError(t, err)

				require.NoError(t, w.Commit(ctx, &pb.PieceHeader{
					CreationTime: now,
				}))

				piecehash, err := signing.SignPieceHash(ctx,
					signing.SignerFromFullIdentity(uplink),
					&pb.PieceHash{
						PieceId: id,
						Hash:    []byte{0, 2, 3, 4, 5},
					})
				require.NoError(t, err)

				if formatVer == filestore.FormatV0 {
					v0db := testStore.GetV0PieceInfoDBForTest()
					err = v0db.Add(ctx, &pieces.Info{
						SatelliteID:     satelliteID,
						PieceSize:       4,
						PieceID:         id,
						PieceCreation:   now,
						UplinkPieceHash: piecehash,
						OrderLimit:      &pb.OrderLimit{},
					})
					require.NoError(t, err)
				}
			}
		}

		retainEnabled := retain.NewService(zaptest.NewLogger(t), store, retain.Config{
			Status:      retain.Enabled,
			Concurrency: 1,
			MaxTimeSkew: 0,
		})

		// start the retain service
		runCtx, cancel := context.WithCancel(ctx)
		defer cancel()

		var group errgroup.Group
		group.Go(func() error {
			return retainEnabled.Run(runCtx)
		})

		// build the retain request for satellite0
		req := retain.Request{
			SatelliteID:   satellite0.ID,
			CreatedBefore: time.Now(),
			Filter:        filter,
		}

		// expect that enabled endpoint deletes the correct pieces
		queued := retainEnabled.Queue(req)
		require.True(t, queued)
		retainEnabled.TestWaitUntilEmpty()

		// check we have deleted nothing for satellite1
		satellite1Pieces, err := getAllPieceIDs(ctx, store, satellite1.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces, len(satellite1Pieces))

		// check we did not delete recent pieces or retained pieces for satellite0
		// also check that we deleted the correct pieces for satellite0
		satellite0Pieces, err := getAllPieceIDs(ctx, store, satellite0.ID)
		require.NoError(t, err)
		require.Equal(t, numPieces-numOldPieces, len(satellite0Pieces))

		for _, id := range pieceIDs[:numPiecesToKeep] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (in bloom filter)")
		}

		for _, id := range pieceIDs[numPiecesToKeep+numOldPieces:] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (recent piece)")
		}

		for _, id := range pieceIDs[numPiecesToKeep : numPiecesToKeep+numOldPieces] {
			require.NotContains(t, satellite0Pieces, id, "piece should have been deleted")
		}

		// shut down the retain service
		cancel()
		err = group.Wait()
		require.True(t, errs2.IsCanceled(err))
	})
}

// getAllPieceIDs walks all pieces stored for the given satellite and returns their IDs.
func getAllPieceIDs(ctx context.Context, store *pieces.Store, satellite storj.NodeID) (pieceIDs []storj.PieceID, err error) {
	err = store.WalkSatellitePieces(ctx, satellite, func(pieceAccess pieces.StoredPieceAccess) error {
		pieceIDs = append(pieceIDs, pieceAccess.PieceID())
		return nil
	})
	return pieceIDs, err
}

// generateTestIDs generates n piece ids.
func generateTestIDs(n int) []storj.PieceID {
	ids := make([]storj.PieceID, n)
	for i := range ids {
		ids[i] = testrand.PieceID()
	}
	return ids
}