private/testcontext: ensure we call cleanup everywhere

Change-Id: Icb921144b651611d78f3736629430d05c3b8a7d3
Egon Elbre 2019-12-17 16:16:38 +02:00
parent 7455ab771b
commit 7a36507a0a
14 changed files with 46 additions and 23 deletions
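
The change applied across all 14 files follows one pattern: every testcontext.New call is immediately followed by defer ctx.Cleanup(), so temporary directories and background goroutines are released even when a test or benchmark exits early. A minimal sketch of that pattern (package and test names are illustrative; the import path assumes the private/testcontext location named in the commit title):

package example

import (
	"testing"

	"storj.io/storj/private/testcontext"
)

// TestCleanupPattern illustrates the lifecycle this commit enforces: create
// the context and defer Cleanup right away, before any statement that can
// fail, so the temporary directory is always removed.
func TestCleanupPattern(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	// ctx.File returns a path inside the context's temporary directory,
	// as TestHidden below does for testconfig.yaml.
	_ = ctx.File("testconfig.yaml")
}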

@@ -188,6 +188,7 @@ func TestNewClient(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ident, err := testidentity.PregeneratedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
require.NotNil(t, ident)

@@ -48,6 +48,8 @@ func TestNewCA(t *testing.T) {
func TestFullCertificateAuthority_NewIdentity(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ca, err := identity.NewCA(ctx, identity.NewCAOptions{
Difficulty: 12,
Concurrency: 4,
@@ -70,6 +72,8 @@ func TestFullCertificateAuthority_NewIdentity(t *testing.T) {
func TestFullCertificateAuthority_Sign(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
caOpts := identity.NewCAOptions{
Difficulty: 12,
Concurrency: 4,

@@ -63,9 +63,10 @@ func TestHidden(t *testing.T) {
// Setup test config file
ctx := testcontext.New(t)
testConfigFile := ctx.File("testconfig.yaml")
defer ctx.Cleanup()
testConfigFile := ctx.File("testconfig.yaml")
// Run the command through the exec call.
Exec(cmd)

@@ -66,6 +66,7 @@ func TestUserEmailCase(t *testing.T) {
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
for _, testCase := range []struct {
email string
}{

@@ -22,6 +22,7 @@ func TestProgress(t *testing.T) {
// test basic graceful exit progress crud
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
geDB := db.GracefulExit()
@@ -60,6 +61,7 @@ func TestTransferQueueItem(t *testing.T) {
// test basic graceful exit transfer queue crud
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
geDB := db.GracefulExit()

@@ -44,6 +44,7 @@ func TestBasicBucketOperations(t *testing.T) {
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
consoleDB := db.Console()
project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
require.NoError(t, err)
@@ -91,6 +92,7 @@ func TestListBucketsAllAllowed(t *testing.T) {
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
consoleDB := db.Console()
project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
require.NoError(t, err)
@@ -153,6 +155,7 @@ func TestListBucketsNotAllowed(t *testing.T) {
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
consoleDB := db.Console()
project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1"})
require.NoError(t, err)

@@ -611,91 +611,90 @@ func TestCommitSegmentPointer(t *testing.T) {
// all tests need to generate an error
tests := []struct {
// defines how to modify the pointer before CommitSegment
Modify func(pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit)
Modify func(ctx context.Context, pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit)
ErrorMessage string
}{
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.ExpirationDate = pointer.ExpirationDate.Add(time.Second * 100)
},
ErrorMessage: "pointer expiration date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.MinReq += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.RepairThreshold += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.SuccessThreshold += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.Total += 100
},
// this error is triggered earlier than the Create/Commit RS comparison
ErrorMessage: "invalid no order limit for piece",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.ErasureShareSize += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.Redundancy.Type = 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Type = pb.Pointer_INLINE
},
ErrorMessage: "pointer type is INLINE but remote segment is set",
},
{
// no piece hash removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.RemotePieces[0].Hash = nil
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
// set piece number to be out of range of limit slice
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.RemotePieces[0].PieceNum = int32(len(limits))
},
ErrorMessage: "invalid piece number",
},
{
// invalid timestamp removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.RemotePieces[0].Hash.Timestamp = time.Now().Add(-24 * time.Hour)
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
// invalid hash PieceID removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.RemotePieces[0].Hash.PieceId = storj.PieceID{1}
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
Modify: func(pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.Remote.RemotePieces[0].Hash.PieceSize = 1
ctx := testcontext.New(t)
snFullID := fullIDMap[pointer.Remote.RemotePieces[0].NodeId]
require.NotNil(t, snFullID)
signer := signing.SignerFromFullIdentity(snFullID)
@@ -706,24 +705,23 @@ func TestCommitSegmentPointer(t *testing.T) {
ErrorMessage: "all pieces needs to have the same size",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
pointer.SegmentSize = 100
},
ErrorMessage: "expected piece size is different from provided",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
// nil piece hash signature removes piece from pointer, not enough pieces for successful upload
pointer.Remote.RemotePieces[0].Hash.Signature = nil
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
// invalid piece hash signature removes piece from pointer, not enough pieces for successful upload
pointer.Remote.RemotePieces[0].Hash.Signature = nil
ctx := testcontext.New(t)
ca, err := testidentity.NewTestCA(ctx)
require.NoError(t, err)
badFullID, err := ca.NewIdentity()
@@ -737,7 +735,7 @@ func TestCommitSegmentPointer(t *testing.T) {
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
firstPiece := pointer.Remote.RemotePieces[0]
pointer.Remote.RemotePieces[1] = firstPiece
pointer.Remote.RemotePieces[2] = firstPiece
@@ -745,7 +743,7 @@ func TestCommitSegmentPointer(t *testing.T) {
ErrorMessage: "piece num 0 is duplicated",
},
{
Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
Modify: func(ctx context.Context, pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
firstNodeID := pointer.Remote.RemotePieces[0].NodeId
pointer.Remote.RemotePieces[1].NodeId = firstNodeID
},
@@ -769,7 +767,7 @@ func TestCommitSegmentPointer(t *testing.T) {
for i, test := range tests {
pointer, limits := runCreateSegment(ctx, t, metainfo, fullIDMap)
test.Modify(pointer, fullIDMap, limits)
test.Modify(ctx, pointer, fullIDMap, limits)
_, err = metainfo.CommitSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
require.Error(t, err, "Case #%v", i)
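
Besides the deferred cleanups, this hunk threads the caller's context into the table-driven Modify callbacks and removes the ctx := testcontext.New(t) calls that individual cases had been creating without ever cleaning up; the loop at the end now passes the shared ctx straight through. A rough sketch of the resulting shape (struct and function names here are illustrative, not the actual metainfo test code):

package example

import (
	"context"
	"testing"

	"storj.io/storj/private/testcontext"
)

// TestModifyReceivesContext mirrors the signature change above: each case's
// Modify func takes a context.Context parameter, and the runner hands it the
// test's shared context instead of letting cases build their own.
func TestModifyReceivesContext(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	cases := []struct {
		Modify       func(ctx context.Context)
		ErrorMessage string
	}{
		{
			Modify:       func(ctx context.Context) { /* mutate test state using ctx */ },
			ErrorMessage: "expected failure",
		},
	}

	for _, tc := range cases {
		// *testcontext.Context embeds context.Context, which is why the
		// diff can pass ctx directly to test.Modify; the real test then
		// commits the segment and asserts tc.ErrorMessage.
		tc.Modify(ctx)
	}
}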

@@ -475,6 +475,7 @@ func testDistinctIPs(t *testing.T, ctx *testcontext.Context, planet *testplanet.
func TestAddrtoNetwork_Conversion(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ip := "8.8.8.8:28967"
network, err := overlay.GetNetwork(ctx, ip)

@@ -151,6 +151,7 @@ func BenchmarkClientWrite(b *testing.B) {
// setup db
ctx := testcontext.New(b)
defer ctx.Cleanup()
dbfile := ctx.File("testbolt.db")
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
if err != nil {
@@ -188,6 +189,7 @@ func BenchmarkClientNoSyncWrite(b *testing.B) {
// setup db
ctx := testcontext.New(b)
defer ctx.Cleanup()
dbfile := ctx.File("testbolt.db")
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
if err != nil {
@@ -232,6 +234,7 @@ func BenchmarkClientBatchWrite(b *testing.B) {
// setup db
ctx := testcontext.New(b)
defer ctx.Cleanup()
dbfile := ctx.File("testbolt.db")
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
if err != nil {
@@ -271,6 +274,7 @@ func BenchmarkClientBatchNoSyncWrite(b *testing.B) {
// setup db
ctx := testcontext.New(b)
defer ctx.Cleanup()
dbfile := ctx.File("testbolt.db")
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
if err != nil {

@@ -42,6 +42,7 @@ func RunBenchmarks(b *testing.B, store storage.KeyValueStore) {
ctx := testcontext.New(b)
defer ctx.Cleanup()
defer cleanupItems(b, ctx, store, items)
b.Run("Put", func(b *testing.B) {

@@ -175,6 +175,7 @@ func BenchmarkPathOperationsInLargeDb(b *testing.B, store storage.KeyValueStore)
ctx := testcontext.New(b)
defer ctx.Cleanup()
initStore(b, ctx, store)
doTest := func(name string, testFunc func(*testing.B, *testcontext.Context, storage.KeyValueStore)) {

@@ -28,6 +28,7 @@ func TestDBInit(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
spaceUsedDB := db.PieceSpaceUsedDB()
total, err := spaceUsedDB.GetTotal(ctx)
require.NoError(t, err)
@@ -57,6 +58,7 @@ func TestCacheInit(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
spaceUsedDB := db.PieceSpaceUsedDB()
err := spaceUsedDB.Init(ctx)
require.NoError(t, err)
@@ -197,6 +199,7 @@ func TestRecalculateCache(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ID1 := storj.NodeID{1, 1}
cache := pieces.NewBlobsUsageCacheTest(nil,
tt.end,
@@ -226,6 +229,7 @@ func TestRecalculateCache(t *testing.T) {
func TestRecalculateCacheMissed(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ID1 := storj.NodeID{1}
ID2 := storj.NodeID{2}

@@ -33,6 +33,7 @@ func TestRetainPieces(t *testing.T) {
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
store := pieces.NewStore(zaptest.NewLogger(t), db.Pieces(), db.V0PieceInfo(), db.PieceExpirationDB(), db.PieceSpaceUsedDB())
testStore := pieces.StoreForTest{Store: store}

@@ -75,6 +75,7 @@ func getData(rawDBs map[string]storagenodedb.DBContainer, schemas map[string]*db
func TestMigrate(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
log := zaptest.NewLogger(t)
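
Several hunks above (BenchmarkClientWrite and the storage benchmarks) pass *testing.B rather than *testing.T to testcontext.New, so the same create-then-defer pattern evidently covers benchmarks as well. A hypothetical benchmark in that style, under the same import-path assumption as the earlier sketches:

package example

import (
	"testing"

	"storj.io/storj/private/testcontext"
)

// BenchmarkCleanupPattern shows the shape this commit gives the boltdb
// benchmarks: the context is created from *testing.B and Cleanup is still
// deferred so the temporary database file is removed afterwards.
func BenchmarkCleanupPattern(b *testing.B) {
	ctx := testcontext.New(b)
	defer ctx.Cleanup()

	dbfile := ctx.File("testbolt.db") // temp file inside the context's directory
	_ = dbfile

	for i := 0; i < b.N; i++ {
		// benchmark body would exercise the store here
	}
}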