// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package audit_test

import (
	"math/rand"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/reputation"
)
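
// The tests in this file exercise the satellite's containment DB: Insert queues a
// pending reverification and places the node in containment, Get returns a contained
// node's pending job, and Delete removes a pending job, releasing the node once its
// last job is gone. ReverifyQueue.GetNextJob hands a pending job out for work and
// increments its ReverifyCount.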

func TestContainInsertAndGet(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		containment := planet.Satellites[0].DB.Containment()

		// insert a pending reverification job for the first node
		input := &audit.PieceLocator{
			StreamID: testrand.UUID(),
			Position: metabase.SegmentPositionFromEncoded(uint64(rand.Int63())),
			NodeID:   planet.StorageNodes[0].ID(),
			PieceNum: 0,
		}

		err := containment.Insert(ctx, input)
		require.NoError(t, err)

		// the job should come back unchanged, with a fresh reverify count
		output, err := containment.Get(ctx, input.NodeID)
		require.NoError(t, err)

		assert.Equal(t, *input, output.Locator)
		assert.EqualValues(t, 0, output.ReverifyCount)

		// the second node was never contained, so Get should fail
		nodeID1 := planet.StorageNodes[1].ID()
		_, err = containment.Get(ctx, nodeID1)
		require.Error(t, err, audit.ErrContainedNotFound.New("%v", nodeID1))
		assert.Truef(t, audit.ErrContainedNotFound.Has(err), "expected ErrContainedNotFound but got %+v", err)
	})
}

func TestContainIncrementPendingEntryExists(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		containment := planet.Satellites[0].DB.Containment()

		info1 := &audit.PieceLocator{
			NodeID: planet.StorageNodes[0].ID(),
		}

		err := containment.Insert(ctx, info1)
		require.NoError(t, err)

		// expect reverify count for an entry to be 0 after the first Insert call
		pending, err := containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		assert.EqualValues(t, 0, pending.ReverifyCount)

		// expect reverify count to still be 0 after a second Insert of the same entry
		err = containment.Insert(ctx, info1)
		require.NoError(t, err)
		pending, err = containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		assert.EqualValues(t, 0, pending.ReverifyCount)

		// after the job is selected for work, its ReverifyCount should be increased to 1
		job, err := planet.Satellites[0].DB.ReverifyQueue().GetNextJob(ctx, 0)
		require.NoError(t, err)
		require.Equal(t, pending.Locator, job.Locator)
		assert.EqualValues(t, 1, job.ReverifyCount)

		pending, err = containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		assert.EqualValues(t, 1, pending.ReverifyCount)
	})
}

func TestContainDelete(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		containment := planet.Satellites[0].DB.Containment()

		// add two reverification jobs for the same node
		info1 := &audit.PieceLocator{
			NodeID:   planet.StorageNodes[0].ID(),
			StreamID: testrand.UUID(),
		}
		info2 := &audit.PieceLocator{
			NodeID:   planet.StorageNodes[0].ID(),
			StreamID: testrand.UUID(),
		}

		err := containment.Insert(ctx, info1)
		require.NoError(t, err)
		err = containment.Insert(ctx, info2)
		require.NoError(t, err)

		// 'get' will choose one of them (we don't really care which)
		got, err := containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		if got.Locator != *info1 {
			require.Equal(t, *info2, got.Locator)
		}
		require.EqualValues(t, 0, got.ReverifyCount)

		// delete one of the pending reverifications
		wasDeleted, stillInContainment, err := containment.Delete(ctx, info2)
		require.NoError(t, err)
		require.True(t, wasDeleted)
		require.True(t, stillInContainment)

		// 'get' now is sure to select info1
		got, err = containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		require.Equal(t, *info1, got.Locator)
		require.EqualValues(t, 0, got.ReverifyCount)

		// delete the other pending reverification
		wasDeleted, stillInContainment, err = containment.Delete(ctx, info1)
		require.NoError(t, err)
		require.True(t, wasDeleted)
		require.False(t, stillInContainment)

		// try to get a pending reverification that isn't in the queue
		_, err = containment.Get(ctx, info1.NodeID)
		require.Error(t, err, audit.ErrContainedNotFound.New("%v", info1.NodeID))
		require.True(t, audit.ErrContainedNotFound.Has(err))

		// and try to delete that pending reverification that isn't in the queue
		wasDeleted, _, err = containment.Delete(ctx, info1)
		require.NoError(t, err)
		assert.False(t, wasDeleted)
	})
}

// UpdateStats used to remove nodes from containment. It doesn't anymore.
// This is a sanity check.
func TestContainUpdateStats(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		containment := planet.Satellites[0].DB.Containment()
		cache := planet.Satellites[0].DB.OverlayCache()

		info1 := &audit.PieceLocator{
			NodeID: planet.StorageNodes[0].ID(),
		}

		err := containment.Insert(ctx, info1)
		require.NoError(t, err)

		// update node stats
		err = planet.Satellites[0].Reputation.Service.ApplyAudit(ctx, info1.NodeID, overlay.ReputationStatus{}, reputation.AuditSuccess)
		require.NoError(t, err)

		// check that the contained flag is set to false
		node, err := cache.Get(ctx, info1.NodeID)
		require.NoError(t, err)
		assert.False(t, node.Contained)

		// the pending audit should still be present in containment
		_, err = containment.Get(ctx, info1.NodeID)
		require.NoError(t, err)
	})
}
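
// TestContainLifecycleSketch is an illustrative sketch, not part of the original
// suite: it re-walks the basic containment lifecycle (Insert, Get, Delete, then a
// failing Get) in one place, using only calls and behaviors already exercised by
// the tests above.
func TestContainLifecycleSketch(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		containment := planet.Satellites[0].DB.Containment()

		// queue a reverification, which places the node in containment
		loc := &audit.PieceLocator{
			NodeID:   planet.StorageNodes[0].ID(),
			StreamID: testrand.UUID(),
		}
		require.NoError(t, containment.Insert(ctx, loc))

		// a fresh entry round-trips and starts with ReverifyCount 0
		pending, err := containment.Get(ctx, loc.NodeID)
		require.NoError(t, err)
		assert.Equal(t, *loc, pending.Locator)
		assert.EqualValues(t, 0, pending.ReverifyCount)

		// removing the only pending entry takes the node out of containment
		wasDeleted, stillContained, err := containment.Delete(ctx, loc)
		require.NoError(t, err)
		assert.True(t, wasDeleted)
		assert.False(t, stillContained)

		// subsequent lookups report ErrContainedNotFound
		_, err = containment.Get(ctx, loc.NodeID)
		assert.True(t, audit.ErrContainedNotFound.Has(err))
	})
}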