satellite/{audit, accounting}: use reputation store in tests

Change-Id: I86a8ccf5dcee8d108196a9f67a476fe0ccbd8257
This commit is contained in:
Yingrong Zhao 2021-07-13 19:30:06 -04:00
parent e91574cee1
commit 58238d850c
4 changed files with 23 additions and 37 deletions

View File

@ -408,7 +408,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
// cfgstruct devDefaults. we need to make sure it's safe to remove
// these lines and then remove them.
config.Debug.Control = false
-config.Overlay.AuditHistory.OfflineDQEnabled = false
+config.Reputation.AuditHistory.OfflineDQEnabled = false
config.Server.Config.Extensions.Revocation = false
config.Orders.OrdersSemaphoreSize = 0
config.Checker.NodeFailureRate = 0

View File

@ -18,18 +18,11 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
)
func TestRollupNoDeletes(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
-Reconfigure: testplanet.Reconfigure{
-Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-// 0 so that we can disqualify a node immediately by triggering a failed audit
-config.Overlay.Node.AuditReputationLambda = 0
-},
-},
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// In testplanet the setting config.Rollup.DeleteTallies defaults to false.
@ -118,8 +111,6 @@ func TestRollupDeletes(t *testing.T) {
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Rollup.DeleteTallies = true
config.Orders.Expiration = time.Hour
-// 0 so that we can disqualify a node immediately by triggering a failed audit
-config.Overlay.Node.AuditReputationLambda = 0
},
},
},
@ -370,23 +361,17 @@ func saveBWPhase3(ctx context.Context, ordersDB orders.DB, bwTotals map[storj.No
func dqNodes(ctx *testcontext.Context, planet *testplanet.Planet) (map[storj.NodeID]bool, error) {
dqed := make(map[storj.NodeID]bool)
-var updateRequests []*overlay.UpdateRequest
for i, n := range planet.StorageNodes {
if i%2 == 0 {
continue
}
-updateRequests = append(updateRequests, &overlay.UpdateRequest{
-NodeID: n.ID(),
-AuditOutcome: overlay.AuditFailure,
-})
+err := planet.Satellites[0].Overlay.DB.DisqualifyNode(ctx, n.ID())
+if err != nil {
+return nil, err
+}
+dqed[n.ID()] = true
}
-_, err := planet.Satellites[0].Overlay.Service.BatchUpdateStats(ctx, updateRequests)
-if err != nil {
-return nil, err
-}
-for _, request := range updateRequests {
-dqed[request.NodeID] = true
-}
return dqed, nil
}

View File

@ -14,7 +14,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
)
func TestContainIncrementAndGet(t *testing.T) {
@ -132,7 +132,7 @@ func TestContainUpdateStats(t *testing.T) {
require.NoError(t, err)
// update node stats
-_, err = planet.Satellites[0].Overlay.Service.BatchUpdateStats(ctx, []*overlay.UpdateRequest{{NodeID: info1.NodeID}})
+err = planet.Satellites[0].Reputation.Service.ApplyAudit(ctx, info1.NodeID, reputation.AuditSuccess)
require.NoError(t, err)
// check contained flag set to false

View File

@ -137,7 +137,7 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {
segment := segments[0]
disqualifiedNode := segment.Pieces[0].StorageNode
-err = satellitePeer.DB.OverlayCache().DisqualifyNode(ctx, disqualifiedNode)
+err = satellitePeer.Reputation.Service.TestDisqualifyNode(ctx, disqualifiedNode)
require.NoError(t, err)
limits, _, err := satellitePeer.Orders.Service.CreateGetOrderLimits(ctx, bucket, segment, 0)
@ -163,7 +163,7 @@ func TestDisqualifiedNodesGetNoUpload(t *testing.T) {
disqualifiedNode := planet.StorageNodes[0]
satellitePeer.Audit.Worker.Loop.Pause()
-err := satellitePeer.DB.OverlayCache().DisqualifyNode(ctx, disqualifiedNode.ID())
+err := satellitePeer.Reputation.Service.TestDisqualifyNode(ctx, disqualifiedNode.ID())
require.NoError(t, err)
request := overlay.FindStorageNodesRequest{
@ -191,12 +191,22 @@ func TestDisqualifiedNodeRemainsDisqualified(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
+Reconfigure: testplanet.Reconfigure{
+Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+config.Overlay.Node.MinimumDiskSpace = 10 * memory.MB
+config.Reputation.AuditLambda = 0 // forget about history
+config.Reputation.AuditWeight = 1
+config.Reputation.AuditDQ = 0 // make sure new reputation scores are larger than the DQ thresholds
+config.Reputation.SuspensionGracePeriod = time.Hour
+config.Reputation.SuspensionDQEnabled = true
+},
+},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellitePeer := planet.Satellites[0]
satellitePeer.Audit.Worker.Loop.Pause()
disqualifiedNode := planet.StorageNodes[0]
-err := satellitePeer.DB.OverlayCache().DisqualifyNode(ctx, disqualifiedNode.ID())
+err := satellitePeer.Reputation.Service.TestDisqualifyNode(ctx, disqualifiedNode.ID())
require.NoError(t, err)
info := overlay.NodeCheckInInfo{
@ -216,16 +226,7 @@ func TestDisqualifiedNodeRemainsDisqualified(t *testing.T) {
require.NoError(t, err)
assert.True(t, isDisqualified(t, ctx, satellitePeer, disqualifiedNode.ID()))
-_, err = satellitePeer.Overlay.Service.BatchUpdateStats(ctx, []*overlay.UpdateRequest{{
-NodeID: disqualifiedNode.ID(),
-AuditOutcome: overlay.AuditSuccess,
-AuditLambda: 0, // forget about history
-AuditWeight: 1,
-AuditDQ: 0, // make sure new reputation scores are larger than the DQ thresholds
-SuspensionGracePeriod: time.Hour,
-SuspensionDQEnabled: true,
-}})
+err = satellitePeer.Reputation.Service.ApplyAudit(ctx, disqualifiedNode.ID(), reputation.AuditSuccess)
require.NoError(t, err)
assert.True(t, isDisqualified(t, ctx, satellitePeer, disqualifiedNode.ID()))
})