satellite/overlay: add random test for node selection from cache

Change-Id: I57b57db919b5afb3fc81176e078ac1e3a873565a
Jessica Grebenschikov 2020-04-24 12:34:53 -07:00 committed by Jess G
parent d27cb60f81
commit 1ecbfc453f

@@ -13,8 +13,10 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
@@ -244,6 +246,125 @@ func TestRandomizedSelection(t *testing.T) {
		}
	})
}
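
// TestRandomizedSelectionCache checks that the node selection cache spreads
// selections roughly evenly across both new and reputable nodes.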
func TestRandomizedSelectionCache(t *testing.T) {
	t.Parallel()

	totalNodes := 1000
	selectIterations := 100
	numNodesToSelect := 100
	minSelectCount := 3

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
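				// a negative staleness means the cached node data is always
				// considered stale, so the cache never serves a warm window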
				config.Overlay.NodeSelectionCache.Staleness = -time.Hour
				config.Overlay.Node.NewNodeFraction = 0.5 // select 50% new nodes
				config.Overlay.Node.AuditCount = 1
				config.Overlay.Node.UptimeCount = 1
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		overlaydb := satellite.Overlay.DB
		nodeSelectionCache := satellite.Overlay.Service.SelectionCache
		allIDs := make(storj.NodeIDList, totalNodes)
		nodeCounts := make(map[storj.NodeID]int)
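		// half of the seeded nodes stay unvetted, matching the configured NewNodeFraction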
		expectedNewCount := int(float64(totalNodes) * satellite.Config.Overlay.Node.NewNodeFraction)

		// put nodes in cache
		for i := 0; i < totalNodes; i++ {
			newID := testrand.NodeID()
			address := fmt.Sprintf("127.0.%d.0:8080", i)
			lastNet := fmt.Sprintf("127.0.%d", i)
			n := overlay.NodeCheckInInfo{
				NodeID: newID,
				Address: &pb.NodeAddress{
					Address:   address,
					Transport: pb.NodeTransport_TCP_TLS_GRPC,
				},
				LastNet:    lastNet,
				LastIPPort: address,
				IsUp:       true,
				Capacity: &pb.NodeCapacity{
					FreeDisk:      200 * memory.MiB.Int64(),
					FreeBandwidth: 1 * memory.TB.Int64(),
				},
				Version: &pb.NodeVersion{
					Version:    "v1.1.0",
					CommitHash: "",
					Timestamp:  time.Time{},
					Release:    true,
				},
			}
			defaults := overlay.NodeSelectionConfig{}
			err := overlaydb.UpdateCheckIn(ctx, n, time.Now().UTC(), defaults)
			require.NoError(t, err)

			if i%2 == 0 { // make half of nodes "new" and half "vetted"
				_, err = overlaydb.UpdateStats(ctx, &overlay.UpdateRequest{
					NodeID:       newID,
					IsUp:         true,
					AuditOutcome: overlay.AuditSuccess,
					AuditLambda:  1,
					AuditWeight:  1,
					AuditDQ:      0.5,
				})
				require.NoError(t, err)
			}

			allIDs[i] = newID
			nodeCounts[newID] = 0
		}
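
		// refresh the cache so it loads the nodes we just checked in from the DB,
		// then verify the reputable/new split matches expectations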
		err := nodeSelectionCache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new := nodeSelectionCache.Size()
		require.Equal(t, totalNodes-expectedNewCount, reputable)
		require.Equal(t, expectedNewCount, new)

		// select numNodesToSelect nodes selectIterations times
		for i := 0; i < selectIterations; i++ {
			var nodes []*overlay.SelectedNode
			var err error

			req := overlay.FindStorageNodesRequest{
				RequestedCount: numNodesToSelect,
			}
			nodes, err = nodeSelectionCache.GetNodes(ctx, req)
			require.NoError(t, err)
			require.Len(t, nodes, numNodesToSelect)

			for _, node := range nodes {
				nodeCounts[node.ID]++
			}
		}
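
		// count nodes selected fewer than minSelectCount times and build a
		// histogram of selection counts (table[k] = number of nodes selected k times)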
		belowThreshold := 0

		table := []int{}

		// expect that each node has been selected at least minSelectCount times
		for _, id := range allIDs {
			count := nodeCounts[id]
			if count < minSelectCount {
				belowThreshold++
			}
			if count >= len(table) {
				table = append(table, make([]int, count-len(table)+1)...)
			}
			table[count]++
		}
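
		// allow up to 1% of nodes to fall below minSelectCount before failing;
		// on failure, dump the selection-count histogram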
		if belowThreshold > totalNodes*1/100 {
			t.Errorf("%d out of %d were below threshold %d", belowThreshold, totalNodes, minSelectCount)
			for count, amount := range table {
				t.Logf("%3d = %4d", count, amount)
			}
		}
	})
}

func TestNodeInfo(t *testing.T) {
testplanet.Run(t, testplanet.Config{