// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"context"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/common/testcontext"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

var nodeCfg = overlay.NodeSelectionConfig{
	AuditCount:       1,
	UptimeCount:      1,
	NewNodeFraction:  0.2,
	MinimumVersion:   "v1.0.0",
	OnlineWindow:     4 * time.Hour,
	DistinctIP:       true,
	MinimumDiskSpace: 100 * memory.MiB,
}

const (
	// staleness is how stale the cache can be before we sync
	// with the database to refresh the cache.

	// Using a negative time forces the cache to refresh on every call.
	lowStaleness = -time.Hour

	// Using a positive time means the cache is refreshed only when it
	// hasn't been refreshed within the past hour.
	highStaleness = time.Hour
)
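
// A sketch of the staleness check the constants above exercise. This is
// illustrative only; the actual logic lives in overlay.NodeSelectionCache:
//
//	if time.Since(lastRefresh) > staleness {
//		// hit the database and rebuild the in-memory node lists
//	}
//
// A negative staleness is always exceeded, so every call refreshes; a
// one-hour staleness is exceeded at most once per hour.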

func TestRefresh(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		cache := overlay.NewNodeSelectionCache(zap.NewNop(),
			db.OverlayCache(),
			lowStaleness,
			nodeCfg,
		)
		// the cache should have no nodes to start
		err := cache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new := cache.Size()
		require.Equal(t, 0, reputable)
		require.Equal(t, 0, new)

		// add some nodes to the database
		const nodeCount = 2
		addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount)

		// confirm the added nodes appear in the cache after a refresh
		err = cache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new = cache.Size()
		require.Equal(t, nodeCount, new)
		require.Equal(t, 0, reputable)
	})
}

// addNodesToNodesTable inserts count nodes into the overlay database by
// simulating a check-in for each one.
func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count int) {
	for i := 0; i < count; i++ {
		subnet := strconv.Itoa(i) + ".1.2"
		addr := subnet + ".3:8080"
		n := overlay.NodeCheckInInfo{
			NodeID: storj.NodeID{byte(i)},
			Address: &pb.NodeAddress{
				Address:   addr,
				Transport: pb.NodeTransport_TCP_TLS_GRPC,
			},
			LastNet:    subnet,
			LastIPPort: addr,
			IsUp:       true,
			Capacity: &pb.NodeCapacity{
				FreeDisk:      200 * memory.MiB.Int64(),
				FreeBandwidth: 1 * memory.TB.Int64(),
			},
			Version: &pb.NodeVersion{
				Version:    "v1.1.0",
				CommitHash: "",
				Timestamp:  time.Time{},
				Release:    true,
			},
		}
		err := db.UpdateCheckIn(ctx, n, time.Now().UTC(), nodeCfg)
		require.NoError(t, err)
	}
}

// mockdb is a test double for the cache's database dependency. It counts
// how many times the cache queries the database.
type mockdb struct {
	mu        sync.Mutex
	callCount int
}

func (m *mockdb) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*overlay.SelectedNode, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// simulate a slow query so that concurrent calls overlap in time
	sync2.Sleep(ctx, 500*time.Millisecond)
	m.callCount++
	return []*overlay.SelectedNode{}, []*overlay.SelectedNode{}, nil
}

func TestRefreshConcurrent(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	// concurrent cache.Refresh with high staleness, where high staleness means the
	// cache should only be refreshed the first time we call cache.Refresh
	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	var group errgroup.Group
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	err := group.Wait()
	require.NoError(t, err)

	require.Equal(t, 1, mockDB.callCount)

	// concurrent cache.Refresh with low staleness, where low staleness
	// means that the cache will refresh *every time* cache.Refresh is called
	mockDB = mockdb{}
	cache = overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		lowStaleness,
		nodeCfg,
	)
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	err = group.Wait()
	require.NoError(t, err)

	require.Equal(t, 2, mockDB.callCount)
}
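
// The callCount assertions above imply that concurrent refreshes are
// coalesced while the cache is still fresh. One plausible shape for that
// logic (an assumption for illustration, not the actual implementation):
//
//	func (cache *sketchCache) refresh(ctx context.Context) error {
//		cache.mu.Lock()
//		defer cache.mu.Unlock()
//		if time.Since(cache.lastRefresh) <= cache.staleness {
//			return nil // another goroutine refreshed recently enough
//		}
//		// otherwise query the database and swap in the new node lists
//		...
//	}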

func TestGetNode(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		// a config with zero audit and uptime requirements makes every
		// added node count as reputable
		var nodeCfg = overlay.NodeSelectionConfig{
			AuditCount:       0,
			UptimeCount:      0,
			NewNodeFraction:  0.2,
			MinimumVersion:   "v1.0.0",
			OnlineWindow:     4 * time.Hour,
			DistinctIP:       true,
			MinimumDiskSpace: 100 * memory.MiB,
		}
		cache := overlay.NewNodeSelectionCache(zap.NewNop(),
			db.OverlayCache(),
			lowStaleness,
			nodeCfg,
		)
		// the cache should have no nodes to start
		reputable, new := cache.Size()
		require.Equal(t, 0, reputable)
		require.Equal(t, 0, new)

		// add some nodes to the database
		const nodeCount = 4
		addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount)

		// confirm cache.GetNodes returns the correct nodes
		selectedNodes, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{RequestedCount: 2})
		require.NoError(t, err)
		reputable, new = cache.Size()
		require.Equal(t, 0, new)
		require.Equal(t, nodeCount, reputable)
		require.Equal(t, 2, len(selectedNodes))
		for _, node := range selectedNodes {
			require.NotEqual(t, node.ID, "")
			require.NotEqual(t, node.Address.Address, "")
			require.NotEqual(t, node.LastIPPort, "")
			require.NotEqual(t, node.LastNet, "")
		}
	})
}

func TestGetNodeConcurrent(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	// concurrent GetNodes with high staleness, where high staleness means the
	// cache should only be refreshed the first time we call cache.GetNodes
	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	var group errgroup.Group
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	err := group.Wait()
	require.NoError(t, err)
	// expect only one call to the db via cache.GetNodes
	require.Equal(t, 1, mockDB.callCount)

	// concurrent GetNodes with low staleness, where low staleness means that
	// the cache will refresh each time cache.GetNodes is called
	mockDB = mockdb{}
	cache = overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		lowStaleness,
		nodeCfg,
	)

	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	err = group.Wait()
	require.NoError(t, err)
	// expect two calls to the db via cache.GetNodes
	require.Equal(t, 2, mockDB.callCount)
}

func TestGetNodeError(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	// there should be 0 nodes in the cache
	reputable, new := cache.Size()
	require.Equal(t, 0, reputable)
	require.Equal(t, 0, new)

	// since the cache has no nodes, we should not be able to get 2 storage
	// nodes from it and we expect an error
	_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{RequestedCount: 2})
	require.Error(t, err)
}