satellite/overlay: add selected node cache (#3846)
* init implementation cache (Change-Id: Ia54a1943e0707a77189bc5f4a9aaa8339c98d99a)
* one query to init cache (Change-Id: I7c04b3ae104b553ae23fca372351a4328f632c66)
* add monit tracking of cache (Change-Id: I7d209e12c8f32d43708b23bf2126c5d5098e0a07)
* add first test (Change-Id: I0646a9349d457a9eb3920f7cd2d62fb72ffc3ab5)
* add staleness to cache (Change-Id: If002329bfdd53a4b200ad14dbd2ffc8b280aedb8)
* add init test (Change-Id: I3a3d0aa74cfac1d125fa93cb749316ed2a74d5b1)
* fix comment (Change-Id: I73353d00ccf0952b38c0f8ef7d1755c15cbfe9d9)
* mv to nodeselection pkg (Change-Id: I62487f768296c7a7b597fa398a4c42daf6e9c5b7)
* add state to cache (Change-Id: I081e77ec0e16706faee1a267de9a7fa643d6ac11)
* add refresh concurrent test (Change-Id: Idcba72508291099f280edc65355273c0acc3d3ce)
* add a few more tests (Change-Id: I9422e9eaa22bf01c11f14bdb892ebcf7b3e5e5fb)
* fix tests, add min version to select allnodes (Change-Id: I926f41d568951ad4ff70c6d4ceb87abb1e3e5009)
* update comments (Change-Id: I6ffe33e245ca65fb523c880cd72e63ce35776eb9)
* fixes and rm Init (Change-Id: Ifbe09b668978b5d9af09ca38cb080d02a2154cf4)
* fix format (Change-Id: I03cc217e28dc1839190c5c6dbdbb602c132a5a38)
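
A minimal sketch of the intended call pattern for the new cache (the package name, helper function, and the way the configuration values are obtained below are illustrative, not part of this commit):

package example

import (
	"context"
	"time"

	"go.uber.org/zap"

	"storj.io/storj/satellite/overlay"
)

// selectUploadNodes builds a node selection cache and asks it for upload candidates.
// The db argument is any overlay.CacheDB implementation (in production, the
// satellite's overlaycache); the staleness value would normally come from
// CacheConfig.Staleness (3m release / 5m dev).
func selectUploadNodes(ctx context.Context, log *zap.Logger, db overlay.CacheDB, selectionCfg overlay.NodeSelectionConfig) ([]*overlay.SelectedNode, error) {
	cache := overlay.NewNodeSelectionCache(log, db, 3*time.Minute, selectionCfg)

	// GetNodes refreshes the in-memory snapshot from the nodes table if it is older
	// than the staleness window, then picks nodes from distinct networks (LastNet).
	return cache.GetNodes(ctx, overlay.FindStorageNodesRequest{RequestedCount: 10})
}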

parent d7794a4851
commit 5ea1602ca5

satellite/overlay/nodeselectioncache.go (new file, 195 lines)
@@ -0,0 +1,195 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"context"
	"math/rand"
	"sync"
	"time"

	"go.uber.org/zap"
)

// CacheDB implements the database for overlay node selection cache
//
// architecture: Database
type CacheDB interface {
	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)
}

// CacheConfig is a configuration for overlay node selection cache.
type CacheConfig struct {
	Staleness time.Duration `help:"how stale the node selection cache can be" releaseDefault:"3m" devDefault:"5m"`
}

// NodeSelectionCache keeps a list of all the storage nodes that are qualified to store data.
// We organize the nodes by whether they are reputable or new nodes on the network.
// The cache syncs with the nodes table in the database and gets refreshed once the staleness time has passed.
type NodeSelectionCache struct {
	log             *zap.Logger
	db              CacheDB
	selectionConfig NodeSelectionConfig
	staleness       time.Duration

	mu   sync.RWMutex
	data *state
}

type state struct {
	lastRefresh time.Time

	mu             sync.RWMutex
	reputableNodes []*SelectedNode
	newNodes       []*SelectedNode
}

// NewNodeSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data
func NewNodeSelectionCache(log *zap.Logger, db CacheDB, staleness time.Duration, config NodeSelectionConfig) *NodeSelectionCache {
	return &NodeSelectionCache{
		log:             log,
		db:              db,
		staleness:       staleness,
		selectionConfig: config,
		data:            &state{},
	}
}

// Refresh populates the cache with all of the reputable and new nodes.
// This method is useful for tests.
func (cache *NodeSelectionCache) Refresh(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, err = cache.refresh(ctx)
	return err
}

// refresh calls out to the database and refreshes the cache with the most up-to-date
// data from the nodes table, then sets the time that the last refresh occurred so we
// know when to refresh again in the future.
func (cache *NodeSelectionCache) refresh(ctx context.Context) (cacheData *state, err error) {
	defer mon.Task()(&ctx)(&err)
	cache.mu.Lock()
	defer cache.mu.Unlock()
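
	// The staleness check below runs while the write lock is held, so if a concurrent
	// caller refreshed the cache while we were waiting for the lock, we return that
	// still-fresh data without issuing another database query.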
	if cache.data != nil && time.Since(cache.data.lastRefresh) <= cache.staleness {
		return cache.data, nil
	}

	reputableNodes, newNodes, err := cache.db.SelectAllStorageNodesUpload(ctx, cache.selectionConfig)
	if err != nil {
		return cache.data, err
	}
	cache.data = &state{
		lastRefresh:    time.Now().UTC(),
		reputableNodes: reputableNodes,
		newNodes:       newNodes,
	}

	mon.IntVal("refresh_cache_size_reputable").Observe(int64(len(reputableNodes)))
	mon.IntVal("refresh_cache_size_new").Observe(int64(len(newNodes)))
	return cache.data, nil
}

// GetNodes selects nodes from the cache that will be used to upload a file.
// Every node selected will be from a distinct network.
// If the cache hasn't been refreshed recently it will do so first.
func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
	defer mon.Task()(&ctx)(&err)

	cache.mu.RLock()
	cacheData := cache.data
	cache.mu.RUnlock()

	// if the cache is stale, then refresh it before we get nodes
	if time.Since(cacheData.lastRefresh) > cache.staleness {
		cacheData, err = cache.refresh(ctx)
		if err != nil {
			return nil, err
		}
	}

	return cacheData.GetNodes(ctx, req, cache.selectionConfig.NewNodeFraction)
}

// GetNodes selects nodes from the cache that will be used to upload a file.
// If there are new nodes in the cache, we will return a small fraction of those
// and then return mostly reputable nodes.
func (cacheData *state) GetNodes(ctx context.Context, req FindStorageNodesRequest, newNodeFraction float64) (_ []*SelectedNode, err error) {
	defer mon.Task()(&ctx)(&err)

	cacheData.mu.RLock()
	defer cacheData.mu.RUnlock()

	// how many reputable versus new nodes should be selected
	totalcount := req.RequestedCount
	newNodeCount := int(float64(req.RequestedCount) * newNodeFraction)

	var selectedNodeResults = []*SelectedNode{}
	var distinctNetworks = map[string]struct{}{}

	// Get a random selection of new nodes out of the cache first so that if there aren't
	// enough new nodes on the network, we can fall back to using reputable nodes instead
	randomIndexes := rand.Perm(len(cacheData.newNodes))
	for _, idx := range randomIndexes {
		currNode := cacheData.newNodes[idx]
		if _, ok := distinctNetworks[currNode.LastNet]; ok {
			continue
		}
		excluded := false
		for _, excludedID := range req.ExcludedIDs {
			if excludedID == currNode.ID {
				excluded = true
				break
			}
		}
		if excluded {
			continue
		}

		selectedNodeResults = append(selectedNodeResults, currNode)
		distinctNetworks[currNode.LastNet] = struct{}{}
		if len(selectedNodeResults) >= newNodeCount {
			break
		}
	}

	randomIndexes = rand.Perm(len(cacheData.reputableNodes))
	for _, idx := range randomIndexes {
		currNode := cacheData.reputableNodes[idx]

		// don't select a node if we've already selected another node from the same network
		if _, ok := distinctNetworks[currNode.LastNet]; ok {
			continue
		}
		// don't select a node listed in the excluded list
		excluded := false
		for _, excludedID := range req.ExcludedIDs {
			if excludedID == currNode.ID {
				excluded = true
				break
			}
		}
		if excluded {
			continue
		}

		selectedNodeResults = append(selectedNodeResults, currNode)
		distinctNetworks[currNode.LastNet] = struct{}{}
		if len(selectedNodeResults) >= totalcount {
			break
		}
	}

	if len(selectedNodeResults) < totalcount {
		return nil, Error.New("unable to select enough nodes from node selection cache, needed: %d, actual: %d",
			totalcount, len(selectedNodeResults),
		)
	}
	return selectedNodeResults, nil
}

// Size returns how many reputable nodes and new nodes are in the cache
func (cache *NodeSelectionCache) Size() (reputableNodeCount int, newNodeCount int) {
	cache.mu.RLock()
	cacheData := cache.data
	cache.mu.RUnlock()
	return cacheData.size()
}

func (cacheData *state) size() (reputableNodeCount int, newNodeCount int) {
	cacheData.mu.RLock()
	defer cacheData.mu.RUnlock()
	return len(cacheData.reputableNodes), len(cacheData.newNodes)
}

satellite/overlay/nodeselectioncache_test.go (new file, 273 lines)
@@ -0,0 +1,273 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"context"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"
	"storj.io/common/testcontext"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

var nodeCfg = overlay.NodeSelectionConfig{
	AuditCount:       1,
	UptimeCount:      1,
	NewNodeFraction:  0.2,
	MinimumVersion:   "v1.0.0",
	OnlineWindow:     4 * time.Hour,
	DistinctIP:       true,
	MinimumDiskSpace: 100 * memory.MiB,
}

const (
	// staleness is how stale the cache can be before we sync with
	// the database to refresh the cache

	// using a negative time will force the cache to refresh every time
	lowStaleness = -time.Hour

	// using a positive time will make it so that the cache is only refreshed when
	// it hasn't been refreshed in the past hour
	highStaleness = time.Hour
)

func TestRefresh(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		cache := overlay.NewNodeSelectionCache(zap.NewNop(),
			db.OverlayCache(),
			lowStaleness,
			nodeCfg,
		)
		// the cache should have no nodes to start
		err := cache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new := cache.Size()
		require.Equal(t, 0, reputable)
		require.Equal(t, 0, new)

		// add some nodes to the database
		const nodeCount = 2
		addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount)

		// confirm the nodes are in the cache once refreshed
		err = cache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new = cache.Size()
		require.Equal(t, 2, new)
		require.Equal(t, 0, reputable)
	})
}
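
// addNodesToNodesTable inserts count nodes via UpdateCheckIn; each node checks in as
// online, on release version v1.1.0, on its own /24 subnet, and with enough free disk
// to satisfy nodeCfg, so every inserted node qualifies for selection.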
func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count int) {
	for i := 0; i < count; i++ {
		subnet := strconv.Itoa(i) + ".1.2"
		addr := subnet + ".3:8080"
		n := overlay.NodeCheckInInfo{
			NodeID: storj.NodeID{byte(i)},
			Address: &pb.NodeAddress{
				Address:   addr,
				Transport: pb.NodeTransport_TCP_TLS_GRPC,
			},
			LastNet:    subnet,
			LastIPPort: addr,
			IsUp:       true,
			Capacity: &pb.NodeCapacity{
				FreeDisk:      200 * memory.MiB.Int64(),
				FreeBandwidth: 1 * memory.TB.Int64(),
			},
			Version: &pb.NodeVersion{
				Version:    "v1.1.0",
				CommitHash: "",
				Timestamp:  time.Time{},
				Release:    true,
			},
		}
		err := db.UpdateCheckIn(ctx, n, time.Now().UTC(), nodeCfg)
		require.NoError(t, err)
	}
}
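
// mockdb implements CacheDB for these tests: it counts how many times the cache hits
// the database and sleeps briefly so that concurrent refresh calls overlap in time.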
type mockdb struct {
	mu        sync.Mutex
	callCount int
}

func (m *mockdb) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*overlay.SelectedNode, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	sync2.Sleep(ctx, 500*time.Millisecond)
	m.callCount++
	return []*overlay.SelectedNode{}, []*overlay.SelectedNode{}, nil
}

func TestRefreshConcurrent(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	// concurrent cache.Refresh with high staleness, where high staleness means the
	// cache should only be refreshed the first time we call cache.Refresh
	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	var group errgroup.Group
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	err := group.Wait()
	require.NoError(t, err)

	require.Equal(t, 1, mockDB.callCount)

	// concurrent cache.Refresh with low staleness, where low staleness
	// means that the cache will refresh *every time* cache.Refresh is called
	mockDB = mockdb{}
	cache = overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		lowStaleness,
		nodeCfg,
	)
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	group.Go(func() error {
		return cache.Refresh(ctx)
	})
	err = group.Wait()
	require.NoError(t, err)

	require.Equal(t, 2, mockDB.callCount)
}

func TestGetNode(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
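		// This config sets AuditCount and UptimeCount to 0, so every node added
		// below is classified as reputable rather than new.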
		var nodeCfg = overlay.NodeSelectionConfig{
			AuditCount:       0,
			UptimeCount:      0,
			NewNodeFraction:  0.2,
			MinimumVersion:   "v1.0.0",
			OnlineWindow:     4 * time.Hour,
			DistinctIP:       true,
			MinimumDiskSpace: 100 * memory.MiB,
		}
		cache := overlay.NewNodeSelectionCache(zap.NewNop(),
			db.OverlayCache(),
			lowStaleness,
			nodeCfg,
		)
		// the cache should have no nodes to start
		reputable, new := cache.Size()
		require.Equal(t, 0, reputable)
		require.Equal(t, 0, new)

		// add some nodes to the database
		const nodeCount = 4
		addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount)

		// confirm cache.GetNodes returns the correct nodes
		selectedNodes, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{RequestedCount: 2})
		require.NoError(t, err)
		reputable, new = cache.Size()
		require.Equal(t, 0, new)
		require.Equal(t, 4, reputable)
		require.Equal(t, 2, len(selectedNodes))
		for _, node := range selectedNodes {
			require.NotEqual(t, node.ID, "")
			require.NotEqual(t, node.Address.Address, "")
			require.NotEqual(t, node.LastIPPort, "")
			require.NotEqual(t, node.LastNet, "")
		}
	})
}

func TestGetNodeConcurrent(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	// concurrent GetNodes with high staleness, where high staleness means the
	// cache should only be refreshed the first time we call cache.GetNodes
	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	var group errgroup.Group
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	err := group.Wait()
	require.NoError(t, err)
	// expect only one call to the db via cache.GetNodes
	require.Equal(t, 1, mockDB.callCount)

	// concurrent get nodes with low staleness, where low staleness means that
	// the cache will refresh each time cache.GetNodes is called
	mockDB = mockdb{}
	cache = overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		lowStaleness,
		nodeCfg,
	)

	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	group.Go(func() error {
		_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{})
		return err
	})
	err = group.Wait()
	require.NoError(t, err)
	// expect two calls to the db via cache.GetNodes
	require.Equal(t, 2, mockDB.callCount)
}

func TestGetNodeError(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	mockDB := mockdb{}
	cache := overlay.NewNodeSelectionCache(zap.NewNop(),
		&mockDB,
		highStaleness,
		nodeCfg,
	)

	// there should be 0 nodes in the cache
	reputable, new := cache.Size()
	require.Equal(t, 0, reputable)
	require.Equal(t, 0, new)

	// since the cache has no nodes, we should not be able
	// to get 2 storage nodes from it and we expect an error
	_, err := cache.GetNodes(ctx, overlay.FindStorageNodesRequest{RequestedCount: 2})
	require.Error(t, err)
}

@@ -40,6 +40,9 @@ type DB interface {
	GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration) (map[storj.NodeID]*SelectedNode, error)
	// SelectStorageNodes looks up nodes based on criteria
	SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*SelectedNode, error)
	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)

	// Get looks up the node by nodeID
	Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
	// KnownOffline filters a set of nodes to offline nodes

@@ -33,6 +33,73 @@ type overlaycache struct {
	db *satelliteDB
}

// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
func (cache *overlaycache) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*overlay.SelectedNode, err error) {
	defer mon.Task()(&ctx)(&err)
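
	// Nodes that have not yet reached the configured audit and uptime counts are
	// returned as "new"; every other node matching the filters below is reputable.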
	query := `
		SELECT id, address, last_net, last_ip_port, (total_audit_count < $1 OR total_uptime_count < $2) as isnew
		FROM nodes
		WHERE disqualified IS NULL
		AND suspended IS NULL
		AND exit_initiated_at IS NULL
		AND type = $3
		AND free_disk >= $4
		AND last_contact_success > $5
	`
	args := []interface{}{
		// $1, $2
		selectionCfg.AuditCount, selectionCfg.UptimeCount,
		// $3
		int(pb.NodeType_STORAGE),
		// $4
		selectionCfg.MinimumDiskSpace.Int64(),
		// $5
		time.Now().Add(-selectionCfg.OnlineWindow),
	}
	if selectionCfg.MinimumVersion != "" {
		version, err := version.NewSemVer(selectionCfg.MinimumVersion)
		if err != nil {
			return nil, nil, err
		}
		query += `AND (major > $6 OR (major = $7 AND (minor > $8 OR (minor = $9 AND patch >= $10)))) AND release`
		args = append(args,
			// $6 - $10
			version.Major, version.Major, version.Minor, version.Minor, version.Patch,
		)
	}

	rows, err := cache.db.Query(ctx, query, args...)
	if err != nil {
		return nil, nil, err
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	var reputableNodes []*overlay.SelectedNode
	var newNodes []*overlay.SelectedNode
	for rows.Next() {
		var node overlay.SelectedNode
		node.Address = &pb.NodeAddress{}
		var lastIPPort sql.NullString
		var isnew bool
		err = rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort, &isnew)
		if err != nil {
			return nil, nil, err
		}
		if lastIPPort.Valid {
			node.LastIPPort = lastIPPort.String
		}

		if isnew {
			newNodes = append(newNodes, &node)
			continue
		}
		reputableNodes = append(reputableNodes, &node)
	}

	return reputableNodes, newNodes, Error.Wrap(rows.Err())
}

// GetNodesNetwork returns the /24 subnet for each storage node, order is not guaranteed.
func (cache *overlaycache) GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error) {
	defer mon.Task()(&ctx)(&err)