// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcpeer"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/reputation"
)
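
// The tests below use helpers defined elsewhere in this package: lowStaleness
// (a short staleness value for the upload selection cache), testNodeSelectionConfig
// (which, judging by its use here, builds an overlay.NodeSelectionConfig with the
// given new-node fraction), and testAuditHistoryConfig (which builds the audit
// history configuration assigned to config.Reputation.AuditHistory).

// TestMinimumDiskSpace checks that a node reporting less free disk space than
// the configured minimum is excluded from upload selection and becomes
// selectable again once it reports enough free space.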
func TestMinimumDiskSpace(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			UniqueIPCount: 2,
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.Node.MinimumDiskSpace = 10 * memory.MB
				config.Overlay.NodeSelectionCache.Staleness = lowStaleness
				config.Overlay.NodeCheckInWaitPeriod = 0
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		saOverlay := planet.Satellites[0].Overlay
		nodeConfig := planet.Satellites[0].Config.Overlay.Node

		node0 := planet.StorageNodes[0]
		node0.Contact.Chore.Pause(ctx)
		nodeInfo := node0.Contact.Service.Local()
		ident := node0.Identity
		peer := rpcpeer.Peer{
			Addr: &net.TCPAddr{
				IP:   net.ParseIP(nodeInfo.Address),
				Port: 5,
			},
			State: tls.ConnectionState{
				PeerCertificates: []*x509.Certificate{ident.Leaf, ident.CA},
			},
		}
		peerCtx := rpcpeer.NewContext(ctx, &peer)

		// report disk space less than minimum
		_, err := planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
			Address: nodeInfo.Address,
			Version: &nodeInfo.Version,
			Capacity: &pb.NodeCapacity{
				FreeDisk: 9 * memory.MB.Int64(),
			},
			Operator: &nodeInfo.Operator,
		})
		require.NoError(t, err)

		req := overlay.FindStorageNodesRequest{
			RequestedCount: 2,
		}

		// request 2 nodes, expect failure from not enough nodes
		n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req)
		require.Error(t, err)
		require.True(t, overlay.ErrNotEnoughNodes.Has(err))
		n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.Error(t, err)
		require.True(t, overlay.ErrNotEnoughNodes.Has(err))
		require.Equal(t, len(n2), len(n1))
		n3, err := saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig)
		require.Error(t, err)
		require.Equal(t, len(n3), len(n1))

		// report disk space greater than minimum
		_, err = planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
			Address: nodeInfo.Address,
			Version: &nodeInfo.Version,
			Capacity: &pb.NodeCapacity{
				FreeDisk: 11 * memory.MB.Int64(),
			},
			Operator: &nodeInfo.Operator,
		})
		require.NoError(t, err)

		// request 2 nodes, expect success
		n1, err = planet.Satellites[0].Overlay.Service.FindStorageNodesForUpload(ctx, req)
		require.NoError(t, err)
		require.Equal(t, 2, len(n1))
		n2, err = saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig)
		require.NoError(t, err)
		require.Equal(t, len(n1), len(n2))
		n3, err = saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.NoError(t, err)
		require.Equal(t, len(n1), len(n3))
	})
}
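
// TestOnlineOffline checks that KnownReliable reports running test nodes as
// online and omits node IDs the satellite has never seen.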
func TestOnlineOffline(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		service := satellite.Overlay.Service

		online, offline, err := service.KnownReliable(ctx, []storj.NodeID{
			planet.StorageNodes[0].ID(),
		})
		require.NoError(t, err)
		require.Empty(t, offline)
		require.Len(t, online, 1)

		online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
			planet.StorageNodes[0].ID(),
			planet.StorageNodes[1].ID(),
			planet.StorageNodes[2].ID(),
		})
		require.NoError(t, err)
		require.Empty(t, offline)
		require.Len(t, online, 3)

		unreliableNodeID := storj.NodeID{1, 2, 3, 4}
		online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
			planet.StorageNodes[0].ID(),
			unreliableNodeID,
			planet.StorageNodes[2].ID(),
		})
		require.NoError(t, err)
		require.Empty(t, offline)
		require.Len(t, online, 2)

		require.False(t, slices.ContainsFunc(online, func(node nodeselection.SelectedNode) bool {
			return node.ID == unreliableNodeID
		}))
		require.False(t, slices.ContainsFunc(offline, func(node nodeselection.SelectedNode) bool {
			return node.ID == unreliableNodeID
		}))
	})
}
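
// TestEnsureMinimumRequested checks that node selection always returns the
// requested number of nodes, splitting the result between reputable and new
// nodes according to the configured new-node fraction and falling back to
// reputable nodes when no new nodes remain.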
func TestEnsureMinimumRequested(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			UniqueIPCount: 5,
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.Node.MinimumDiskSpace = 10 * memory.MB
				config.Reputation.InitialAlpha = 1
				config.Reputation.AuditLambda = 1
				config.Reputation.UnknownAuditLambda = 1
				config.Reputation.AuditWeight = 1
				config.Reputation.AuditDQ = 0.5
				config.Reputation.UnknownAuditDQ = 0.5
				config.Reputation.AuditCount = 1
				config.Reputation.AuditHistory = testAuditHistoryConfig()
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		// pause chores that might update node data
		satellite.RangedLoop.RangedLoop.Service.Loop.Stop()
		satellite.Repair.Repairer.Loop.Pause()
		for _, node := range planet.StorageNodes {
			node.Contact.Chore.Pause(ctx)
		}

		service := satellite.Overlay.Service
		repService := satellite.Reputation.Service

		reputable := map[storj.NodeID]bool{}

		countReputable := func(selected []*nodeselection.SelectedNode) (count int) {
			for _, n := range selected {
				if reputable[n.ID] {
					count++
				}
			}
			return count
		}

		// update half of nodes to be reputable
		for i := 0; i < 5; i++ {
			node := planet.StorageNodes[i]
			reputable[node.ID()] = true
			err := repService.ApplyAudit(ctx, node.ID(), overlay.ReputationStatus{}, reputation.AuditSuccess)
			require.NoError(t, err)
		}
		err := repService.TestFlushAllNodeInfo(ctx)
		require.NoError(t, err)

		t.Run("request 5, where 1 new", func(t *testing.T) {
			requestedCount, newCount := 5, 1
			newNodeFraction := float64(newCount) / float64(requestedCount)
			preferences := testNodeSelectionConfig(newNodeFraction)
			req := overlay.FindStorageNodesRequest{
				RequestedCount: requestedCount,
			}
			nodes, err := service.FindStorageNodesWithPreferences(ctx, req, &preferences)
			require.NoError(t, err)
			require.Len(t, nodes, requestedCount)
			require.Equal(t, requestedCount-newCount, countReputable(nodes))
		})

		t.Run("request 5, all new", func(t *testing.T) {
			requestedCount, newCount := 5, 5
			newNodeFraction := float64(newCount) / float64(requestedCount)
			preferences := testNodeSelectionConfig(newNodeFraction)
			req := overlay.FindStorageNodesRequest{
				RequestedCount: requestedCount,
			}
			nodes, err := service.FindStorageNodesWithPreferences(ctx, req, &preferences)
			require.NoError(t, err)
			require.Len(t, nodes, requestedCount)
			require.Equal(t, 0, countReputable(nodes))

			n2, err := service.UploadSelectionCache.GetNodes(ctx, req)
			require.NoError(t, err)
			require.Equal(t, requestedCount, len(n2))
		})

		// update all of them to be reputable
		for i := 5; i < 10; i++ {
			node := planet.StorageNodes[i]
			reputable[node.ID()] = true
			err := repService.ApplyAudit(ctx, node.ID(), overlay.ReputationStatus{}, reputation.AuditSuccess)
			require.NoError(t, err)
		}

		t.Run("no new nodes", func(t *testing.T) {
			requestedCount, newCount := 5, 1.0
			newNodeFraction := newCount / float64(requestedCount)
			preferences := testNodeSelectionConfig(newNodeFraction)
			satellite.Config.Overlay.Node = testNodeSelectionConfig(newNodeFraction)

			nodes, err := service.FindStorageNodesWithPreferences(ctx, overlay.FindStorageNodesRequest{
				RequestedCount: requestedCount,
			}, &preferences)
			require.NoError(t, err)
			require.Len(t, nodes, requestedCount)
			// all of them should be reputable because there are no new nodes
			require.Equal(t, 5, countReputable(nodes))
		})
	})
}
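
// TestNodeSelection runs FindStorageNodesWithPreferences through a table of
// vetted/new node mixes, new-node fractions, and excluded IDs, checking the
// returned node count and the ErrNotEnoughNodes failures.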
func TestNodeSelection(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Reputation.AuditHistory = testAuditHistoryConfig()
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		service := satellite.Overlay.Service
		errNotEnoughNodes := &overlay.ErrNotEnoughNodes
		tests := []struct {
			description     string
			requestCount    int
			newNodeFraction float64
			reputableNodes  int
			expectedCount   int
			shouldFailWith  *errs.Class
			exclude         func() (excludedNodes []storj.NodeID)
		}{
			{
				description:     "all reputable nodes, only reputable nodes requested",
				requestCount:    6,
				newNodeFraction: 0,
				reputableNodes:  6,
				expectedCount:   6,
			},
			{
				description:     "all reputable nodes, up to 100% new nodes requested",
				requestCount:    5,
				newNodeFraction: 1,
				reputableNodes:  6,
				expectedCount:   5,
			},
			{
				description:     "3 reputable and 3 new nodes, 6 reputable nodes requested, not enough reputable nodes",
				requestCount:    6,
				newNodeFraction: 0,
				reputableNodes:  3,
				expectedCount:   3,
				shouldFailWith:  errNotEnoughNodes,
			},
			{
				description:     "50-50 reputable and new nodes, reputable and new nodes requested, not enough reputable nodes",
				requestCount:    5,
				newNodeFraction: 0.2,
				reputableNodes:  3,
				expectedCount:   4,
				shouldFailWith:  errNotEnoughNodes,
			},
			{
				description:     "all new nodes except one, reputable and new nodes requested (happy path)",
				requestCount:    2,
				newNodeFraction: 0.5,
				reputableNodes:  1,
				expectedCount:   2,
			},
			{
				description:     "all new nodes except one, reputable and new nodes requested (not happy path)",
				requestCount:    4,
				newNodeFraction: 0.5,
				reputableNodes:  1,
				expectedCount:   3,
				shouldFailWith:  errNotEnoughNodes,
			},
			{
				description:     "all new nodes, reputable and new nodes requested",
				requestCount:    6,
				newNodeFraction: 1,
				reputableNodes:  0,
				expectedCount:   6,
			},
			{
				description:     "excluded node ids",
				requestCount:    6,
				newNodeFraction: 0,
				reputableNodes:  6,
				expectedCount:   1,
				shouldFailWith:  errNotEnoughNodes,
				exclude: func() (excludedNodes []storj.NodeID) {
					for _, storageNode := range planet.StorageNodes[:5] {
						excludedNodes = append(excludedNodes, storageNode.ID())
					}
					return excludedNodes
				},
			},
		}

		for _, tt := range tests {
			t.Log(tt.description)
			var excludedNodes []storj.NodeID
			if tt.exclude != nil {
				excludedNodes = tt.exclude()
			}
			for i, node := range planet.StorageNodes {
				if i < tt.reputableNodes {
					_, err := satellite.Overlay.Service.TestVetNode(ctx, node.ID())
					require.NoError(t, err)
				} else {
					err := satellite.Overlay.Service.TestUnvetNode(ctx, node.ID())
					require.NoError(t, err)
				}
			}
			config := testNodeSelectionConfig(tt.newNodeFraction)
			response, err := service.FindStorageNodesWithPreferences(ctx, overlay.FindStorageNodesRequest{RequestedCount: tt.requestCount, ExcludedIDs: excludedNodes}, &config)
			if tt.shouldFailWith != nil {
				require.Error(t, err)
				assert.True(t, tt.shouldFailWith.Has(err))
			} else {
				require.NoError(t, err)
			}
			if len(excludedNodes) > 0 {
				for _, n := range response {
					for _, m := range excludedNodes {
						require.NotEqual(t, n.ID, m)
					}
				}
			}
			require.Equal(t, tt.expectedCount, len(response))
		}
	})
}
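
// TestNodeSelectionGracefulExit checks that nodes which have initiated
// graceful exit are never returned by node selection, whether they are new
// or vetted.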
func TestNodeSelectionGracefulExit(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.Node.MinimumDiskSpace = 10 * memory.MB
				config.Reputation.InitialAlpha = 1
				config.Reputation.AuditLambda = 1
				config.Reputation.UnknownAuditLambda = 1
				config.Reputation.AuditWeight = 1
				config.Reputation.AuditDQ = 0.5
				config.Reputation.UnknownAuditDQ = 0.5
				config.Reputation.AuditHistory = testAuditHistoryConfig()
				config.Reputation.AuditCount = 5 // need 5 audits to be vetted
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		exitingNodes := make(map[storj.NodeID]bool)

		// This sets audit counts of 0, 1, 2, 3, ... 9
		// so that we can fine-tune how many nodes are considered new or reputable
		// by modifying the audit count cutoff passed into FindStorageNodesWithPreferences
		// nodes at indices 0, 2, 4, 6, 8 are gracefully exiting
		for i, node := range planet.StorageNodes {
			for k := 0; k < i; k++ {
				err := satellite.Reputation.Service.ApplyAudit(ctx, node.ID(), overlay.ReputationStatus{}, reputation.AuditSuccess)
				require.NoError(t, err)
			}

			// make half the nodes gracefully exiting
			if i%2 == 0 {
				_, err := satellite.DB.OverlayCache().UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
					NodeID:          node.ID(),
					ExitInitiatedAt: time.Now(),
				})
				require.NoError(t, err)
				exitingNodes[node.ID()] = true
			}
		}

		// There are now 5 new nodes, and 5 reputable (vetted) nodes. 3 of the
		// new nodes are gracefully exiting, and 2 of the reputable nodes.
		type test struct {
			Preferences    overlay.NodeSelectionConfig
			ExcludeCount   int
			RequestCount   int
			ExpectedCount  int
			ShouldFailWith *errs.Class
		}

		for i, tt := range []test{
			{ // reputable and new nodes, happy path
				Preferences:   testNodeSelectionConfig(0.5),
				RequestCount:  5,
				ExpectedCount: 5, // 2 new + 3 vetted
			},
			{ // all reputable nodes, happy path
				Preferences:   testNodeSelectionConfig(0),
				RequestCount:  3,
				ExpectedCount: 3,
			},
			{ // all new nodes, happy path
				Preferences:   testNodeSelectionConfig(1),
				RequestCount:  2,
				ExpectedCount: 2,
			},
			{ // reputable and new nodes, requested too many
				Preferences:    testNodeSelectionConfig(0.5),
				RequestCount:   10,
				ExpectedCount:  5, // 2 new + 3 vetted
				ShouldFailWith: &overlay.ErrNotEnoughNodes,
			},
			{ // all reputable nodes, requested too many
				Preferences:    testNodeSelectionConfig(0),
				RequestCount:   10,
				ExpectedCount:  3,
				ShouldFailWith: &overlay.ErrNotEnoughNodes,
			},
			{ // all new nodes, requested too many
				Preferences:    testNodeSelectionConfig(1),
				RequestCount:   10,
				ExpectedCount:  2,
				ShouldFailWith: &overlay.ErrNotEnoughNodes,
			},
		} {
			t.Logf("#%2d. %+v", i, tt)

			response, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx,
				overlay.FindStorageNodesRequest{
					RequestedCount:     tt.RequestCount,
					AsOfSystemInterval: -time.Microsecond,
				}, &tt.Preferences)

			t.Log(len(response), err)
			if tt.ShouldFailWith != nil {
				assert.Error(t, err)
				assert.True(t, tt.ShouldFailWith.Has(err))
			} else {
				assert.NoError(t, err)
			}

			assert.Equal(t, tt.ExpectedCount, len(response))

			// expect no exiting nodes in selection
			for _, node := range response {
				assert.False(t, exitingNodes[node.ID])
			}
		}
	})
}
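
// TestFindStorageNodesDistinctNetworks checks that, with DistinctIP enabled,
// node selection returns nodes with distinct last_net values and honors the
// excluded node IDs.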
func TestFindStorageNodesDistinctNetworks(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			// will create 3 storage nodes with the same IP; the other 2 will have unique IPs
			UniqueIPCount: 2,
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.Node.DistinctIP = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		// select one of the nodes that shares an IP with others to exclude
		var excludedNodes storj.NodeIDList
		addrCounts := make(map[string]int)
		var excludedNodeAddr string
		for _, node := range planet.StorageNodes {
			addrNoPort := strings.Split(node.Addr(), ":")[0]
			if addrCounts[addrNoPort] > 0 && len(excludedNodes) == 0 {
				excludedNodes = append(excludedNodes, node.ID())
				break
			}
			addrCounts[addrNoPort]++
		}
		require.Len(t, excludedNodes, 1)
		res, err := satellite.Overlay.Service.Get(ctx, excludedNodes[0])
		require.NoError(t, err)
		excludedNodeAddr = res.LastIPPort

		req := overlay.FindStorageNodesRequest{
			RequestedCount: 2,
			ExcludedIDs:    excludedNodes,
		}
		nodes, err := satellite.Overlay.Service.FindStorageNodesForUpload(ctx, req)
		require.NoError(t, err)
		require.Len(t, nodes, 2)
		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)
		n2, err := satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.NoError(t, err)
		require.Len(t, n2, 2)
		require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort)
		require.NotEqual(t, n2[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, n2[1].LastIPPort, excludedNodeAddr)
		n3, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx, req, &satellite.Config.Overlay.Node)
		require.NoError(t, err)
		require.Len(t, n3, 2)
		require.NotEqual(t, n3[0].LastIPPort, n3[1].LastIPPort)
		require.NotEqual(t, n3[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, n3[1].LastIPPort, excludedNodeAddr)

		req = overlay.FindStorageNodesRequest{
			RequestedCount: 4,
			ExcludedIDs:    excludedNodes,
		}
		n, err := satellite.Overlay.Service.FindStorageNodesForUpload(ctx, req)
		require.Error(t, err)
		n1, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx, req, &satellite.Config.Overlay.Node)
		require.Error(t, err)
		require.Equal(t, len(n), len(n1))
		n2, err = satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.Error(t, err)
		require.Equal(t, len(n1), len(n2))
	})
}
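// TestSelectNewStorageNodesExcludedIPs runs the same exclusion check against
// the new-node selection path (NewNodeFraction = 1) and verifies that the
// selected nodes have distinct addresses that differ from the excluded node's
// address.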
func TestSelectNewStorageNodesExcludedIPs(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			// will create 2 storage nodes with the same IP; 2 will have unique IPs
			UniqueIPCount: 2,
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.Node.DistinctIP = true
				config.Overlay.Node.NewNodeFraction = 1
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		// select one of the nodes that shares an IP with others to exclude
		var excludedNodes storj.NodeIDList
		addrCounts := make(map[string]int)
		var excludedNodeAddr string
		for _, node := range planet.StorageNodes {
			addrNoPort := strings.Split(node.Addr(), ":")[0]
			if addrCounts[addrNoPort] > 0 {
				excludedNodes = append(excludedNodes, node.ID())
				break
			}
			addrCounts[addrNoPort]++
		}
		require.Len(t, excludedNodes, 1)
		res, err := satellite.Overlay.Service.Get(ctx, excludedNodes[0])
		require.NoError(t, err)
		excludedNodeAddr = res.LastIPPort

		req := overlay.FindStorageNodesRequest{
			RequestedCount: 2,
			ExcludedIDs:    excludedNodes,
		}
		nodes, err := satellite.Overlay.Service.FindStorageNodesForUpload(ctx, req)
		require.NoError(t, err)
		require.Len(t, nodes, 2)
		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)
		n2, err := satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.NoError(t, err)
		require.Len(t, n2, 2)
		require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort)
		require.NotEqual(t, n2[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, n2[1].LastIPPort, excludedNodeAddr)
		n3, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx, req, &satellite.Config.Overlay.Node)
		require.NoError(t, err)
		require.Len(t, n3, 2)
		require.NotEqual(t, n3[0].LastIPPort, n3[1].LastIPPort)
		require.NotEqual(t, n3[0].LastIPPort, excludedNodeAddr)
		require.NotEqual(t, n3[1].LastIPPort, excludedNodeAddr)
	})
}

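// TestDistinctIPs vets two of the ten storage nodes and then runs
// testDistinctIPs to check that selection with DistinctIP enabled only
// returns nodes on distinct IPs.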
func TestDistinctIPs(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			UniqueIPCount: 3,
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Reputation.InitialAlpha = 1
				config.Reputation.AuditLambda = 1
				config.Reputation.UnknownAuditLambda = 1
				config.Reputation.AuditWeight = 1
				config.Reputation.AuditDQ = 0.5
				config.Reputation.UnknownAuditDQ = 0.5
				config.Reputation.AuditHistory = testAuditHistoryConfig()
				config.Reputation.AuditCount = 1
				config.Overlay.Node.DistinctIP = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		// Vets nodes[8] and nodes[9].
		for i := 9; i > 7; i-- {
			err := satellite.Reputation.Service.ApplyAudit(ctx, planet.StorageNodes[i].ID(), overlay.ReputationStatus{}, reputation.AuditSuccess)
			assert.NoError(t, err)
		}
		testDistinctIPs(t, ctx, planet)
	})
}

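// TestDistinctIPsWithBatch runs the same checks as TestDistinctIPs, but with
// Overlay.UpdateStatsBatchSize set to 1 so that stats updates are applied in
// batches of one.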
func TestDistinctIPsWithBatch(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			UniqueIPCount: 3, // creates 3 additional unique IP addresses, for a total of 4 IPs
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.UpdateStatsBatchSize = 1
				config.Reputation.InitialAlpha = 1
				config.Reputation.AuditLambda = 1
				config.Reputation.UnknownAuditLambda = 1
				config.Reputation.AuditWeight = 1
				config.Reputation.AuditDQ = 0.5
				config.Reputation.UnknownAuditDQ = 0.5
				config.Reputation.AuditHistory = testAuditHistoryConfig()
				config.Reputation.AuditCount = 1
				config.Overlay.Node.DistinctIP = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		// Vets nodes[8] and nodes[9].
		for i := 9; i > 7; i-- {
			err := satellite.Reputation.Service.ApplyAudit(ctx, planet.StorageNodes[i].ID(), overlay.ReputationStatus{}, reputation.AuditSuccess)
			assert.NoError(t, err)
		}
		testDistinctIPs(t, ctx, planet)
	})
}

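// testDistinctIPs requests nodes with different new-node fractions and asserts
// that every returned node has a unique LastIPPort, and that requesting more
// nodes than there are distinct IPs fails with ErrNotEnoughNodes.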
func testDistinctIPs(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
	satellite := planet.Satellites[0]
	service := satellite.Overlay.Service

	tests := []struct {
		requestCount   int
		preferences    overlay.NodeSelectionConfig
		shouldFailWith *errs.Class
	}{
		{ // test only distinct IPs with half new nodes
			// expect 2 new and 2 vetted
			requestCount: 4,
			preferences:  testNodeSelectionConfig(0.5),
		},
		{ // test not enough distinct IPs
			requestCount:   5, // expect 3 new, 2 old but fails because only 4 distinct IPs, not 5
			preferences:    testNodeSelectionConfig(0.6),
			shouldFailWith: &overlay.ErrNotEnoughNodes,
		},
	}

	for _, tt := range tests {
		response, err := service.FindStorageNodesWithPreferences(ctx,
			overlay.FindStorageNodesRequest{
				RequestedCount:     tt.requestCount,
				AsOfSystemInterval: -time.Microsecond,
			}, &tt.preferences)
		if tt.shouldFailWith != nil {
			assert.Error(t, err)
			assert.True(t, tt.shouldFailWith.Has(err))
			continue
		} else {
			require.NoError(t, err)
		}

		// assert all IPs are unique
		if tt.preferences.DistinctIP {
			ips := make(map[string]bool)
			for _, n := range response {
				assert.False(t, ips[n.LastIPPort])
				ips[n.LastIPPort] = true
			}
		}

		assert.Equal(t, tt.requestCount, len(response))
	}
}

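// TestAddrtoNetwork_Conversion checks how ResolveIPAndNetwork derives the
// network (last_net) for an address: with DistinctIP enabled the IP is masked
// by the configured IPv4/IPv6 prefix length, and with DistinctIP disabled the
// full host:port is used.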
func TestAddrtoNetwork_Conversion(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	runTest := func(t *testing.T, ipAddr, port string, distinctIPEnabled bool, ipv4Mask, ipv6Mask int, expectedNetwork string) {
		t.Run(fmt.Sprintf("%s-%s-%v-%d-%d", ipAddr, port, distinctIPEnabled, ipv4Mask, ipv6Mask), func(t *testing.T) {
			ipAndPort := net.JoinHostPort(ipAddr, port)
			config := overlay.NodeSelectionConfig{
				DistinctIP:        distinctIPEnabled,
				NetworkPrefixIPv4: ipv4Mask,
				NetworkPrefixIPv6: ipv6Mask,
			}
			resolvedIP, resolvedPort, network, err := overlay.ResolveIPAndNetwork(ctx, ipAndPort, config, overlay.MaskOffLastNet)
			require.NoError(t, err)
			assert.Equal(t, expectedNetwork, network)
			assert.Equal(t, ipAddr, resolvedIP.String())
			assert.Equal(t, port, resolvedPort)
		})
	}

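	// IPv4: a /17 mask keeps only the top bit of the third octet, so
	// 8.8.255.8 maps to 8.8.128.0. With DistinctIP disabled, the full
	// host:port is the network.
	// IPv6: a /64 mask keeps the first four groups of fc00::1:200, giving
	// fc00::, while a /112 mask (128-16) zeroes only the last group,
	// giving fc00::1:0.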
	runTest(t, "8.8.255.8", "28967", true, 17, 128, "8.8.128.0")
	runTest(t, "8.8.255.8", "28967", false, 0, 0, "8.8.255.8:28967")

	runTest(t, "fc00::1:200", "28967", true, 0, 64, "fc00::")
	runTest(t, "fc00::1:200", "28967", true, 0, 128-16, "fc00::1:0")
	runTest(t, "fc00::1:200", "28967", false, 0, 0, "[fc00::1:200]:28967")
}

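// TestCacheSelectionVsDBSelection verifies that the upload selection cache,
// FindStorageNodesForUpload, and FindStorageNodesWithPreferences all return
// the same number of nodes for the same request.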
func TestCacheSelectionVsDBSelection(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			UniqueIPCount: 5,
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.StorageNodes[0].Storage2.Monitor.Loop.Pause()
		saOverlay := planet.Satellites[0].Overlay
		nodeConfig := planet.Satellites[0].Config.Overlay.Node

		req := overlay.FindStorageNodesRequest{RequestedCount: 5}
		n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req)
		require.NoError(t, err)
		n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
		require.NoError(t, err)
		require.Equal(t, len(n2), len(n1))
		n3, err := saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig)
		require.NoError(t, err)
		require.Equal(t, len(n3), len(n2))
	})
}