// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"context"
	"fmt"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/nodeevents"
	"storj.io/storj/satellite/nodeselection/uploadselection"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/reputation"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
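
// TestCache_Database runs testCache against each satellite database backend
// provided by satellitedbtest.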
func TestCache_Database(t *testing.T) {
	t.Parallel()

	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		testCache(ctx, t, db.OverlayCache(), db.NodeEvents())
	})
}

// returns a NodeSelectionConfig with sensible test values.
func testNodeSelectionConfig(newNodeFraction float64) overlay.NodeSelectionConfig {
	return overlay.NodeSelectionConfig{
		NewNodeFraction: newNodeFraction,
		OnlineWindow:    time.Hour,
	}
}

// returns an AuditHistoryConfig with sensible test values.
func testAuditHistoryConfig() reputation.AuditHistoryConfig {
	return reputation.AuditHistoryConfig{
		WindowSize:       time.Hour,
		TrackingPeriod:   time.Hour,
		GracePeriod:      time.Hour,
		OfflineThreshold: 0,
	}
}

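// testCache exercises basic overlay operations against the given overlay.DB:
// node check-ins, handling of invalid (non-UTF-8) check-in fields, and Get
// lookups for known, disqualified, and missing nodes.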
func testCache(ctx *testcontext.Context, t *testing.T, store overlay.DB, nodeEvents nodeevents.DB) {
	valid1ID := testrand.NodeID()
	valid2ID := testrand.NodeID()
	valid3ID := testrand.NodeID()
	missingID := testrand.NodeID()
	address := &pb.NodeAddress{Address: "127.0.0.1:0"}
	lastNet := "127.0.0"

	nodeSelectionConfig := testNodeSelectionConfig(0)
	serviceConfig := overlay.Config{
		Node: nodeSelectionConfig,
		NodeSelectionCache: overlay.UploadSelectionCacheConfig{
			Staleness: lowStaleness,
		},
		UpdateStatsBatchSize: 100,
	}

	serviceCtx, serviceCancel := context.WithCancel(ctx)
	defer serviceCancel()
	service, err := overlay.NewService(zaptest.NewLogger(t), store, nodeEvents, "", "", serviceConfig)
	require.NoError(t, err)
	ctx.Go(func() error { return service.Run(serviceCtx) })
	defer ctx.Check(service.Close)

	d := overlay.NodeCheckInInfo{
		Address:    address,
		LastIPPort: address.Address,
		LastNet:    lastNet,
		Version:    &pb.NodeVersion{Version: "v1.0.0"},
		IsUp:       true,
	}
	{ // Put
		d.NodeID = valid1ID
		err := store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
		require.NoError(t, err)

		d.NodeID = valid2ID
		err = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
		require.NoError(t, err)

		d.NodeID = valid3ID
		err = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
		require.NoError(t, err)

		// disqualify one node
		err = service.DisqualifyNode(ctx, valid3ID, overlay.DisqualificationReasonUnknown)
		require.NoError(t, err)
	}

	{ // Invalid shouldn't cause a panic.
		validInfo := func() overlay.NodeCheckInInfo {
			return overlay.NodeCheckInInfo{
				Address:    address,
				LastIPPort: address.Address,
				LastNet:    lastNet,
				Version: &pb.NodeVersion{
					Version:    "v1.0.0",
					CommitHash: "alpha",
				},
				IsUp: true,
				Operator: &pb.NodeOperator{
					Email:          "\x00",
					Wallet:         "0x1234",
					WalletFeatures: []string{"zerog"},
				},
			}
		}

		// Currently Postgres returns an error and CockroachDB doesn't return
		// an error for a non-UTF-8 text field.

		d := validInfo()
		d.Operator.Email = "\x00"
		_ = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)

		d = validInfo()
		d.Operator.Wallet = "\x00"
		_ = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)

		d = validInfo()
		d.Operator.WalletFeatures[0] = "\x00"
		_ = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)

		d = validInfo()
		d.Version.CommitHash = "\x00"
		_ = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
	}

	{ // Get
		_, err := service.Get(ctx, storj.NodeID{})
		require.Error(t, err)
		require.Equal(t, overlay.ErrEmptyNode, err)

		valid1, err := service.Get(ctx, valid1ID)
		require.NoError(t, err)
		require.Equal(t, valid1.Id, valid1ID)

		valid2, err := service.Get(ctx, valid2ID)
		require.NoError(t, err)
		require.Equal(t, valid2.Id, valid2ID)

		invalid2, err := service.Get(ctx, missingID)
		require.Error(t, err)
		require.True(t, overlay.ErrNodeNotFound.Has(err))
		require.Nil(t, invalid2)

		// TODO: add erroring database test
	}
}
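
// TestRandomizedSelection checks that SelectStorageNodes picks nodes roughly
// uniformly: after many selection rounds, almost every node should have been
// selected at least minSelectCount times.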
func TestRandomizedSelection(t *testing.T) {
	t.Parallel()

	totalNodes := 1000
	selectIterations := 100
	numNodesToSelect := 100
	minSelectCount := 3 // TODO: compute this limit better

	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		cache := db.OverlayCache()
		allIDs := make(storj.NodeIDList, totalNodes)
		nodeCounts := make(map[storj.NodeID]int)

		// put nodes in cache
		for i := 0; i < totalNodes; i++ {
			newID := testrand.NodeID()
			addr := fmt.Sprintf("127.0.%d.0:8080", i)
			lastNet := fmt.Sprintf("127.0.%d", i)
			d := overlay.NodeCheckInInfo{
				NodeID:     newID,
				Address:    &pb.NodeAddress{Address: addr},
				LastIPPort: addr,
				LastNet:    lastNet,
				Version:    &pb.NodeVersion{Version: "v1.0.0"},
				Capacity:   &pb.NodeCapacity{},
				IsUp:       true,
			}
			err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
			require.NoError(t, err)

			if i%2 == 0 { // make half of nodes "new" and half "vetted"
				_, err = cache.TestVetNode(ctx, newID)
				require.NoError(t, err)
			}

			allIDs[i] = newID
			nodeCounts[newID] = 0
		}

		// select numNodesToSelect nodes selectIterations times
		for i := 0; i < selectIterations; i++ {
			var nodes []*uploadselection.SelectedNode
			var err error

			if i%2 == 0 {
				nodes, err = cache.SelectStorageNodes(ctx, numNodesToSelect, 0, &overlay.NodeCriteria{
					OnlineWindow: time.Hour,
				})
				require.NoError(t, err)
			} else {
				nodes, err = cache.SelectStorageNodes(ctx, numNodesToSelect, numNodesToSelect, &overlay.NodeCriteria{
					OnlineWindow: time.Hour,
				})
				require.NoError(t, err)
			}
			require.Len(t, nodes, numNodesToSelect)

			for _, node := range nodes {
				nodeCounts[node.ID]++
			}
		}

		belowThreshold := 0

		table := []int{}

		// expect that each node has been selected at least minSelectCount times
		for _, id := range allIDs {
			count := nodeCounts[id]
			if count < minSelectCount {
				belowThreshold++
			}
			if count >= len(table) {
				table = append(table, make([]int, count-len(table)+1)...)
			}
			table[count]++
		}

		if belowThreshold > totalNodes*1/100 {
			t.Errorf("%d out of %d were below threshold %d", belowThreshold, totalNodes, minSelectCount)
			for count, amount := range table {
				t.Logf("%3d = %4d", count, amount)
			}
		}
	})
}
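
// TestRandomizedSelectionCache performs the same uniformity check as
// TestRandomizedSelection, but selects nodes through the upload selection
// cache instead of querying the database directly.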
func TestRandomizedSelectionCache(t *testing.T) {
	t.Parallel()

	totalNodes := 1000
	selectIterations := 100
	numNodesToSelect := 100
	minSelectCount := 3

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.NodeSelectionCache.Staleness = lowStaleness
				config.Overlay.Node.NewNodeFraction = 0.5 // select 50% new nodes
				config.Reputation.AuditCount = 1
				config.Reputation.AuditLambda = 1
				config.Reputation.AuditWeight = 1
				config.Reputation.AuditDQ = 0.5
				config.Reputation.AuditHistory = testAuditHistoryConfig()
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		overlaydb := satellite.Overlay.DB
		uploadSelectionCache := satellite.Overlay.Service.UploadSelectionCache
		allIDs := make(storj.NodeIDList, totalNodes)
		nodeCounts := make(map[storj.NodeID]int)
		expectedNewCount := int(float64(totalNodes) * satellite.Config.Overlay.Node.NewNodeFraction)

		// put nodes in cache
		for i := 0; i < totalNodes; i++ {
			newID := testrand.NodeID()
			address := fmt.Sprintf("127.0.%d.0:8080", i)
			lastNet := address

			n := overlay.NodeCheckInInfo{
				NodeID: newID,
				Address: &pb.NodeAddress{
					Address: address,
				},
				LastNet:    lastNet,
				LastIPPort: address,
				IsUp:       true,
				Capacity: &pb.NodeCapacity{
					FreeDisk: 200 * memory.MiB.Int64(),
				},
				Version: &pb.NodeVersion{
					Version:    "v1.1.0",
					CommitHash: "",
					Timestamp:  time.Time{},
					Release:    true,
				},
			}
			defaults := overlay.NodeSelectionConfig{}
			err := overlaydb.UpdateCheckIn(ctx, n, time.Now().UTC(), defaults)
			require.NoError(t, err)

			if i%2 == 0 {
				// make half of nodes "new" and half "vetted"
				_, err = overlaydb.TestVetNode(ctx, newID)
				require.NoError(t, err)
			}

			allIDs[i] = newID
			nodeCounts[newID] = 0
		}

		err := uploadSelectionCache.Refresh(ctx)
		require.NoError(t, err)
		reputable, new, err := uploadSelectionCache.Size(ctx)
		require.NoError(t, err)
		require.Equal(t, totalNodes-expectedNewCount, reputable)
		require.Equal(t, expectedNewCount, new)

		// select numNodesToSelect nodes selectIterations times
		for i := 0; i < selectIterations; i++ {
			var nodes []*uploadselection.SelectedNode
			var err error
			req := overlay.FindStorageNodesRequest{
				RequestedCount: numNodesToSelect,
			}

			nodes, err = uploadSelectionCache.GetNodes(ctx, req)
			require.NoError(t, err)
			require.Len(t, nodes, numNodesToSelect)

			for _, node := range nodes {
				nodeCounts[node.ID]++
			}
		}

		belowThreshold := 0

		table := []int{}

		// expect that each node has been selected at least minSelectCount times
		for _, id := range allIDs {
			count := nodeCounts[id]
			if count < minSelectCount {
				belowThreshold++
			}
			if count >= len(table) {
				table = append(table, make([]int, count-len(table)+1)...)
			}
			table[count]++
		}

		if belowThreshold > totalNodes*1/100 {
			t.Errorf("%d out of %d were below threshold %d", belowThreshold, totalNodes, minSelectCount)
			for count, amount := range table {
				t.Logf("%3d = %4d", count, amount)
			}
		}
	})
}
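
// TestNodeInfo verifies that the overlay service returns the node type,
// operator, capacity, and version information reported by the storage node
// itself.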
func TestNodeInfo(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.StorageNodes[0].Storage2.Monitor.Loop.Pause()

		node, err := planet.Satellites[0].Overlay.Service.Get(ctx, planet.StorageNodes[0].ID())
		require.NoError(t, err)

		dossier := planet.StorageNodes[0].Contact.Service.Local()

		assert.Equal(t, pb.NodeType_STORAGE, node.Type)
		assert.NotEmpty(t, node.Operator.Email)
		assert.NotEmpty(t, node.Operator.Wallet)
		assert.Equal(t, dossier.Operator, node.Operator)
		assert.NotEmpty(t, node.Capacity.FreeDisk)
		assert.Equal(t, dossier.Capacity, node.Capacity)
		assert.NotEmpty(t, node.Version.Version)
		assert.Equal(t, dossier.Version.Version, node.Version.Version)
	})
}
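
// TestKnownReliable verifies that disqualified, offline, and suspended nodes
// are excluded from the KnownReliable result.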
func TestKnownReliable(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Reputation.AuditHistory = reputation.AuditHistoryConfig{
					WindowSize:               time.Hour,
					TrackingPeriod:           2 * time.Hour,
					GracePeriod:              time.Hour,
					OfflineThreshold:         0.6,
					OfflineDQEnabled:         false,
					OfflineSuspensionEnabled: true,
				}
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		service := satellite.Overlay.Service
		oc := satellite.DB.OverlayCache()

		// Disqualify storage node #0
		_, err := oc.DisqualifyNode(ctx, planet.StorageNodes[0].ID(), time.Now().UTC(), overlay.DisqualificationReasonUnknown)
		require.NoError(t, err)

		// Stop storage node #1
		offlineNode := planet.StorageNodes[1]
		err = planet.StopPeer(offlineNode)
		require.NoError(t, err)
		// set last contact success to 1 hour ago to make node appear offline
		checkInInfo := getNodeInfo(offlineNode.ID())
		err = service.UpdateCheckIn(ctx, checkInInfo, time.Now().Add(-time.Hour))
		require.NoError(t, err)
		// Check that storage node #1 is offline
		node, err := service.Get(ctx, offlineNode.ID())
		require.NoError(t, err)
		require.False(t, service.IsOnline(node))

		// unknown audit suspend storage node #2
		err = oc.TestSuspendNodeUnknownAudit(ctx, planet.StorageNodes[2].ID(), time.Now())
		require.NoError(t, err)

		// offline suspend storage node #3
		err = oc.TestSuspendNodeOffline(ctx, planet.StorageNodes[3].ID(), time.Now())
		require.NoError(t, err)

		// Check that only storage nodes #4 and #5 are reliable
		online, _, err := service.KnownReliable(ctx, []storj.NodeID{
			planet.StorageNodes[0].ID(),
			planet.StorageNodes[1].ID(),
			planet.StorageNodes[2].ID(),
			planet.StorageNodes[3].ID(),
			planet.StorageNodes[4].ID(),
			planet.StorageNodes[5].ID(),
		})
		require.NoError(t, err)
		require.Len(t, online, 2)

		// Sort the storage nodes for predictable checks
		expectedReliable := []storj.NodeURL{
			planet.StorageNodes[4].NodeURL(),
			planet.StorageNodes[5].NodeURL(),
		}
		sort.Slice(expectedReliable, func(i, j int) bool { return expectedReliable[i].ID.Less(expectedReliable[j].ID) })
		sort.Slice(online, func(i, j int) bool { return online[i].ID.Less(online[j].ID) })

		// Assert the reliable nodes are the expected ones
		for i, node := range online {
			assert.Equal(t, expectedReliable[i].ID, node.ID)
			assert.Equal(t, expectedReliable[i].Address, node.Address.Address)
		}
	})
}
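
// TestUpdateCheckIn verifies that UpdateCheckIn inserts a new node record and
// correctly updates the address, version, uptime, and last_offline_email
// fields on subsequent check-ins.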
func TestUpdateCheckIn(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { // setup
		nodeID := storj.NodeID{1, 2, 3}
		expectedEmail := "test@email.test"
		expectedAddress := "1.2.4.4:8080"
		info := overlay.NodeCheckInInfo{
			NodeID: nodeID,
			Address: &pb.NodeAddress{
				Address: expectedAddress,
			},
			IsUp: true,
			Capacity: &pb.NodeCapacity{
				FreeDisk: int64(5678),
			},
			Operator: &pb.NodeOperator{
				Email:          expectedEmail,
				Wallet:         "0x123",
				WalletFeatures: []string{"example"},
			},
			Version: &pb.NodeVersion{
				Version:    "v0.0.0",
				CommitHash: "",
				Timestamp:  time.Time{},
				Release:    false,
			},
			LastIPPort: expectedAddress,
			LastNet:    "1.2.4",
		}
		expectedNode := &overlay.NodeDossier{
			Node: pb.Node{
				Id: nodeID,
				Address: &pb.NodeAddress{
					Address: info.Address.GetAddress(),
				},
			},
			Type: pb.NodeType_STORAGE,
			Operator: pb.NodeOperator{
				Email:          info.Operator.GetEmail(),
				Wallet:         info.Operator.GetWallet(),
				WalletFeatures: info.Operator.GetWalletFeatures(),
			},
			Capacity: pb.NodeCapacity{
				FreeDisk: info.Capacity.GetFreeDisk(),
			},
			Version: pb.NodeVersion{
				Version:    "v0.0.0",
				CommitHash: "",
				Timestamp:  time.Time{},
				Release:    false,
			},
			Reputation: overlay.NodeStats{
				Status: overlay.ReputationStatus{Email: expectedEmail},
			},
			Contained:    false,
			Disqualified: nil,
			PieceCount:   0,
			ExitStatus:   overlay.ExitStatus{NodeID: nodeID},
			LastIPPort:   expectedAddress,
			LastNet:      "1.2.4",
		}

		// confirm the node doesn't exist in nodes table yet
		_, err := db.OverlayCache().Get(ctx, nodeID)
		require.Error(t, err)
		require.Contains(t, err.Error(), "node not found")

		// check-in for that node id, which should add the node
		// to the nodes tables in the database
		startOfTest := time.Now()
		err = db.OverlayCache().UpdateCheckIn(ctx, info, startOfTest.Add(time.Second), overlay.NodeSelectionConfig{})
		require.NoError(t, err)

		// confirm that the node is now in the nodes table with the
		// correct fields set
		actualNode, err := db.OverlayCache().Get(ctx, nodeID)
		require.NoError(t, err)
		require.True(t, actualNode.Reputation.LastContactSuccess.After(startOfTest))
		require.True(t, actualNode.Reputation.LastContactFailure.UTC().Equal(time.Time{}.UTC()))
		actualNode.Address = expectedNode.Address

		// we need to overwrite the times so that the deep equal considers them the same
		expectedNode.Reputation.LastContactSuccess = actualNode.Reputation.LastContactSuccess
		expectedNode.Reputation.LastContactFailure = actualNode.Reputation.LastContactFailure
		expectedNode.Version.Timestamp = actualNode.Version.Timestamp
		expectedNode.CreatedAt = actualNode.CreatedAt
		require.Equal(t, expectedNode, actualNode)

		// confirm that we can update the address field
		startOfUpdateTest := time.Now()
		expectedAddress = "9.8.7.6"
		updatedInfo := overlay.NodeCheckInInfo{
			NodeID: nodeID,
			Address: &pb.NodeAddress{
				Address: expectedAddress,
			},
			IsUp: true,
			Version: &pb.NodeVersion{
				Version:    "v0.1.0",
				CommitHash: "abc123",
				Timestamp:  time.Now(),
				Release:    true,
			},
			LastIPPort: expectedAddress,
			LastNet:    "9.8.7",
		}
		// confirm that the updated node is in the nodes table with the
		// correct updated fields set
		err = db.OverlayCache().UpdateCheckIn(ctx, updatedInfo, startOfUpdateTest.Add(time.Second), overlay.NodeSelectionConfig{})
		require.NoError(t, err)
		updatedNode, err := db.OverlayCache().Get(ctx, nodeID)
		require.NoError(t, err)
		require.True(t, updatedNode.Reputation.LastContactSuccess.After(startOfUpdateTest))
		require.True(t, updatedNode.Reputation.LastContactFailure.Equal(time.Time{}))
		require.Equal(t, updatedNode.Address.GetAddress(), expectedAddress)
		require.Equal(t, updatedInfo.Version.GetVersion(), updatedNode.Version.GetVersion())
		require.Equal(t, updatedInfo.Version.GetCommitHash(), updatedNode.Version.GetCommitHash())
		require.Equal(t, updatedInfo.Version.GetRelease(), updatedNode.Version.GetRelease())
		require.True(t, updatedNode.Version.GetTimestamp().After(info.Version.GetTimestamp()))

		// confirm we can update the IsUp field
		startOfUpdateTest2 := time.Now()
		updatedInfo2 := overlay.NodeCheckInInfo{
			NodeID: nodeID,
			Address: &pb.NodeAddress{
				Address: "9.8.7.6",
			},
			IsUp: false,
			Version: &pb.NodeVersion{
				Version:    "v0.0.0",
				CommitHash: "",
				Timestamp:  time.Time{},
				Release:    false,
			},
		}

		err = db.OverlayCache().UpdateCheckIn(ctx, updatedInfo2, startOfUpdateTest2.Add(time.Second), overlay.NodeSelectionConfig{})
		require.NoError(t, err)
		updated2Node, err := db.OverlayCache().Get(ctx, nodeID)
		require.NoError(t, err)
		require.True(t, updated2Node.Reputation.LastContactSuccess.Equal(updatedNode.Reputation.LastContactSuccess))
		require.True(t, updated2Node.Reputation.LastContactFailure.After(startOfUpdateTest2))

		// check that UpdateCheckIn updates last_offline_email
		require.NoError(t, db.OverlayCache().UpdateLastOfflineEmail(ctx, []storj.NodeID{updated2Node.Id}, time.Now()))
		nodeInfo, err := db.OverlayCache().Get(ctx, updated2Node.Id)
		require.NoError(t, err)
		require.NotNil(t, nodeInfo.LastOfflineEmail)
		lastEmail := nodeInfo.LastOfflineEmail

		// first that it is not updated if node is offline
		require.NoError(t, db.OverlayCache().UpdateCheckIn(ctx, updatedInfo2, time.Now(), overlay.NodeSelectionConfig{}))
		nodeInfo, err = db.OverlayCache().Get(ctx, updated2Node.Id)
		require.NoError(t, err)
		require.Equal(t, lastEmail, nodeInfo.LastOfflineEmail)

		// then that it is nullified if node is online
		updatedInfo2.IsUp = true
		require.NoError(t, db.OverlayCache().UpdateCheckIn(ctx, updatedInfo2, time.Now(), overlay.NodeSelectionConfig{}))
		nodeInfo, err = db.OverlayCache().Get(ctx, updated2Node.Id)
		require.NoError(t, err)
		require.Nil(t, nodeInfo.LastOfflineEmail)
	})
}

// TestSuspendedSelection ensures that suspended nodes are not selected by SelectStorageNodes.
func TestSuspendedSelection(t *testing.T) {
	totalNodes := 10

	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		cache := db.OverlayCache()
		suspendedIDs := make(map[storj.NodeID]bool)
		config := overlay.NodeSelectionConfig{}

		// put nodes in cache
		for i := 0; i < totalNodes; i++ {
			newID := testrand.NodeID()
			addr := fmt.Sprintf("127.0.%d.0:8080", i)
			lastNet := fmt.Sprintf("127.0.%d", i)
			d := overlay.NodeCheckInInfo{
				NodeID:     newID,
				Address:    &pb.NodeAddress{Address: addr},
				LastIPPort: addr,
				LastNet:    lastNet,
				Version:    &pb.NodeVersion{Version: "v1.0.0"},
				Capacity:   &pb.NodeCapacity{},
				IsUp:       true,
			}
			err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), config)
			require.NoError(t, err)

			if i%2 == 0 { // make half of nodes "new" and half "vetted"
				_, err = cache.TestVetNode(ctx, newID)
				require.NoError(t, err)
			}

			// suspend the first four nodes (2 new, 2 vetted)
			// 2 offline suspended and 2 unknown audit suspended
			if i < 4 {
				if i < 2 {
					err = cache.TestSuspendNodeOffline(ctx, newID, time.Now())
					require.NoError(t, err)
					continue
				}
				err = cache.TestSuspendNodeUnknownAudit(ctx, newID, time.Now())
				require.NoError(t, err)
				suspendedIDs[newID] = true
			}
		}

		var nodes []*uploadselection.SelectedNode
		var err error

		numNodesToSelect := 10

		// select 10 vetted nodes - 5 vetted, 2 suspended, so expect 3
		nodes, err = cache.SelectStorageNodes(ctx, numNodesToSelect, 0, &overlay.NodeCriteria{
			OnlineWindow: time.Hour,
		})
		require.NoError(t, err)
		require.Len(t, nodes, 3)
		for _, node := range nodes {
			require.False(t, suspendedIDs[node.ID])
		}

		// select 10 new nodes - 5 new, 2 suspended, so expect 3
		nodes, err = cache.SelectStorageNodes(ctx, numNodesToSelect, numNodesToSelect, &overlay.NodeCriteria{
			OnlineWindow: time.Hour,
		})
		require.NoError(t, err)
		require.Len(t, nodes, 3)
		for _, node := range nodes {
			require.False(t, suspendedIDs[node.ID])
		}
	})
}
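
// TestUpdateReputation verifies that UpdateReputation persists suspension,
// vetting, and disqualification timestamps, and that a node which has
// completed graceful exit is not disqualified afterwards.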
func TestUpdateReputation(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		overlaydb := planet.Satellites[0].Overlay.DB
		node := planet.StorageNodes[0]

		info, err := service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Nil(t, info.Disqualified)
		require.Nil(t, info.UnknownAuditSuspended)
		require.Nil(t, info.OfflineSuspended)
		require.Nil(t, info.Reputation.Status.VettedAt)

		t0 := time.Now().Truncate(time.Hour)
		t1 := t0.Add(time.Hour)
		t2 := t0.Add(2 * time.Hour)
		t3 := t0.Add(3 * time.Hour)

		reputationUpdate := overlay.ReputationUpdate{
			Disqualified:          nil,
			UnknownAuditSuspended: &t1,
			OfflineSuspended:      &t2,
			VettedAt:              &t3,
		}
		repChange := []nodeevents.Type{nodeevents.UnknownAuditSuspended, nodeevents.OfflineSuspended}
		err = service.UpdateReputation(ctx, node.ID(), "", reputationUpdate, repChange)
		require.NoError(t, err)

		info, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Equal(t, reputationUpdate.Disqualified, info.Disqualified)
		require.Equal(t, reputationUpdate.UnknownAuditSuspended, info.UnknownAuditSuspended)
		require.Equal(t, reputationUpdate.OfflineSuspended, info.OfflineSuspended)
		require.Equal(t, reputationUpdate.VettedAt, info.Reputation.Status.VettedAt)

		reputationUpdate.Disqualified = &t0
		repChange = []nodeevents.Type{nodeevents.Disqualified}
		err = service.UpdateReputation(ctx, node.ID(), "", reputationUpdate, repChange)
		require.NoError(t, err)

		info, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Equal(t, reputationUpdate.Disqualified, info.Disqualified)

		nodeInfo, err := overlaydb.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
			NodeID:              node.ID(),
			ExitInitiatedAt:     t0,
			ExitLoopCompletedAt: t1,
			ExitFinishedAt:      t1,
			ExitSuccess:         true,
		})
		require.NoError(t, err)
		require.NotNil(t, nodeInfo.ExitStatus.ExitFinishedAt)

		// make sure Disqualified field is not updated if a node has finished
		// graceful exit
		reputationUpdate.Disqualified = &t0
		err = service.UpdateReputation(ctx, node.ID(), "", reputationUpdate, repChange)
		require.NoError(t, err)

		exitedNodeInfo, err := service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Equal(t, info.Disqualified, exitedNodeInfo.Disqualified)
	})
}
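
// getNodeInfo returns a minimal NodeCheckInInfo for the given node ID, used
// as check-in input by several tests.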
func getNodeInfo(nodeID storj.NodeID) overlay.NodeCheckInInfo {
	return overlay.NodeCheckInInfo{
		NodeID: nodeID,
		IsUp:   true,
		Address: &pb.NodeAddress{
			Address: "1.2.3.4",
		},
		Operator: &pb.NodeOperator{
			Email:  "test@email.test",
			Wallet: "0x123",
		},
		Version: &pb.NodeVersion{
			Version:    "v3.0.0",
			CommitHash: "",
			Timestamp:  time.Time{},
			Release:    false,
		},
	}
}
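
// TestVetAndUnvetNode verifies that TestVetNode and TestUnvetNode set and
// clear the node's vetted timestamp.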
func TestVetAndUnvetNode(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]

		// clear existing data
		err := service.TestUnvetNode(ctx, node.ID())
		require.NoError(t, err)
		dossier, err := service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Nil(t, dossier.Reputation.Status.VettedAt)

		// vet again
		vettedTime, err := service.TestVetNode(ctx, node.ID())
		require.NoError(t, err)
		require.NotNil(t, vettedTime)
		dossier, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.NotNil(t, dossier.Reputation.Status.VettedAt)

		// unvet again
		err = service.TestUnvetNode(ctx, node.ID())
		require.NoError(t, err)
		dossier, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Nil(t, dossier.Reputation.Status.VettedAt)
	})
}
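
// TestKnownReliableInExcludedCountries verifies that a node placed in an
// excluded country (here "FR") is returned by
// KnownReliableInExcludedCountries.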
func TestKnownReliableInExcludedCountries(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]

		onlineNodes, _, err := service.Reliable(ctx)
		require.NoError(t, err)
		require.Len(t, onlineNodes, 2)

		err = planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "FR")
		require.NoError(t, err)

		// first node should be excluded from Reliable result because of country code
		nodes, err := service.KnownReliableInExcludedCountries(ctx, storj.NodeIDList{onlineNodes[0].ID, onlineNodes[1].ID})
		require.NoError(t, err)
		require.Len(t, nodes, 1)
		require.Equal(t, node.ID(), nodes[0])
	})
}
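
// TestUpdateReputationNodeEvents verifies that reputation changes
// (suspensions, unsuspensions, and disqualification) are recorded in the
// node events DB when node emails are enabled.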
func TestUpdateReputationNodeEvents(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.SendNodeEmails = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]
		email := "test@storj.test"
		neDB := planet.Satellites[0].DB.NodeEvents()

		now := time.Now()
		repUpdate := overlay.ReputationUpdate{
			UnknownAuditSuspended: &now,
		}

		repChanges := []nodeevents.Type{nodeevents.UnknownAuditSuspended}

		require.NoError(t, service.UpdateReputation(ctx, node.ID(), email, repUpdate, repChanges))

		ne, err := neDB.GetLatestByEmailAndEvent(ctx, email, nodeevents.UnknownAuditSuspended)
		require.NoError(t, err)
		require.Equal(t, email, ne.Email)
		require.Equal(t, node.ID(), ne.NodeID)
		require.Equal(t, nodeevents.UnknownAuditSuspended, ne.Event)

		repUpdate.UnknownAuditSuspended = nil
		repChanges = []nodeevents.Type{nodeevents.UnknownAuditUnsuspended}
		require.NoError(t, service.UpdateReputation(ctx, node.ID(), "test@storj.test", repUpdate, repChanges))

		ne, err = neDB.GetLatestByEmailAndEvent(ctx, email, nodeevents.UnknownAuditUnsuspended)
		require.NoError(t, err)
		require.Equal(t, email, ne.Email)
		require.Equal(t, node.ID(), ne.NodeID)
		require.Equal(t, nodeevents.UnknownAuditUnsuspended, ne.Event)

		repUpdate.OfflineSuspended = &now
		repChanges = []nodeevents.Type{nodeevents.OfflineSuspended}
		require.NoError(t, service.UpdateReputation(ctx, node.ID(), "test@storj.test", repUpdate, repChanges))

		ne, err = neDB.GetLatestByEmailAndEvent(ctx, email, nodeevents.OfflineSuspended)
		require.NoError(t, err)
		require.Equal(t, email, ne.Email)
		require.Equal(t, node.ID(), ne.NodeID)
		require.Equal(t, nodeevents.OfflineSuspended, ne.Event)

		repUpdate.OfflineSuspended = nil
		repChanges = []nodeevents.Type{nodeevents.OfflineUnsuspended}
		require.NoError(t, service.UpdateReputation(ctx, node.ID(), "test@storj.test", repUpdate, repChanges))

		ne, err = neDB.GetLatestByEmailAndEvent(ctx, email, nodeevents.OfflineUnsuspended)
		require.NoError(t, err)
		require.Equal(t, email, ne.Email)
		require.Equal(t, node.ID(), ne.NodeID)
		require.Equal(t, nodeevents.OfflineUnsuspended, ne.Event)

		repUpdate.Disqualified = &now
		repChanges = []nodeevents.Type{nodeevents.Disqualified}
		require.NoError(t, service.UpdateReputation(ctx, node.ID(), "test@storj.test", repUpdate, repChanges))

		ne, err = neDB.GetLatestByEmailAndEvent(ctx, email, nodeevents.Disqualified)
		require.NoError(t, err)
		require.Equal(t, email, ne.Email)
		require.Equal(t, node.ID(), ne.NodeID)
		require.Equal(t, nodeevents.Disqualified, ne.Event)
	})
}
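
// TestDisqualifyNodeEmails verifies that disqualifying a node inserts a
// Disqualified node event for the operator's email.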
func TestDisqualifyNodeEmails(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.SendNodeEmails = true
				config.Overlay.Node.OnlineWindow = 4 * time.Hour
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]
		node.Contact.Chore.Pause(ctx)

		require.NoError(t, service.DisqualifyNode(ctx, node.ID(), overlay.DisqualificationReasonUnknown))

		ne, err := planet.Satellites[0].DB.NodeEvents().GetLatestByEmailAndEvent(ctx, node.Config.Operator.Email, nodeevents.Disqualified)
		require.NoError(t, err)
		require.Equal(t, node.ID(), ne.NodeID)
	})
}
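
// TestUpdateCheckInNodeEventOnline verifies that a node coming back online
// after being offline triggers an Online node event.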
func TestUpdateCheckInNodeEventOnline(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.SendNodeEmails = true
				config.Overlay.Node.OnlineWindow = 4 * time.Hour
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]
		node.Contact.Chore.Pause(ctx)

		checkInInfo := getNodeInfo(node.ID())
		require.NoError(t, service.UpdateCheckIn(ctx, checkInInfo, time.Now().Add(-24*time.Hour)))
		require.NoError(t, service.UpdateCheckIn(ctx, checkInInfo, time.Now()))

		ne, err := planet.Satellites[0].DB.NodeEvents().GetLatestByEmailAndEvent(ctx, checkInInfo.Operator.Email, nodeevents.Online)
		require.NoError(t, err)
		require.Equal(t, node.ID(), ne.NodeID)
	})
}
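
// TestUpdateCheckInBelowMinVersionEvent verifies that checking in with a
// version below the configured minimum records a BelowMinVersion node event
// and an email timestamp, and that repeat emails are rate limited by the
// software update email cooldown.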
func TestUpdateCheckInBelowMinVersionEvent(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.SendNodeEmails = true
				// testplanet storagenode default version is "v0.0.1".
				// set this as minimum version so storagenode doesn't start below it.
				config.Overlay.Node.MinimumVersion = "v0.0.1"
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		service := planet.Satellites[0].Overlay.Service
		node := planet.StorageNodes[0]
		node.Contact.Chore.Pause(ctx)
		email := node.Config.Operator.Email

		getNE := func() nodeevents.NodeEvent {
			ne, err := planet.Satellites[0].DB.NodeEvents().GetLatestByEmailAndEvent(ctx, email, nodeevents.BelowMinVersion)
			require.NoError(t, err)
			require.Equal(t, node.ID(), ne.NodeID)
			require.Equal(t, email, ne.Email)
			require.Equal(t, nodeevents.BelowMinVersion, ne.Event)
			return ne
		}

		nd, err := service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Nil(t, nd.LastSoftwareUpdateEmail)

		// Set version below minimum
		now := time.Now()
		checkInInfo := getNodeInfo(node.ID())
		checkInInfo.Operator.Email = email

		checkInInfo.Version = &pb.NodeVersion{Version: "v0.0.0"}
		require.NoError(t, service.UpdateCheckIn(ctx, checkInInfo, now))

		nd, err = service.Get(ctx, node.ID())
		require.NoError(t, err)

		lastEmail := nd.LastSoftwareUpdateEmail
		require.NotNil(t, lastEmail)

		// check that software update node event was inserted into nodeevents.DB
		ne0 := getNE()
		require.True(t, ne0.CreatedAt.After(now))

		// check in again and check that another email wasn't sent
		now = now.Add(24 * time.Hour)
		require.NoError(t, service.UpdateCheckIn(ctx, checkInInfo, now))

		nd, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.Equal(t, lastEmail, nd.LastSoftwareUpdateEmail)

		// a node event should not have been inserted, so should be the same as the last node event
		ne1 := getNE()
		require.Equal(t, ne1.CreatedAt, ne0.CreatedAt)

		// check in again after cooldown period has passed and check that email was sent
		require.NoError(t, service.UpdateCheckIn(ctx, checkInInfo, now.Add(planet.Satellites[0].Config.Overlay.NodeSoftwareUpdateEmailCooldown)))

		nd, err = service.Get(ctx, node.ID())
		require.NoError(t, err)
		require.NotNil(t, nd.LastSoftwareUpdateEmail)
		require.True(t, nd.LastSoftwareUpdateEmail.After(*lastEmail))

		ne2 := getNE()
		require.True(t, ne2.CreatedAt.After(ne1.CreatedAt))
	})
}
|