satellite/overlay: remove old updateaddress method

The UpdateAddress method used to be called when storage nodes checked in with the Satellite, but once the contact service was created it was no longer used. This PR finally removes it.
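
In practice, every caller swaps an overlay.NodeDossier passed to UpdateAddress for an overlay.NodeCheckInInfo passed to UpdateCheckIn, the same method the contact service already drives on check-in. A minimal sketch of the new call shape, distilled from the test updates below (the address, subnet, and version values are illustrative only):

    // Old: cache.UpdateAddress(ctx, &overlay.NodeDossier{...}, config)
    // New: describe the node as a check-in and record it with a timestamp.
    d := overlay.NodeCheckInInfo{
        NodeID:     nodeID,
        Address:    &pb.NodeAddress{Address: "127.0.0.1:8080"},
        LastIPPort: "127.0.0.1:8080",
        LastNet:    "127.0.0",
        Version:    &pb.NodeVersion{Version: "v1.0.0"},
        IsUp:       true,
    }
    err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})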

Change-Id: Ib3f83c8003269671d97d54f21ee69665fa663f24
Author: Jessica Grebenschikov (2020-04-29 10:58:41 -07:00), committed by Egon Elbre
parent 4f492a1ca9
commit 6a6427526b
8 changed files with 166 additions and 164 deletions

---

@@ -4,6 +4,7 @@
package gracefulexit_test
import (
"fmt"
"testing"
"time"
@@ -41,10 +42,16 @@ func TestGetExitingNodes(t *testing.T) {
{testrand.NodeID(), time.Now(), time.Time{}, time.Now(), false, false},
}
for _, data := range testData {
n := pb.Node{Id: data.nodeID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
for i, data := range testData {
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{NodeID: data.nodeID,
Address: &pb.NodeAddress{Address: addr},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
req := &overlay.ExitStatusRequest{
@@ -117,10 +124,16 @@ func TestGetGracefulExitNodesByTimeframe(t *testing.T) {
{testrand.NodeID(), time.Time{}, time.Time{}, time.Time{}},
}
for _, data := range testData {
n := pb.Node{Id: data.nodeID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
for i, data := range testData {
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{NodeID: data.nodeID,
Address: &pb.NodeAddress{Address: addr},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
req := &overlay.ExitStatusRequest{

---

@@ -41,10 +41,18 @@ func BenchmarkOverlay(b *testing.B) {
}
}
for _, id := range all {
n := pb.Node{Id: id}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
for i, id := range all {
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: id,
Address: &pb.NodeAddress{Address: addr},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
IsUp: true,
}
err := overlaydb.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(b, err)
}
@@ -66,12 +74,19 @@ func BenchmarkOverlay(b *testing.B) {
}
})
b.Run("UpdateAddress", func(b *testing.B) {
b.Run("UpdateCheckIn", func(b *testing.B) {
for i := 0; i < b.N; i++ {
id := all[i%len(all)]
n := pb.Node{Id: id}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: id,
Address: &pb.NodeAddress{Address: addr},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
}
err := overlaydb.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(b, err)
}
})
@@ -220,15 +235,18 @@ func BenchmarkNodeSelection(b *testing.B) {
excludedNets = append(excludedNets, lastNet)
}
dossier := &overlay.NodeDossier{
Node: pb.Node{Id: nodeID},
LastIPPort: address + ":12121",
addr := address + ":12121"
d := overlay.NodeCheckInInfo{
NodeID: nodeID,
Address: &pb.NodeAddress{Address: addr},
LastIPPort: addr,
LastNet: lastNet,
Capacity: pb.NodeCapacity{
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{
FreeDisk: 1_000_000_000,
},
}
err := overlaydb.UpdateAddress(ctx, dossier, nodeSelectionConfig)
err := overlaydb.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(b, err)
_, err = overlaydb.UpdateNodeInfo(ctx, nodeID, &pb.InfoResponse{

---

@@ -4,8 +4,10 @@
package overlay_test
import (
"fmt"
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
@@ -33,16 +35,18 @@ func TestDB_PieceCounts(t *testing.T) {
nodes[i].PieceCount = int(math.Pow10(i + 1))
}
for _, node := range nodes {
n := pb.Node{
Id: node.ID,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: "0.0.0.0",
},
for i, node := range nodes {
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: node.ID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
}
d := overlay.NodeDossier{Node: n, LastIPPort: "0.0.0.0", LastNet: "0.0.0.0"}
require.NoError(t, overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{}))
err := overlaydb.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
}
// check that they are initialized to zero
@@ -85,16 +89,20 @@ func BenchmarkDB_PieceCounts(b *testing.B) {
counts[testrand.NodeID()] = testrand.Intn(100000)
}
var i int
for nodeID := range counts {
n := pb.Node{
Id: nodeID,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: "0.0.0.0",
},
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
i++
d := overlay.NodeCheckInInfo{
NodeID: nodeID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
}
d := overlay.NodeDossier{Node: n, LastIPPort: "0.0.0.0", LastNet: "0.0.0.0"}
require.NoError(b, overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{}))
err := overlaydb.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(b, err)
}
b.Run("Update", func(b *testing.B) {

---

@@ -145,6 +145,10 @@ func TestOffline(t *testing.T) {
}
func TestEnsureMinimumRequested(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skip("Test does not work with macOS")
}
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
@@ -305,7 +309,15 @@ func testNodeSelection(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
// ensure all storagenodes are in overlay
for _, storageNode := range planet.StorageNodes {
err := satellite.Overlay.Service.Put(ctx, storageNode.ID(), storageNode.Local().Node)
n := storageNode.Local()
d := overlay.NodeCheckInInfo{
NodeID: storageNode.ID(),
Address: n.Address,
LastIPPort: storageNode.Addr(),
LastNet: n.LastNet,
Version: &n.Version,
}
err := satellite.Overlay.DB.UpdateCheckIn(ctx, d, time.Now().UTC(), satellite.Config.Overlay.Node)
assert.NoError(t, err)
}

---

@@ -53,8 +53,6 @@ type DB interface {
KnownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) ([]*pb.Node, error)
// Reliable returns all nodes that are reliable
Reliable(context.Context, *NodeCriteria) (storj.NodeIDList, error)
// Update updates node address
UpdateAddress(ctx context.Context, value *NodeDossier, defaults NodeSelectionConfig) error
// BatchUpdateStats updates multiple storagenode's stats in one transaction
BatchUpdateStats(ctx context.Context, updateRequests []*UpdateRequest, batchSize int) (failed storj.NodeIDList, err error)
// UpdateStats all parts of single storagenode's stats.
@@ -386,37 +384,6 @@ func (service *Service) Reliable(ctx context.Context) (nodes storj.NodeIDList, err error) {
return service.db.Reliable(ctx, criteria)
}
// Put adds a node id and proto definition into the overlay.
func (service *Service) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node) (err error) {
defer mon.Task()(&ctx)(&err)
// If we get a Node without an ID
// we don't want to add to the database
if nodeID.IsZero() {
return nil
}
if nodeID != value.Id {
return errors.New("invalid request")
}
if value.Address == nil {
return errors.New("node has no address")
}
// Resolve the IP and the subnet from the address that is sent
resolvedIPPort, resolvedNetwork, err := ResolveIPAndNetwork(ctx, value.Address.Address)
if err != nil {
return Error.Wrap(err)
}
n := NodeDossier{
Node: value,
LastNet: resolvedNetwork,
LastIPPort: resolvedIPPort,
}
return service.db.UpdateAddress(ctx, &n, service.config.Node)
}
// BatchUpdateStats updates multiple storagenode's stats in one transaction
func (service *Service) BatchUpdateStats(ctx context.Context, requests []*UpdateRequest) (failed storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)

---

@@ -5,6 +5,7 @@ package overlay_test
import (
"context"
"fmt"
"reflect"
"sort"
"testing"
@@ -55,19 +56,30 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {
valid3ID := testrand.NodeID()
missingID := testrand.NodeID()
address := &pb.NodeAddress{Address: "127.0.0.1:0"}
lastNet := "127.0.0"
nodeSelectionConfig := testNodeSelectionConfig(0, 0, false)
serviceConfig := overlay.Config{Node: nodeSelectionConfig, UpdateStatsBatchSize: 100}
service := overlay.NewService(zaptest.NewLogger(t), store, serviceConfig)
d := overlay.NodeCheckInInfo{
Address: address,
LastIPPort: address.Address,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
IsUp: true,
}
{ // Put
err := service.Put(ctx, valid1ID, pb.Node{Id: valid1ID, Address: address})
d.NodeID = valid1ID
err := store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
require.NoError(t, err)
err = service.Put(ctx, valid2ID, pb.Node{Id: valid2ID, Address: address})
d.NodeID = valid2ID
err = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
require.NoError(t, err)
err = service.Put(ctx, valid3ID, pb.Node{Id: valid3ID, Address: address})
d.NodeID = valid3ID
err = store.UpdateCheckIn(ctx, d, time.Now().UTC(), nodeSelectionConfig)
require.NoError(t, err)
// disqualify one node
@@ -153,14 +165,18 @@ func TestRandomizedSelection(t *testing.T) {
// put nodes in cache
for i := 0; i < totalNodes; i++ {
newID := testrand.NodeID()
n := pb.Node{Id: newID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, defaults)
require.NoError(t, err)
_, err = cache.UpdateNodeInfo(ctx, newID, &pb.InfoResponse{
Type: pb.NodeType_STORAGE,
Capacity: &pb.NodeCapacity{},
})
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: newID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
IsUp: true,
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), defaults)
require.NoError(t, err)
if i%2 == 0 { // make half of nodes "new" and half "vetted"
@@ -506,14 +522,18 @@ func TestCache_DowntimeTracking(t *testing.T) {
// put nodes in cache
for i := 0; i < totalNodes; i++ {
newID := testrand.NodeID()
n := pb.Node{Id: newID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, defaults)
require.NoError(t, err)
_, err = cache.UpdateNodeInfo(ctx, newID, &pb.InfoResponse{
Type: pb.NodeType_STORAGE,
Capacity: &pb.NodeCapacity{},
})
addr := fmt.Sprintf("127.0.%d.0:8080", 0)
lastNet := fmt.Sprintf("127.0.%d", 0)
d := overlay.NodeCheckInInfo{
NodeID: newID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
IsUp: true,
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), defaults)
require.NoError(t, err)
allIDs[i] = newID
@@ -599,14 +619,18 @@ func TestSuspendedSelection(t *testing.T) {
// put nodes in cache
for i := 0; i < totalNodes; i++ {
newID := testrand.NodeID()
n := pb.Node{Id: newID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, defaults)
require.NoError(t, err)
_, err = cache.UpdateNodeInfo(ctx, newID, &pb.InfoResponse{
Type: pb.NodeType_STORAGE,
Capacity: &pb.NodeCapacity{},
})
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: newID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
IsUp: true,
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), defaults)
require.NoError(t, err)
if i%2 == 0 { // make half of nodes "new" and half "vetted"

---

@@ -5,6 +5,7 @@ package overlay_test
import (
"context"
"fmt"
"testing"
"time"
@@ -30,7 +31,7 @@ func TestStatDB(t *testing.T) {
func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
{ // TestKnownUnreliableOrOffline
for _, tt := range []struct {
for i, tt := range []struct {
nodeID storj.NodeID
suspended bool
disqualified bool
@@ -43,11 +44,18 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
{storj.NodeID{4}, false, false, true, false}, // offline
{storj.NodeID{5}, false, false, false, true}, // gracefully exited
} {
startingRep := overlay.NodeSelectionConfig{}
n := pb.Node{Id: tt.nodeID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, startingRep)
addr := fmt.Sprintf("127.0.%d.0:8080", i)
lastNet := fmt.Sprintf("127.0.%d", i)
d := overlay.NodeCheckInInfo{
NodeID: tt.nodeID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
IsUp: true,
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
if tt.suspended {
@@ -95,10 +103,17 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
{ // TestUpdateOperator
nodeID := storj.NodeID{10}
n := pb.Node{Id: nodeID}
d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
addr := "127.0.1.0:8080"
lastNet := "127.0.1"
d := overlay.NodeCheckInInfo{
NodeID: nodeID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
update, err := cache.UpdateNodeInfo(ctx, nodeID, &pb.InfoResponse{

---

@@ -322,61 +322,6 @@ func (cache *overlaycache) Reliable(ctx context.Context, criteria *overlay.NodeCriteria) (nodes storj.NodeIDList, err error) {
return nodes, Error.Wrap(rows.Err())
}
// Update updates node address
func (cache *overlaycache) UpdateAddress(ctx context.Context, info *overlay.NodeDossier, defaults overlay.NodeSelectionConfig) (err error) {
defer mon.Task()(&ctx)(&err)
if info == nil || info.Id.IsZero() {
return overlay.ErrEmptyNode
}
address := info.Address
if address == nil {
address = &pb.NodeAddress{}
}
query := `
INSERT INTO nodes
(
id, address, last_net, protocol, type,
email, wallet, free_disk,
uptime_success_count, total_uptime_count,
last_contact_success,
last_contact_failure,
audit_reputation_alpha, audit_reputation_beta,
major, minor, patch, hash, timestamp, release,
last_ip_port
)
VALUES (
$1, $2, $3, $4, $5,
'', '', -1,
0, 0,
$8::timestamptz,
'0001-01-01 00:00:00+00'::timestamptz,
$6, $7,
0, 0, 0, '', '0001-01-01 00:00:00+00'::timestamptz, false,
$9
)
ON CONFLICT (id)
DO UPDATE
SET
address=$2,
last_net=$3,
protocol=$4,
last_ip_port=$9
`
_, err = cache.db.ExecContext(ctx, query,
// args $1 - $5
info.Id.Bytes(), address.Address, info.LastNet, int(address.Transport), int(pb.NodeType_INVALID),
// args $6 - $7
1, 0,
// args $8
time.Now(),
// args $9
info.LastIPPort,
)
return Error.Wrap(err)
}
// BatchUpdateStats updates multiple storagenode's stats in one transaction
func (cache *overlaycache) BatchUpdateStats(ctx context.Context, updateRequests []*overlay.UpdateRequest, batchSize int) (failed storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)