Relocate IP Lookup to ensure it is always set (#2061)

parent 04c20b0ac0
commit 4b75752d6b
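
In short: the IP lookup that the transport client performed inside DialNode moves into the overlay cache's Put, so a node's LastIp is resolved and stored whenever its address is saved, and testplanet gains a NewIPCount reconfigure option so tests can give the last few storage nodes distinct loopback addresses. The hunks below (testplanet, overlay, overlay tests, transport) show the corresponding additions and removals.
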
@@ -625,6 +625,14 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatelliteIDs []strin
			planet.config.Reconfigure.StorageNode(i, &config)
		}

		newIPCount := planet.config.Reconfigure.NewIPCount
		if newIPCount > 0 {
			if i >= count-newIPCount {
				config.Server.Address = fmt.Sprintf("127.0.0.%d:0", i+1)
				config.Server.PrivateAddress = fmt.Sprintf("127.0.0.%d:0", i+1)
			}
		}

		verInfo := planet.NewVersionInfo()

		peer, err := storagenode.New(log, identity, db, config, verInfo)

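With the block above, setting Reconfigure.NewIPCount = N gives the last N storage nodes their own loopback addresses instead of the shared default (typically 127.0.0.1 in testplanet). A small illustration of the index arithmetic, using the values TestDistinctIPs sets later in this commit (10 storage nodes, NewIPCount: 3); this is only a sketch, not testplanet code:

package main

import "fmt"

func main() {
	count, newIPCount := 10, 3 // values used by TestDistinctIPs in this commit
	for i := 0; i < count; i++ {
		if newIPCount > 0 && i >= count-newIPCount {
			// nodes 7, 8 and 9 get 127.0.0.8:0, 127.0.0.9:0 and 127.0.0.10:0
			fmt.Printf("storage node %d -> 127.0.0.%d:0\n", i, i+1)
		}
	}
}
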
@@ -21,6 +21,7 @@ type Reconfigure struct {

	NewStorageNodeDB func(index int) (storagenode.DB, error)
	StorageNode      func(index int, config *storagenode.Config)
	NewIPCount       int
}

// DisablePeerCAWhitelist returns a `Reconfigure` that sets `UsePeerCAWhitelist` for

@@ -6,6 +6,7 @@ package overlay
import (
	"context"
	"errors"
	"net"
	"time"

	"github.com/zeebo/errs"

@@ -264,6 +265,14 @@ func (cache *Cache) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node)
	if nodeID != value.Id {
		return errors.New("invalid request")
	}
	if value.Address == nil {
		return errors.New("node has no address")
	}
	//Resolve IP Address to ensure it is set
	value.LastIp, err = getIP(value.Address.Address)
	if err != nil {
		return OverlayError.Wrap(err)
	}
	return cache.db.UpdateAddress(ctx, &value)
}

@@ -340,3 +349,15 @@ func (cache *Cache) GetMissingPieces(ctx context.Context, pieces []*pb.RemotePie
	}
	return missingPieces, nil
}

func getIP(target string) (string, error) {
	host, _, err := net.SplitHostPort(target)
	if err != nil {
		return "", err
	}
	ipAddr, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return "", err
	}
	return ipAddr.String(), nil
}

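getIP splits the host out of a host:port string and resolves it with the system resolver, returning the IP as text. A standalone sketch of the same two standard-library calls (output depends on the local resolver; localhost typically resolves to 127.0.0.1 or ::1):

package main

import (
	"fmt"
	"net"
)

func main() {
	// "localhost:7777" is an arbitrary example address; the port is split off and ignored.
	host, _, err := net.SplitHostPort("localhost:7777")
	if err != nil {
		panic(err)
	}
	ipAddr, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		panic(err)
	}
	fmt.Println(ipAddr.String()) // e.g. "127.0.0.1"
}
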
@@ -36,6 +36,7 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {
	valid1ID := storj.NodeID{}
	valid2ID := storj.NodeID{}
	missingID := storj.NodeID{}
	address := &pb.NodeAddress{Address: "127.0.0.1:0"}

	_, _ = rand.Read(valid1ID[:])
	_, _ = rand.Read(valid2ID[:])

@@ -44,12 +45,12 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {
	cache := overlay.NewCache(zaptest.NewLogger(t), store, overlay.NodeSelectionConfig{OnlineWindow: time.Hour})

	{ // Put
		err := cache.Put(ctx, valid1ID, pb.Node{Id: valid1ID})
		err := cache.Put(ctx, valid1ID, pb.Node{Id: valid1ID, Address: address})
		if err != nil {
			t.Fatal(err)
		}

		err = cache.Put(ctx, valid2ID, pb.Node{Id: valid2ID})
		err = cache.Put(ctx, valid2ID, pb.Node{Id: valid2ID, Address: address})
		if err != nil {
			t.Fatal(err)
		}

@@ -4,20 +4,18 @@
package overlay_test

import (
	"strconv"
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
)

func TestOffline(t *testing.T) {

@@ -221,17 +219,15 @@ func TestNodeSelection(t *testing.T) {
}

func TestDistinctIPs(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Test does not work with macOS")
	}
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Discovery.RefreshInterval = 60 * time.Second
				config.Discovery.DiscoveryInterval = 60 * time.Second
				config.Discovery.GraveyardInterval = 60 * time.Second
			},
			NewIPCount: 3,
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		var err error
		satellite := planet.Satellites[0]
		service := satellite.Overlay.Service
		tests := []struct {

@@ -242,8 +238,7 @@ func TestDistinctIPs(t *testing.T) {
			shouldFailWith *errs.Class
		}{
			{ // test only distinct IPs with half new nodes
				duplicateCount: 7,
				requestCount:   4,
				requestCount:   4,
				preferences: overlay.NodeSelectionConfig{
					AuditCount:        1,
					NewNodePercentage: 0.5,

@@ -251,19 +246,8 @@ func TestDistinctIPs(t *testing.T) {
					DistinctIP:        true,
				},
			},
			{
				duplicateCount: 9,
				requestCount:   2,
				preferences: overlay.NodeSelectionConfig{
					AuditCount:        0,
					NewNodePercentage: 0,
					OnlineWindow:      time.Hour,
					DistinctIP:        true,
				},
			},
			{ // test not enough distinct IPs
				duplicateCount: 7,
				requestCount:   7,
				requestCount:   7,
				preferences: overlay.NodeSelectionConfig{
					AuditCount:        0,
					NewNodePercentage: 0,

@@ -295,18 +279,6 @@ func TestDistinctIPs(t *testing.T) {
		}

		for _, tt := range tests {
			// update node last IPs
			for i := 0; i < 10; i++ {
				node := planet.StorageNodes[i].Local().Node
				if i < tt.duplicateCount {
					node.LastIp = "01.23.45.67"
				} else {
					node.LastIp = strconv.Itoa(i)
				}
				err = service.Put(ctx, planet.StorageNodes[i].ID(), node)
				require.NoError(t, err)
			}

			response, err := service.FindStorageNodesWithPreferences(ctx, overlay.FindStorageNodesRequest{
				FreeBandwidth: 0,
				FreeDisk:      0,

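Per the hunk header above (18 old lines become 6), the loop that stamped node.LastIp by hand, "01.23.45.67" for the duplicates and strconv.Itoa(i) for the rest, is on the removal side of this change: with the cache resolving LastIp inside Put, the test no longer needs to fake it.
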
@@ -105,12 +105,6 @@ func (transport *Transport) DialNode(ctx context.Context, node *pb.Node, opts ..
		return nil, Error.Wrap(err)
	}

	ipAddr, err := getIP(conn.Target())
	if err != nil {
		return nil, err
	}
	node.LastIp = ipAddr

	alertSuccess(timedCtx, transport.observers, node)

	return conn, nil

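Both transport hunks are deletions (12 lines become 6 above, and 18 become 6 below): DialNode no longer resolves conn.Target() into node.LastIp, and the package-local getIP helper goes away entirely, since the overlay cache now owns that lookup in Put.
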
@@ -160,18 +154,6 @@ func (transport *Transport) WithObservers(obs ...Observer) Client {
	return tr
}

func getIP(target string) (string, error) {
	host, _, err := net.SplitHostPort(target)
	if err != nil {
		return "", err
	}
	ipAddr, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return "", err
	}
	return ipAddr.String(), nil
}

func alertFail(ctx context.Context, obs []Observer, node *pb.Node, err error) {
	for _, o := range obs {
		o.ConnFailure(ctx, node, err)