storagenode: remove dependency to overlay.NodeDossier

This is the last dependency from storage node to satellite.

Change-Id: I12f7abb91e84f823ba5af126c6e2979519838612
This commit is contained in:
Egon Elbre 2020-05-20 16:40:25 +03:00
parent b42778c42e
commit bef84a5f9d
10 changed files with 91 additions and 52 deletions

1
go.sum
View File

@@ -648,6 +648,7 @@ storj.io/common v0.0.0-20200517125204-ceb772d8c054 h1:ikw3cNJFMUNrXyctEb3ZnpJGYZ
storj.io/common v0.0.0-20200517125204-ceb772d8c054/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ=
storj.io/common v0.0.0-20200519144636-6a729faf9037 h1:CbUn4bph75bE4icyP8gyw+TcrFxWmu2Xzh0Y8OcP+x0=
storj.io/common v0.0.0-20200519144636-6a729faf9037/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ=
storj.io/common v0.0.0-20200519171747-3ff8acf78c46 h1:Yx73D928PKtyQYPXHuQ5WFES4t+0nufxbhwyf8VodMw=
storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=

View File

@@ -40,7 +40,7 @@ func TestBasic(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(conn.Close)
_, err = pb.NewDRPCNodeClient(conn).CheckIn(ctx, &pb.CheckInRequest{
Address: node.GetAddress().GetAddress(),
Address: node.Address,
Version: &node.Version,
Capacity: &node.Capacity,
Operator: &node.Operator,

View File

@@ -21,12 +21,12 @@ func TestSatelliteContactEndpoint(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
nodeDossier := planet.StorageNodes[0].Contact.Service.Local()
nodeInfo := planet.StorageNodes[0].Contact.Service.Local()
ident := planet.StorageNodes[0].Identity
peer := rpcpeer.Peer{
Addr: &net.TCPAddr{
IP: net.ParseIP(nodeDossier.Address.GetAddress()),
IP: net.ParseIP(nodeInfo.Address),
Port: 5,
},
State: tls.ConnectionState{
@@ -35,15 +35,15 @@ func TestSatelliteContactEndpoint(t *testing.T) {
}
peerCtx := rpcpeer.NewContext(ctx, &peer)
resp, err := planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
Address: nodeDossier.Address.GetAddress(),
Version: &nodeDossier.Version,
Capacity: &nodeDossier.Capacity,
Operator: &nodeDossier.Operator,
Address: nodeInfo.Address,
Version: &nodeInfo.Version,
Capacity: &nodeInfo.Capacity,
Operator: &nodeInfo.Operator,
})
require.NoError(t, err)
require.NotNil(t, resp)
peerID, err := planet.Satellites[0].DB.PeerIdentities().Get(ctx, nodeDossier.Id)
peerID, err := planet.Satellites[0].DB.PeerIdentities().Get(ctx, nodeInfo.ID)
require.NoError(t, err)
require.Equal(t, ident.PeerIdentity(), peerID)
})

View File

@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/overlay"
@@ -19,19 +20,21 @@ func TestDetectionChore(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
node := planet.StorageNodes[0]
nodeDossier := planet.StorageNodes[0].Contact.Service.Local()
satellite := planet.Satellites[0]
node.Contact.Chore.Pause(ctx)
satellite.DowntimeTracking.DetectionChore.Loop.Pause()
// setup
nodeInfo := planet.StorageNodes[0].Contact.Service.Local()
info := overlay.NodeCheckInInfo{
NodeID: nodeDossier.Id,
IsUp: true,
Address: nodeDossier.Address,
Operator: &nodeDossier.Operator,
Version: &nodeDossier.Version,
NodeID: nodeInfo.ID,
IsUp: true,
Address: &pb.NodeAddress{
Address: nodeInfo.Address,
},
Operator: &nodeInfo.Operator,
Version: &nodeInfo.Version,
}
sixtyOneMinutes := 61 * time.Minute

View File

@@ -6,6 +6,7 @@ package overlay_test
import (
"crypto/tls"
"crypto/x509"
"errors"
"net"
"runtime"
"strings"
@@ -46,11 +47,11 @@ func TestMinimumDiskSpace(t *testing.T) {
node0 := planet.StorageNodes[0]
node0.Contact.Chore.Pause(ctx)
nodeDossier := node0.Contact.Service.Local()
nodeInfo := node0.Contact.Service.Local()
ident := node0.Identity
peer := rpcpeer.Peer{
Addr: &net.TCPAddr{
IP: net.ParseIP(nodeDossier.Address.GetAddress()),
IP: net.ParseIP(nodeInfo.Address),
Port: 5,
},
State: tls.ConnectionState{
@@ -61,12 +62,12 @@ func TestMinimumDiskSpace(t *testing.T) {
// report disk space less than minimum
_, err := planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
Address: nodeDossier.Address.GetAddress(),
Version: &nodeDossier.Version,
Address: nodeInfo.Address,
Version: &nodeInfo.Version,
Capacity: &pb.NodeCapacity{
FreeDisk: 9 * memory.MB.Int64(),
},
Operator: &nodeDossier.Operator,
Operator: &nodeInfo.Operator,
})
require.NoError(t, err)
@@ -88,12 +89,12 @@ func TestMinimumDiskSpace(t *testing.T) {
// report disk space greater than minimum
_, err = planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
Address: nodeDossier.Address.GetAddress(),
Version: &nodeDossier.Version,
Address: nodeInfo.Address,
Version: &nodeInfo.Version,
Capacity: &pb.NodeCapacity{
FreeDisk: 11 * memory.MB.Int64(),
},
Operator: &nodeDossier.Operator,
Operator: &nodeInfo.Operator,
})
require.NoError(t, err)
@@ -314,15 +315,21 @@ func testNodeSelection(t *testing.T, ctx *testcontext.Context, planet *testplane
// ensure all storagenodes are in overlay
for _, storageNode := range planet.StorageNodes {
n := storageNode.Contact.Service.Local()
lastNet, err := ipToLastNet(n.Address)
require.NoError(t, err)
d := overlay.NodeCheckInInfo{
NodeID: storageNode.ID(),
Address: n.Address,
NodeID: storageNode.ID(),
Address: &pb.NodeAddress{
Address: n.Address,
},
LastIPPort: storageNode.Addr(),
LastNet: n.LastNet,
LastNet: lastNet,
Version: &n.Version,
}
err := satellite.Overlay.DB.UpdateCheckIn(ctx, d, time.Now().UTC(), satellite.Config.Overlay.Node)
assert.NoError(t, err)
err = satellite.Overlay.DB.UpdateCheckIn(ctx, d, time.Now().UTC(), satellite.Config.Overlay.Node)
require.NoError(t, err)
}
type test struct {
@@ -410,6 +417,32 @@ func testNodeSelection(t *testing.T, ctx *testcontext.Context, planet *testplane
}
}
// ipToLastNet converts target address to its IP and /24 subnet IPv4 or /64 subnet IPv6
func ipToLastNet(target string) (network string, err error) {
	host, _, err := net.SplitHostPort(target)
	if err != nil {
		return "", err
	}
	parsed := net.ParseIP(host)
	if parsed == nil {
		return "", errors.New("invalid ip " + host)
	}
	// A successfully parsed address is either 4-byte (IPv4) or 16-byte (IPv6);
	// collapse IPv4 addresses to their /24 network and IPv6 to their /64.
	switch {
	case parsed.To4() != nil:
		return parsed.To4().Mask(net.CIDRMask(24, 32)).String(), nil
	case parsed.To16() != nil:
		return parsed.To16().Mask(net.CIDRMask(64, 128)).String(), nil
	default:
		return "", errors.New("unable to get network for address " + parsed.String())
	}
}
func TestNodeSelectionGracefulExit(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,

View File

@@ -661,9 +661,11 @@ func TestIrreparableSegmentAccordingToOverlay(t *testing.T) {
func updateNodeCheckIn(ctx context.Context, overlayDB overlay.DB, node *testplanet.StorageNode, isUp bool, timestamp time.Time) error {
local := node.Contact.Service.Local()
checkInInfo := overlay.NodeCheckInInfo{
NodeID: node.ID(),
Address: local.Address,
LastIPPort: local.LastIPPort,
NodeID: node.ID(),
Address: &pb.NodeAddress{
Address: local.Address,
},
LastIPPort: local.Address,
IsUp: isUp,
Operator: &local.Operator,
Capacity: &local.Capacity,

View File

@@ -136,7 +136,7 @@ func (s *Service) GetDashboardData(ctx context.Context) (_ *Dashboard, err error
defer mon.Task()(&ctx)(&err)
data := new(Dashboard)
data.NodeID = s.contact.Local().Id
data.NodeID = s.contact.Local().ID
data.Wallet = s.walletAddress
data.Version = s.versionInfo.Version
data.StartedAt = s.startedAt

View File

@@ -17,7 +17,6 @@ import (
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/common/sync2"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storagenode/trust"
)
@@ -40,13 +39,22 @@ type Config struct {
Interval time.Duration `help:"how frequently the node contact chore should run" releaseDefault:"1h" devDefault:"30s"`
}
// NodeInfo contains information necessary for introducing storagenode to satellite.
type NodeInfo struct {
ID storj.NodeID
Address string
Version pb.NodeVersion
Capacity pb.NodeCapacity
Operator pb.NodeOperator
}
// Service is the contact service between storage nodes and satellites
type Service struct {
log *zap.Logger
dialer rpc.Dialer
mu sync.Mutex
self *overlay.NodeDossier
self NodeInfo
trust *trust.Pool
@ -54,7 +62,7 @@ type Service struct {
}
// NewService creates a new contact service
func NewService(log *zap.Logger, dialer rpc.Dialer, self *overlay.NodeDossier, trust *trust.Pool) *Service {
func NewService(log *zap.Logger, dialer rpc.Dialer, self NodeInfo, trust *trust.Pool) *Service {
return &Service{
log: log,
dialer: dialer,
@@ -121,7 +129,7 @@ func (service *Service) pingSatelliteOnce(ctx context.Context, id storj.NodeID)
self := service.Local()
_, err = pb.NewDRPCNodeClient(conn).CheckIn(ctx, &pb.CheckInRequest{
Address: self.Address.GetAddress(),
Address: self.Address,
Version: &self.Version,
Capacity: &self.Capacity,
Operator: &self.Operator,
@@ -132,11 +140,11 @@ func (service *Service) pingSatelliteOnce(ctx context.Context, id storj.NodeID)
return nil
}
// Local returns the storagenode node-dossier
func (service *Service) Local() overlay.NodeDossier {
// Local returns the storagenode info.
func (service *Service) Local() NodeInfo {
service.mu.Lock()
defer service.mu.Unlock()
return *service.self
return service.self
}
// UpdateSelf updates the local node with the capacity
@@ -146,6 +154,5 @@ func (service *Service) UpdateSelf(capacity *pb.NodeCapacity) {
if capacity != nil {
service.self.Capacity = *capacity
}
service.initialized.Release()
}

View File

@@ -114,11 +114,11 @@ func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *pb.Dashboar
}
lastPingedAt := inspector.pingStats.WhenLastPinged()
self := inspector.contact.Local()
return &pb.DashboardResponse{
NodeId: inspector.contact.Local().Id,
NodeId: self.ID,
InternalAddress: "",
ExternalAddress: inspector.contact.Local().Address.Address,
ExternalAddress: self.Address,
LastPinged: lastPingedAt,
DashboardAddress: inspector.dashboardAddress.String(),
Uptime: time.Since(inspector.startTime).String(),

View File

@@ -29,7 +29,6 @@ import (
"storj.io/storj/pkg/server"
"storj.io/storj/private/lifecycle"
"storj.io/storj/private/version/checker"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
"storj.io/storj/storage/filestore"
"storj.io/storj/storagenode/bandwidth"
@@ -371,15 +370,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
self := &overlay.NodeDossier{
Node: pb.Node{
Id: peer.ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: c.ExternalAddress,
},
},
Type: pb.NodeType_STORAGE,
self := contact.NodeInfo{
ID: peer.ID(),
Address: c.ExternalAddress,
Operator: pb.NodeOperator{
Email: config.Operator.Email,
Wallet: config.Operator.Wallet,