// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package contact

import (
	"context"
	"net"
	"time"

	"github.com/jtolio/eventkit"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/identity"
	"storj.io/common/pb"
	"storj.io/common/rpc/noise"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/storj"
	"storj.io/drpc/drpcctx"
	"storj.io/storj/private/nodeoperator"
	"storj.io/storj/satellite/overlay"
)

var (
	errPingBackDial     = errs.Class("pingback dialing")
	errCheckInIdentity  = errs.Class("check-in identity")
	errCheckInRateLimit = errs.Class("check-in ratelimit")
	errCheckInNetwork   = errs.Class("check-in network")
)

// Endpoint implements the contact service endpoints.
type Endpoint struct {
	pb.DRPCNodeUnimplementedServer

	log     *zap.Logger
	service *Service
}

// NewEndpoint returns a new contact service endpoint.
func NewEndpoint(log *zap.Logger, service *Service) *Endpoint {
	return &Endpoint{
		log:     log,
		service: service,
	}
}
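
// A minimal sketch of how the satellite could expose this endpoint over DRPC.
// The server variable name (srv) and the surrounding wiring are illustrative
// assumptions; the real registration happens in the satellite peer setup.
//
//	endpoint := contact.NewEndpoint(log, service)
//	if err := pb.DRPCRegisterNode(srv, endpoint); err != nil {
//		return err
//	}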

// CheckIn is periodically called by storage nodes to keep the satellite informed of their existence,
// address, and operator information. In return, the satellite keeps the node informed of its
// reachability.
// When a node checks in with the satellite, the satellite pings the node back to confirm that it can
// successfully connect.
func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (_ *pb.CheckInResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	peerID, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		endpoint.log.Info("failed to get node ID from context", zap.String("node address", req.Address), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Unknown, errCheckInIdentity.New("failed to get ID from context: %v", err).Error())
	}
	nodeID := peerID.ID

	// we need a string as a key for the limiter, but nodeID.String() has base58 encoding overhead
	nodeIDBytesAsString := string(nodeID.Bytes())
	if !endpoint.service.idLimiter.IsAllowed(nodeIDBytesAsString) {
		endpoint.log.Info("node rate limited by id", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID))
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, errCheckInRateLimit.New("node rate limited by id").Error())
	}

	err = endpoint.service.peerIDs.Set(ctx, nodeID, peerID)
	if err != nil {
		endpoint.log.Info("failed to add peer identity entry for ID", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, errCheckInIdentity.New("failed to add peer identity entry for ID: %v", err).Error())
	}

	resolvedIP, port, resolvedNetwork, err := endpoint.service.overlay.ResolveIPAndNetwork(ctx, req.Address)
	if err != nil {
		endpoint.log.Info("failed to resolve IP from address", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, errCheckInNetwork.New("failed to resolve IP from address: %s, err: %v", req.Address, err).Error())
	}
	if !endpoint.service.allowPrivateIP && (!resolvedIP.IsGlobalUnicast() || isPrivateIP(resolvedIP)) {
		endpoint.log.Info("IP address not allowed", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID))
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, errCheckInNetwork.New("IP address not allowed: %s", req.Address).Error())
	}

	nodeurl := storj.NodeURL{
		ID:      nodeID,
		Address: req.Address,
	}

	// If the node supplied a Noise key attestation, verify that it is validly signed
	// for this node ID; on success, remember the Noise protocol and public key for
	// the ping-back and the stored node address. Invalid attestations are ignored.
	var noiseInfo *pb.NoiseInfo
	if req.NoiseKeyAttestation != nil {
		if err := noise.ValidateKeyAttestation(ctx, req.NoiseKeyAttestation, nodeID); err == nil {
			noiseInfo = &pb.NoiseInfo{
				Proto:     req.NoiseKeyAttestation.NoiseProto,
				PublicKey: req.NoiseKeyAttestation.NoisePublicKey,
			}
			nodeurl.NoiseInfo = noiseInfo.Convert()
		}
	}

	pingNodeSuccess, pingNodeSuccessQUIC, pingErrorMessage, err := endpoint.service.PingBack(ctx, nodeurl)
	if err != nil {
		return nil, endpoint.checkPingRPCErr(err, nodeurl)
	}

	// check wallet features
	if req.Operator != nil {
		if err := nodeoperator.DefaultWalletFeaturesValidation.Validate(req.Operator.WalletFeatures); err != nil {
			endpoint.log.Debug("ignoring invalid wallet features",
				zap.Stringer("Node ID", nodeID),
				zap.Strings("Wallet Features", req.Operator.WalletFeatures))

			// TODO: Update CheckInResponse to include wallet feature validation error
			req.Operator.WalletFeatures = nil
		}
	}

	nodeInfo := overlay.NodeCheckInInfo{
		NodeID: peerID.ID,
		Address: &pb.NodeAddress{
			Address:   req.Address,
			NoiseInfo: noiseInfo,
		},
		LastNet:    resolvedNetwork,
		LastIPPort: net.JoinHostPort(resolvedIP.String(), port),
		IsUp:       pingNodeSuccess,
		Capacity:   req.Capacity,
		Operator:   req.Operator,
		Version:    req.Version,
	}

	endpoint.emitEventkitEvent(ctx, req, pingNodeSuccess, pingNodeSuccessQUIC, nodeInfo)

	err = endpoint.service.overlay.UpdateCheckIn(ctx, nodeInfo, time.Now().UTC())
	if err != nil {
		endpoint.log.Info("failed to update check in", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, Error.Wrap(err).Error())
	}

	endpoint.log.Debug("checking in", zap.Stringer("Node ID", nodeID), zap.String("node addr", req.Address), zap.Bool("ping node success", pingNodeSuccess), zap.String("ping node err msg", pingErrorMessage))
	return &pb.CheckInResponse{
		PingNodeSuccess:     pingNodeSuccess,
		PingNodeSuccessQuic: pingNodeSuccessQUIC,
		PingErrorMessage:    pingErrorMessage,
	}, nil
}
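
// A rough sketch of the node side of this exchange, assuming an already-dialed
// DRPC connection (conn) to the satellite; the field values are placeholders and
// this is not the storagenode's actual contact chore:
//
//	client := pb.NewDRPCNodeClient(conn)
//	resp, err := client.CheckIn(ctx, &pb.CheckInRequest{
//		Address:  "mynode.example.com:28967",
//		Capacity: &pb.NodeCapacity{FreeDisk: 1 << 40},
//		Operator: &pb.NodeOperator{Wallet: "0x0000000000000000000000000000000000000000"},
//	})
//	if err == nil && !resp.PingNodeSuccess {
//		// the satellite could not dial us back; resp.PingErrorMessage explains why
//	}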

func (endpoint *Endpoint) emitEventkitEvent(ctx context.Context, req *pb.CheckInRequest, pingNodeTCPSuccess bool, pingNodeQUICSuccess bool, nodeInfo overlay.NodeCheckInInfo) {
	// Best effort: pull the peer's source address off the underlying DRPC transport.
	var sourceAddr string
	transport, found := drpcctx.Transport(ctx)
	if found {
		if conn, ok := transport.(net.Conn); ok {
			a := conn.RemoteAddr()
			if a != nil {
				sourceAddr = a.String()
			}
		}
	}

	ek.Event("checkin",
		eventkit.String("id", nodeInfo.NodeID.String()),
		eventkit.String("addr", req.Address),
		eventkit.String("resolved-addr", nodeInfo.LastIPPort),
		eventkit.String("source-addr", sourceAddr),
		eventkit.Timestamp("build-time", nodeInfo.Version.Timestamp),
		eventkit.String("version", nodeInfo.Version.Version),
		eventkit.String("country", nodeInfo.CountryCode.String()),
		eventkit.Int64("free-disk", nodeInfo.Capacity.FreeDisk),
		eventkit.Bool("ping-tcp-success", pingNodeTCPSuccess),
		eventkit.Bool("ping-quic-success", pingNodeQUICSuccess),
	)
}
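
// The ek scope used above is assumed to be the package-level eventkit scope,
// declared elsewhere in this package roughly as:
//
//	var ek = eventkit.Package()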

// GetTime returns the current timestamp.
func (endpoint *Endpoint) GetTime(ctx context.Context, req *pb.GetTimeRequest) (_ *pb.GetTimeResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	peerID, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		endpoint.log.Info("failed to get node ID from context", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Unauthenticated, errCheckInIdentity.New("failed to get ID from context: %v", err).Error())
	}

	currentTimestamp := time.Now().UTC()
	endpoint.log.Debug("get system current time", zap.Stringer("timestamp", currentTimestamp), zap.Stringer("node id", peerID.ID))
	return &pb.GetTimeResponse{
		Timestamp: currentTimestamp,
	}, nil
}
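
// GetTime lets a node compare the satellite clock against its own. A hypothetical
// caller-side check (client construction omitted, the one-hour tolerance is an
// illustrative assumption):
//
//	resp, err := client.GetTime(ctx, &pb.GetTimeRequest{})
//	if err == nil {
//		drift := time.Since(resp.Timestamp)
//		if drift < -time.Hour || drift > time.Hour {
//			log.Warn("local clock differs significantly from satellite", zap.Duration("drift", drift))
//		}
//	}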

// PingMe is called by a storage node to request a ping back from the satellite to confirm that it
// can successfully connect to the node.
func (endpoint *Endpoint) PingMe(ctx context.Context, req *pb.PingMeRequest) (_ *pb.PingMeResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	peerID, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		endpoint.log.Info("failed to get node ID from context", zap.String("node address", req.Address), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Unknown, errCheckInIdentity.New("failed to get ID from context: %v", err).Error())
	}
	nodeID := peerID.ID

	nodeURL := storj.NodeURL{
		ID:      nodeID,
		Address: req.Address,
	}

	resolvedIP, _, _, err := endpoint.service.overlay.ResolveIPAndNetwork(ctx, req.Address)
	if err != nil {
		endpoint.log.Info("failed to resolve IP from address", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, errCheckInNetwork.New("failed to resolve IP from address: %s, err: %v", req.Address, err).Error())
	}
	if !endpoint.service.allowPrivateIP && (!resolvedIP.IsGlobalUnicast() || isPrivateIP(resolvedIP)) {
		endpoint.log.Info("IP address not allowed", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID))
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, errCheckInNetwork.New("IP address not allowed: %s", req.Address).Error())
	}

	if endpoint.service.timeout > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, endpoint.service.timeout)
		defer cancel()
	}

	switch req.Transport {
	case pb.NodeTransport_QUIC_RPC:
		err = endpoint.service.pingNodeQUIC(ctx, nodeURL)
		if err != nil {
			return nil, endpoint.checkPingRPCErr(err, nodeURL)
		}
		return &pb.PingMeResponse{}, nil

	case pb.NodeTransport_TCP_TLS_RPC:
		client, err := dialNodeURL(ctx, endpoint.service.dialer, nodeURL)
		if err != nil {
			return nil, endpoint.checkPingRPCErr(err, nodeURL)
		}

		defer func() { err = errs.Combine(err, client.Close()) }()

		_, err = client.pingNode(ctx, &pb.ContactPingRequest{})
		if err != nil {
			return nil, endpoint.checkPingRPCErr(err, nodeURL)
		}
		return &pb.PingMeResponse{}, nil
	}

	return nil, rpcstatus.Errorf(rpcstatus.InvalidArgument, "invalid transport: %v", req.Transport)
}
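
// An illustrative node-side request asking the satellite to try dialing it back
// over QUIC (the Address value is a placeholder, and the client setup is assumed):
//
//	_, err := client.PingMe(ctx, &pb.PingMeRequest{
//		Address:   "mynode.example.com:28967",
//		Transport: pb.NodeTransport_QUIC_RPC,
//	})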

func (endpoint *Endpoint) checkPingRPCErr(err error, nodeURL storj.NodeURL) error {
	endpoint.log.Info("failed to ping back address", zap.String("node address", nodeURL.Address), zap.Stringer("Node ID", nodeURL.ID), zap.Error(err))
	if errPingBackDial.Has(err) {
		err = errCheckInNetwork.New("failed dialing address when attempting to ping node (ID: %s): %s, err: %v", nodeURL.ID, nodeURL.Address, err)
		return rpcstatus.Error(rpcstatus.NotFound, err.Error())
	}
	err = errCheckInNetwork.New("failed to ping node (ID: %s) at address: %s, err: %v", nodeURL.ID, nodeURL.Address, err)
	return rpcstatus.Error(rpcstatus.NotFound, err.Error())
}

// isPrivateIP is copied from Go 1.17's net.IP.IsPrivate. We copied it to ensure we
// can compile for Go versions earlier than 1.17.
//
// TODO(artur): Swap isPrivateIP usages with net.IP.IsPrivate when we no longer
// need to build for Go versions earlier than 1.17. Keep this in sync with the
// stdlib until then.
func isPrivateIP(ip net.IP) bool {
	if ip4 := ip.To4(); ip4 != nil {
		// Following RFC 1918, Section 3. Private Address Space which says:
		// The Internet Assigned Numbers Authority (IANA) has reserved the
		// following three blocks of the IP address space for private internets:
		// 10.0.0.0 - 10.255.255.255 (10/8 prefix)
		// 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
		// 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
		return ip4[0] == 10 ||
			(ip4[0] == 172 && ip4[1]&0xf0 == 16) ||
			(ip4[0] == 192 && ip4[1] == 168)
	}
	// Following RFC 4193, Section 8. IANA Considerations which says:
	// The IANA has assigned the FC00::/7 prefix to "Unique Local Unicast".
	return len(ip) == net.IPv6len && ip[0]&0xfe == 0xfc
}
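
// For illustration, the checks above classify addresses as follows:
//
//	isPrivateIP(net.ParseIP("10.1.2.3"))   // true  (RFC 1918 10/8)
//	isPrivateIP(net.ParseIP("172.20.0.1")) // true  (RFC 1918 172.16/12)
//	isPrivateIP(net.ParseIP("fd00::1"))    // true  (RFC 4193 unique local)
//	isPrivateIP(net.ParseIP("8.8.8.8"))    // false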