2019-09-04 19:29:34 +01:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package contact
|
|
|
|
|
|
|
|
import (
|
2019-12-30 19:42:10 +00:00
|
|
|
"context"
|
|
|
|
"fmt"
|
2020-04-02 21:44:51 +01:00
|
|
|
"time"
|
2019-09-19 20:56:34 +01:00
|
|
|
|
2019-09-10 17:05:07 +01:00
|
|
|
"github.com/zeebo/errs"
|
2019-09-04 19:29:34 +01:00
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2023-07-03 10:42:48 +01:00
|
|
|
"storj.io/common/nodetag"
|
2019-12-30 19:42:10 +00:00
|
|
|
"storj.io/common/pb"
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/rpc"
|
2021-08-03 04:25:41 +01:00
|
|
|
"storj.io/common/rpc/quic"
|
2019-12-30 19:42:10 +00:00
|
|
|
"storj.io/common/rpc/rpcstatus"
|
|
|
|
"storj.io/common/storj"
|
2023-07-07 09:31:58 +01:00
|
|
|
"storj.io/storj/satellite/nodeselection"
|
2019-09-04 19:29:34 +01:00
|
|
|
"storj.io/storj/satellite/overlay"
|
|
|
|
)
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Config contains configurable values for contact service.
type Config struct {
	// ExternalAddress is the public address of the node, useful for nodes behind NAT.
	ExternalAddress string `user:"true" help:"the public address of the node, useful for nodes behind NAT" default:""`
	// Timeout bounds a single ping-back attempt against a storage node.
	Timeout time.Duration `help:"timeout for pinging storage nodes" default:"10m0s" testDefault:"1m"`
	// AllowPrivateIP allows private IPs in CheckIn and PingMe.
	AllowPrivateIP bool `help:"allow private IPs in CheckIn and PingMe" testDefault:"true" devDefault:"true" default:"false"`

	// RateLimitInterval is the usual minimum amount of time between contact attempts.
	RateLimitInterval time.Duration `help:"the amount of time that should happen between contact attempts usually" releaseDefault:"10m0s" devDefault:"1ns"`
	// RateLimitBurst is the maximum burst size for the contact rate limit token bucket.
	RateLimitBurst int `help:"the maximum burst size for the contact rate limit token bucket" releaseDefault:"2" devDefault:"1000"`
	// RateLimitCacheSize is the number of nodes or addresses to keep token buckets for.
	RateLimitCacheSize int `help:"the number of nodes or addresses to keep token buckets for" default:"1000"`
}
|
|
|
|
|
2019-09-12 17:33:04 +01:00
|
|
|
// Service is the contact service between storage nodes and satellites.
// It is responsible for updating general node information like address and capacity.
// It is also responsible for updating peer identity information for verifying signatures from that node.
//
// architecture: Service
type Service struct {
	log *zap.Logger

	// overlay gives access to node records (used here to store verified node tags).
	overlay *overlay.Service
	// peerIDs stores peer identities used to verify signatures from nodes.
	peerIDs overlay.PeerIdentities
	// dialer dials nodes back over TCP; it is cloned with a QUIC connector for UDP pings.
	dialer rpc.Dialer

	// timeout bounds a single ping-back attempt; a value of 0 disables the bound.
	timeout time.Duration
	// idLimiter rate-limits contact attempts, keyed by node ID or address
	// (presumably used by the check-in endpoints; not visible in this file).
	idLimiter *RateLimiter
	// allowPrivateIP permits private IPs in CheckIn and PingMe.
	allowPrivateIP bool

	// nodeTagAuthority verifies the signatures on node tags reported by nodes.
	nodeTagAuthority nodetag.Authority
}
|
|
|
|
|
2019-09-12 17:33:04 +01:00
|
|
|
// NewService creates a new contact service.
|
2023-10-18 19:10:06 +01:00
|
|
|
func NewService(log *zap.Logger, overlay *overlay.Service, peerIDs overlay.PeerIdentities, dialer rpc.Dialer, authority nodetag.Authority, config Config) *Service {
|
2019-09-04 19:29:34 +01:00
|
|
|
return &Service{
|
2023-07-03 10:42:48 +01:00
|
|
|
log: log,
|
|
|
|
overlay: overlay,
|
|
|
|
peerIDs: peerIDs,
|
|
|
|
dialer: dialer,
|
|
|
|
timeout: config.Timeout,
|
|
|
|
idLimiter: NewRateLimiter(config.RateLimitInterval, config.RateLimitBurst, config.RateLimitCacheSize),
|
|
|
|
allowPrivateIP: config.AllowPrivateIP,
|
|
|
|
nodeTagAuthority: authority,
|
2019-09-04 19:29:34 +01:00
|
|
|
}
|
|
|
|
}
|
2019-09-19 20:56:34 +01:00
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Close closes resources.
|
2019-09-19 20:56:34 +01:00
|
|
|
func (service *Service) Close() error { return nil }
|
2019-12-30 19:42:10 +00:00
|
|
|
|
|
|
|
// PingBack pings the node to test connectivity.
//
// It dials the node over TCP and issues a ContactPing RPC; if that succeeds,
// it additionally attempts the same ping over QUIC. Dial and ping failures
// are reported through the returned values rather than through err, so
// callers can record the node's reachability without aborting.
//
// Returns, in order: whether the TCP dial+ping succeeded, whether the QUIC
// dial+ping succeeded, a human-readable message describing the most recent
// failure (if any), and err — which carries only unexpected internal
// failures such as an error while closing the client.
func (service *Service) PingBack(ctx context.Context, nodeurl storj.NodeURL) (_ bool, _ bool, _ string, err error) {
	defer mon.Task()(&ctx)(&err)

	// Bound the total ping-back time when a timeout is configured.
	if service.timeout > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, service.timeout)
		defer cancel()
	}

	pingNodeSuccess := true
	var pingErrorMessage string
	var pingNodeSuccessQUIC bool

	client, err := dialNodeURL(ctx, service.dialer, nodeurl)
	if err != nil {
		// If there is an error from trying to dial and ping the node, return that error as
		// pingErrorMessage and not as the err. We want to use this info to update
		// node contact info and do not want to terminate execution by returning an err
		mon.Event("failed_dial") //mon:locked
		pingNodeSuccess = false
		pingErrorMessage = fmt.Sprintf("failed to dial storage node (ID: %s) at address %s: %q",
			nodeurl.ID, nodeurl.Address, err,
		)
		service.log.Debug("pingBack failed to dial storage node",
			zap.String("pingErrorMessage", pingErrorMessage),
		)
		return pingNodeSuccess, pingNodeSuccessQUIC, pingErrorMessage, nil
	}
	// Close errors surface through the named err return so they stay observable.
	defer func() { err = errs.Combine(err, client.Close()) }()

	_, err = client.pingNode(ctx, &pb.ContactPingRequest{})
	if err != nil {
		mon.Event("failed_ping_node") //mon:locked
		pingNodeSuccess = false
		pingErrorMessage = fmt.Sprintf("failed to ping storage node, your node indicated error code: %d, %q", rpcstatus.Code(err), err)
		service.log.Debug("pingBack pingNode error",
			zap.Stringer("Node ID", nodeurl.ID),
			zap.String("pingErrorMessage", pingErrorMessage),
		)

		return pingNodeSuccess, pingNodeSuccessQUIC, pingErrorMessage, nil
	}

	// TCP ping succeeded; optimistically mark QUIC reachable, then verify.
	pingNodeSuccessQUIC = true
	err = service.pingNodeQUIC(ctx, nodeurl)
	if err != nil {
		// udp ping back is optional right now, it shouldn't affect contact service's
		// control flow
		pingNodeSuccessQUIC = false
		pingErrorMessage = err.Error()
	}

	return pingNodeSuccess, pingNodeSuccessQUIC, pingErrorMessage, nil
}
|
|
|
|
|
|
|
|
func (service *Service) pingNodeQUIC(ctx context.Context, nodeurl storj.NodeURL) error {
|
|
|
|
udpDialer := service.dialer
|
|
|
|
udpDialer.Connector = quic.NewDefaultConnector(nil)
|
|
|
|
udpClient, err := dialNodeURL(ctx, udpDialer, nodeurl)
|
|
|
|
if err != nil {
|
|
|
|
mon.Event("failed_dial_quic")
|
|
|
|
return Error.New("failed to dial storage node (ID: %s) at address %s using QUIC: %q", nodeurl.ID.String(), nodeurl.Address, err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
_ = udpClient.Close()
|
|
|
|
}()
|
|
|
|
|
|
|
|
_, err = udpClient.pingNode(ctx, &pb.ContactPingRequest{})
|
|
|
|
if err != nil {
|
|
|
|
mon.Event("failed_ping_node_quic")
|
|
|
|
return Error.New("failed to ping storage node using QUIC, your node indicated error code: %d, %q", rpcstatus.Code(err), err)
|
2019-12-30 19:42:10 +00:00
|
|
|
}
|
|
|
|
|
2021-03-24 18:30:27 +00:00
|
|
|
return nil
|
2019-12-30 19:42:10 +00:00
|
|
|
}
|
2023-07-03 10:42:48 +01:00
|
|
|
|
|
|
|
func (service *Service) processNodeTags(ctx context.Context, nodeID storj.NodeID, req *pb.SignedNodeTagSets) error {
|
|
|
|
if req != nil {
|
2023-07-07 09:31:58 +01:00
|
|
|
tags := nodeselection.NodeTags{}
|
2023-07-03 10:42:48 +01:00
|
|
|
for _, t := range req.Tags {
|
|
|
|
verifiedTags, signerID, err := verifyTags(ctx, service.nodeTagAuthority, nodeID, t)
|
|
|
|
if err != nil {
|
|
|
|
service.log.Info("Failed to verify tags.", zap.Error(err), zap.Stringer("NodeID", nodeID))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-08-10 13:20:14 +01:00
|
|
|
ts := time.Unix(verifiedTags.SignedAt, 0)
|
2023-07-03 10:42:48 +01:00
|
|
|
for _, vt := range verifiedTags.Tags {
|
2023-07-07 09:31:58 +01:00
|
|
|
tags = append(tags, nodeselection.NodeTag{
|
2023-07-03 10:42:48 +01:00
|
|
|
NodeID: nodeID,
|
|
|
|
Name: vt.Name,
|
|
|
|
Value: vt.Value,
|
|
|
|
SignedAt: ts,
|
|
|
|
Signer: signerID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(tags) > 0 {
|
|
|
|
err := service.overlay.UpdateNodeTags(ctx, tags)
|
|
|
|
if err != nil {
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func verifyTags(ctx context.Context, authority nodetag.Authority, nodeID storj.NodeID, t *pb.SignedNodeTagSet) (*pb.NodeTagSet, storj.NodeID, error) {
|
|
|
|
signerID, err := storj.NodeIDFromBytes(t.SignerNodeId)
|
|
|
|
if err != nil {
|
|
|
|
return nil, signerID, errs.New("failed to parse signerNodeID from verifiedTags: '%x', %s", t.SignerNodeId, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
verifiedTags, err := authority.Verify(ctx, t)
|
|
|
|
if err != nil {
|
|
|
|
return nil, signerID, errs.New("received node tags with wrong/unknown signature: '%x', %s", t.Signature, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
signedNodeID, err := storj.NodeIDFromBytes(verifiedTags.NodeId)
|
|
|
|
if err != nil {
|
|
|
|
return nil, signerID, errs.New("failed to parse nodeID from verifiedTags: '%x', %s", verifiedTags.NodeId, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
if signedNodeID != nodeID {
|
|
|
|
return nil, signerID, errs.New("the tag is signed for a different node. Expected NodeID: '%s', Received NodeID: '%s'", nodeID, signedNodeID)
|
|
|
|
}
|
|
|
|
return verifiedTags, signerID, nil
|
|
|
|
}
|