Restrict node info only for trusted satellites (#2737)

Kaloyan Raev 2019-08-09 12:21:41 +03:00 committed by GitHub
parent 05e99a78d9
commit 9dccf59e8e
8 changed files with 62 additions and 14 deletions

View File

@@ -158,7 +158,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
return nil, errs.Combine(err, peer.Close())
}
-peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable)
+peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable, nil)
pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)

View File

@@ -9,27 +9,38 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// EndpointError defines errors class for Endpoint
var EndpointError = errs.Class("kademlia endpoint error")
+// SatelliteIDVerifier checks if the connection is from a trusted satellite
+type SatelliteIDVerifier interface {
+VerifySatelliteID(ctx context.Context, id storj.NodeID) error
+}
// Endpoint implements the kademlia Endpoints
type Endpoint struct {
log *zap.Logger
service *Kademlia
routingTable *RoutingTable
+trust SatelliteIDVerifier
connected int32
}
// NewEndpoint returns a new kademlia endpoint
-func NewEndpoint(log *zap.Logger, service *Kademlia, routingTable *RoutingTable) *Endpoint {
+func NewEndpoint(log *zap.Logger, service *Kademlia, routingTable *RoutingTable, trust SatelliteIDVerifier) *Endpoint {
return &Endpoint{
-log: log,
service: service,
routingTable: routingTable,
+log: log,
+trust: trust,
}
}
@@ -93,6 +104,22 @@ func (endpoint *Endpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest)
defer mon.Task()(&ctx)(&err)
self := endpoint.service.Local()
+if self.Type == pb.NodeType_STORAGE {
+if endpoint.trust == nil {
+return nil, status.Error(codes.Internal, "missing trust verifier")
+}
+peer, err := identity.PeerIdentityFromContext(ctx)
+if err != nil {
+return nil, status.Error(codes.Unauthenticated, err.Error())
+}
+err = endpoint.trust.VerifySatelliteID(ctx, peer.ID)
+if err != nil {
+return nil, status.Error(codes.PermissionDenied, "info requested from untrusted peer")
+}
+}
return &pb.InfoResponse{
Type: self.Type,
Operator: &self.Operator,

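For orientation: the endpoint's new trust dependency only has to satisfy the small SatelliteIDVerifier interface added above. Below is a minimal sketch of a hypothetical in-memory verifier (the type and its field names are invented for illustration; the storage node wires in its real trust pool instead, as the storage node peer hunks further down show).

package example

import (
	"context"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/storj"
)

// allowlistVerifier is a hypothetical SatelliteIDVerifier backed by a fixed
// set of node IDs; it exists only to illustrate the contract above.
type allowlistVerifier struct {
	ids map[storj.NodeID]struct{}
}

// VerifySatelliteID returns nil for allowlisted IDs and an error for anything else.
func (v allowlistVerifier) VerifySatelliteID(ctx context.Context, id storj.NodeID) error {
	if _, ok := v.ids[id]; !ok {
		return errs.New("satellite %s is not trusted", id)
	}
	return nil
}

Anything with this method set can be passed as the new fourth argument of NewEndpoint. Passing nil, as the non-storage peers in this commit do, appears safe because the guard in RequestInfo only runs when the local node reports pb.NodeType_STORAGE.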
View File

@@ -272,7 +272,7 @@ func (k *Kademlia) FetchInfo(ctx context.Context, node pb.Node) (_ *pb.InfoRespo
info, err := k.dialer.FetchInfo(ctx, node)
if err != nil {
-return nil, NodeErr.Wrap(NodeErr.New("%s : %s failed to fetch info from node ID %s: %s", k.routingTable.self.Type.String(), k.routingTable.self.Id.String(), node.Id.String(), err))
+return nil, NodeErr.Wrap(err)
}
return info, nil
}

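The wrapping change here is not just cosmetic: returning NodeErr.Wrap(err) keeps the gRPC status set by the remote node intact, so a caller can tell the new PermissionDenied rejection apart from ordinary dial or timeout failures (the new TestRequestInfoUntrusted below relies on exactly that). A rough caller-side sketch, assuming the same errs2 and codes helpers the test imports; the wrapper function and error class are invented for illustration.

package example

import (
	"context"

	"github.com/zeebo/errs"
	"google.golang.org/grpc/codes"

	"storj.io/storj/internal/errs2"
	"storj.io/storj/pkg/kademlia"
	"storj.io/storj/pkg/pb"
)

// errUntrusted marks the policy rejection so callers can treat it differently
// from transient network failures (name invented for this sketch).
var errUntrusted = errs.Class("node does not trust this satellite")

// fetchInfo wraps Kademlia.FetchInfo and singles out the untrusted-satellite case.
func fetchInfo(ctx context.Context, kad *kademlia.Kademlia, node pb.Node) (*pb.InfoResponse, error) {
	info, err := kad.FetchInfo(ctx, node)
	if errs2.IsRPC(err, codes.PermissionDenied) {
		return nil, errUntrusted.Wrap(err)
	}
	return info, err
}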
View File

@@ -11,11 +11,13 @@ import (
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"storj.io/storj/internal/errs2"
"storj.io/storj/internal/memory"
@@ -56,6 +58,21 @@ func TestRequestInfo(t *testing.T) {
})
}
+func TestRequestInfoUntrusted(t *testing.T) {
+testplanet.Run(t, testplanet.Config{
+SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
+Reconfigure: testplanet.Reconfigure{
+StorageNode: func(index int, config *storagenode.Config) {
+config.Storage.WhitelistedSatellites = nil
+},
+},
+}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+_, err := planet.Satellites[0].Kademlia.Service.FetchInfo(ctx, planet.StorageNodes[0].Local().Node)
+require.Error(t, err)
+assert.True(t, errs2.IsRPC(err, codes.PermissionDenied), "unexpected error: %+v", err)
+})
+}
func TestPingTimeout(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 0,

View File

@@ -163,7 +163,7 @@ func testNode(ctx *testcontext.Context, name string, t *testing.T, bn []pb.Node)
k, err := newKademlia(logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
require.NoError(t, err)
-s := NewEndpoint(logger, k, k.routingTable)
+s := NewEndpoint(logger, k, k.routingTable, nil)
// new ident opts
serverOptions, err := tlsopts.NewOptions(fid, tlsopts.Config{

View File

@@ -337,7 +337,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
return nil, errs.Combine(err, peer.Close())
}
-peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable)
+peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable, nil)
pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)

View File

@@ -20,11 +20,13 @@ func TestMonitor(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+satellite := planet.Satellites[0]
var freeBandwidth int64
for _, storageNode := range planet.StorageNodes {
storageNode.Storage2.Monitor.Loop.Pause()
-info, err := storageNode.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
+info, err := satellite.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
require.NoError(t, err)
// assume that all storage nodes have the same initial values
@@ -40,7 +42,7 @@ func TestMonitor(t *testing.T) {
for _, storageNode := range planet.StorageNodes {
storageNode.Storage2.Monitor.Loop.TriggerWait()
-info, err := storageNode.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
+info, err := satellite.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
require.NoError(t, err)
stats, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})

View File

@@ -181,6 +181,13 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
}
}
+{ // setup trust pool before kademlia
+peer.Storage2.Trust, err = trust.NewPool(peer.Transport, config.Storage.WhitelistedSatellites)
+if err != nil {
+return nil, errs.Combine(err, peer.Close())
+}
+}
{ // setup kademlia
config := config.Kademlia
// TODO: move this setup logic into kademlia package
@@ -222,7 +229,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
return nil, errs.Combine(err, peer.Close())
}
-peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable)
+peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Kademlia.RoutingTable, peer.Storage2.Trust)
pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)
@@ -230,11 +237,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config, ver
}
{ // setup storage
-peer.Storage2.Trust, err = trust.NewPool(peer.Transport, config.Storage.WhitelistedSatellites)
-if err != nil {
-return nil, errs.Combine(err, peer.Close())
-}
peer.Storage2.Store = pieces.NewStore(peer.Log.Named("pieces"), peer.DB.Pieces(), peer.DB.V0PieceInfo(), peer.DB.PieceExpirationDB())
peer.Storage2.Monitor = monitor.NewService(
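
Moving the trust pool setup ahead of the kademlia block is what allows peer.Storage2.Trust to be handed to kademlia.NewEndpoint as its SatelliteIDVerifier, so the pool has to implement that interface. If one wanted that contract checked at build time, a compile-time assertion along these lines would do; this is only a sketch, and it assumes the pool type is *trust.Pool under storj.io/storj/storagenode/trust.

package example

import (
	"storj.io/storj/pkg/kademlia"
	"storj.io/storj/storagenode/trust"
)

// Compile-time assertion (paths and the *trust.Pool type are assumptions):
// the storage node's trust pool must keep satisfying the verifier interface
// the kademlia endpoint now depends on; the build fails if the method set drifts.
var _ kademlia.SatelliteIDVerifier = (*trust.Pool)(nil)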