Use testplanet to test Refresh (#678)

This commit is contained in:
Egon Elbre 2018-11-19 16:40:01 +02:00 committed by GitHub
parent bb1cf151e3
commit d07433c150
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 100 additions and 193 deletions

View File

@@ -9,6 +9,7 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
"storj.io/storj/pkg/auth/grpcauth"
"storj.io/storj/pkg/kademlia" "storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay" "storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
@@ -33,6 +34,46 @@ type Node struct {
Dependencies []io.Closer Dependencies []io.Closer
} }
// newNode creates a new node.
//
// It provisions a fresh identity and listener from the planet, wires up a
// transport client and a gRPC provider (with API-key auth interceptor), and
// registers the node's pb.Node info in the planet's bookkeeping slices.
// Returns the node, or an error if identity/listener/provider setup fails.
func (planet *Planet) newNode(name string) (*Node, error) {
// Every node gets its own full identity (keys/cert) from the planet.
identity, err := planet.newIdentity()
if err != nil {
return nil, err
}
// Dedicated listener; its address becomes the node's advertised address.
listener, err := planet.newListener()
if err != nil {
return nil, err
}
node := &Node{
Log: planet.log.Named(name),
Identity: identity,
Listener: listener,
}
node.Transport = transport.NewClient(identity)
node.Provider, err = provider.NewProvider(node.Identity, node.Listener, grpcauth.NewAPIKeyInterceptor())
if err != nil {
// Provider setup failed: release the listener and surface both errors.
return nil, utils.CombineErrors(err, listener.Close())
}
// Advertised node record: identity ID plus the listener's TLS/gRPC address.
node.Info = pb.Node{
Id: node.Identity.ID.String(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: node.Listener.Addr().String(),
},
}
// Register with the planet so Shutdown/lookups can see this node.
planet.nodes = append(planet.nodes, node)
planet.nodeInfos = append(planet.nodeInfos, node.Info)
// nodeLinks entries are "<id>:<address>" strings.
planet.nodeLinks = append(planet.nodeLinks, node.Info.Id+":"+node.Listener.Addr().String())
return node, nil
}
// ID returns node id // ID returns node id
// TODO: switch to storj.NodeID // TODO: switch to storj.NodeID
func (node *Node) ID() string { return node.Info.Id } func (node *Node) ID() string { return node.Info.Id }

View File

@@ -17,13 +17,12 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"storj.io/storj/internal/memory" "storj.io/storj/internal/memory"
"storj.io/storj/pkg/auth/grpcauth" "storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
pieceserver "storj.io/storj/pkg/piecestore/psserver" pieceserver "storj.io/storj/pkg/piecestore/psserver"
"storj.io/storj/pkg/piecestore/psserver/psdb" "storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/pointerdb" "storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/provider" "storj.io/storj/pkg/provider"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/utils" "storj.io/storj/pkg/utils"
"storj.io/storj/storage/teststore" "storj.io/storj/storage/teststore"
) )
@@ -86,6 +85,12 @@ func New(t zaptest.TestingT, satelliteCount, storageNodeCount, uplinkCount int)
} }
} }
for _, n := range planet.nodes {
server := node.NewServer(n.Kademlia)
pb.RegisterNodesServer(n.Provider.GRPC(), server)
// TODO: shutdown
}
// init Satellites // init Satellites
for _, node := range planet.Satellites { for _, node := range planet.Satellites {
server := pointerdb.NewServer( server := pointerdb.NewServer(
@@ -166,46 +171,6 @@ func (planet *Planet) Shutdown() error {
return utils.CombineErrors(errs...) return utils.CombineErrors(errs...)
} }
// newNode creates a new node.
//
// Provisions an identity and a listener, builds the Node with a transport
// client and a gRPC provider (API-key interceptor enabled), then records the
// node's pb.Node info in the planet's nodes/nodeInfos/nodeLinks slices.
func (planet *Planet) newNode(name string) (*Node, error) {
// Fresh full identity for this node.
identity, err := planet.newIdentity()
if err != nil {
return nil, err
}
// Listener whose address is advertised in node.Info below.
listener, err := planet.newListener()
if err != nil {
return nil, err
}
node := &Node{
Log: planet.log.Named(name),
Identity: identity,
Listener: listener,
}
node.Transport = transport.NewClient(identity)
node.Provider, err = provider.NewProvider(node.Identity, node.Listener, grpcauth.NewAPIKeyInterceptor())
if err != nil {
// Don't leak the listener when provider construction fails.
return nil, utils.CombineErrors(err, listener.Close())
}
node.Info = pb.Node{
Id: node.Identity.ID.String(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: node.Listener.Addr().String(),
},
}
// Track the node so the planet can manage and link to it later.
planet.nodes = append(planet.nodes, node)
planet.nodeInfos = append(planet.nodeInfos, node.Info)
// Link format: "<id>:<address>".
planet.nodeLinks = append(planet.nodeLinks, node.Info.Id+":"+node.Listener.Addr().String())
return node, nil
}
// newNodes creates initializes multiple nodes // newNodes creates initializes multiple nodes
func (planet *Planet) newNodes(prefix string, count int) ([]*Node, error) { func (planet *Planet) newNodes(prefix string, count int) ([]*Node, error) {
var xs []*Node var xs []*Node

View File

@@ -6,7 +6,6 @@ package overlay
import ( import (
"context" "context"
"crypto/rand" "crypto/rand"
"log"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/zeebo/errs" "github.com/zeebo/errs"
@@ -121,16 +120,18 @@ func (o *Cache) Bootstrap(ctx context.Context) error {
// We currently do not penalize nodes that are unresponsive, // We currently do not penalize nodes that are unresponsive,
// but should in the future. // but should in the future.
func (o *Cache) Refresh(ctx context.Context) error { func (o *Cache) Refresh(ctx context.Context) error {
log.Print("starting cache refresh") zap.L().Info("starting cache refresh")
r, err := randomID() r, err := randomID()
if err != nil { if err != nil {
return err return err
} }
rid := node.ID(r) rid := node.ID(r)
near, err := o.DHT.GetNodes(ctx, rid.String(), 128) near, err := o.DHT.GetNodes(ctx, rid.String(), 128)
if err != nil { if err != nil {
return err return err
} }
for _, n := range near { for _, n := range near {
pinged, err := o.DHT.Ping(ctx, *n) pinged, err := o.DHT.Ping(ctx, *n)
if err != nil { if err != nil {
@@ -148,6 +149,7 @@ func (o *Cache) Refresh(ctx context.Context) error {
continue continue
} }
} }
// TODO: Kademlia hooks to do this automatically rather than at interval // TODO: Kademlia hooks to do this automatically rather than at interval
nodes, err := o.DHT.GetNodes(ctx, "", 128) nodes, err := o.DHT.GetNodes(ctx, "", 128)
if err != nil { if err != nil {

View File

@@ -1,24 +1,18 @@
// Copyright (C) 2018 Storj Labs, Inc. // Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information. // See LICENSE for copying information.
package overlay package overlay_test
import ( import (
"context" "context"
"math/rand"
"net"
"strconv"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"storj.io/storj/internal/testcontext" "storj.io/storj/internal/testcontext"
"storj.io/storj/pkg/dht" "storj.io/storj/internal/testplanet"
"storj.io/storj/pkg/kademlia" "storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/utils"
"storj.io/storj/storage" "storj.io/storj/storage"
"storj.io/storj/storage/boltdb" "storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis" "storj.io/storj/storage/redis"
@@ -26,73 +20,69 @@ import (
"storj.io/storj/storage/teststore" "storj.io/storj/storage/teststore"
) )
const ( func testCache(ctx context.Context, t *testing.T, store storage.KeyValueStore) {
testNetSize = 30 cache := overlay.Cache{DB: store}
)
func testOverlay(ctx context.Context, t *testing.T, store storage.KeyValueStore) { { // Put
overlay := Cache{DB: store} err := cache.Put("valid1", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9001"}})
t.Run("Put", func(t *testing.T) {
err := overlay.Put("valid1", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9001"}})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
err = overlay.Put("valid2", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9002"}}) err = cache.Put("valid2", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9002"}})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
}) }
t.Run("Get", func(t *testing.T) { { // Get
valid2, err := overlay.Get(ctx, "valid2") valid2, err := cache.Get(ctx, "valid2")
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.Equal(t, valid2.Address.Address, "127.0.0.1:9002") assert.Equal(t, valid2.Address.Address, "127.0.0.1:9002")
} }
invalid2, err := overlay.Get(ctx, "invalid2") invalid2, err := cache.Get(ctx, "invalid2")
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, invalid2) assert.Nil(t, invalid2)
if storeClient, ok := store.(*teststore.Client); ok { if storeClient, ok := store.(*teststore.Client); ok {
storeClient.ForceError++ storeClient.ForceError++
_, err := overlay.Get(ctx, "valid1") _, err := cache.Get(ctx, "valid1")
assert.Error(t, err) assert.Error(t, err)
} }
}) }
t.Run("GetAll", func(t *testing.T) { { // GetAll
nodes, err := overlay.GetAll(ctx, []string{"valid2", "valid1", "valid2"}) nodes, err := cache.GetAll(ctx, []string{"valid2", "valid1", "valid2"})
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9002") assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9002")
assert.Equal(t, nodes[1].Address.Address, "127.0.0.1:9001") assert.Equal(t, nodes[1].Address.Address, "127.0.0.1:9001")
assert.Equal(t, nodes[2].Address.Address, "127.0.0.1:9002") assert.Equal(t, nodes[2].Address.Address, "127.0.0.1:9002")
} }
nodes, err = overlay.GetAll(ctx, []string{"valid1", "invalid"}) nodes, err = cache.GetAll(ctx, []string{"valid1", "invalid"})
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9001") assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9001")
assert.Nil(t, nodes[1]) assert.Nil(t, nodes[1])
} }
nodes, err = overlay.GetAll(ctx, []string{"", ""}) nodes, err = cache.GetAll(ctx, []string{"", ""})
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.Nil(t, nodes[0]) assert.Nil(t, nodes[0])
assert.Nil(t, nodes[1]) assert.Nil(t, nodes[1])
} }
_, err = overlay.GetAll(ctx, []string{}) _, err = cache.GetAll(ctx, []string{})
assert.True(t, OverlayError.Has(err)) assert.True(t, overlay.OverlayError.Has(err))
if storeClient, ok := store.(*teststore.Client); ok { if storeClient, ok := store.(*teststore.Client); ok {
storeClient.ForceError++ storeClient.ForceError++
_, err := overlay.GetAll(ctx, []string{"valid1", "valid2"}) _, err := cache.GetAll(ctx, []string{"valid1", "valid2"})
assert.Error(t, err) assert.Error(t, err)
} }
}) }
} }
func TestRedis(t *testing.T) { func TestCache_Redis(t *testing.T) {
ctx := testcontext.New(t) ctx := testcontext.New(t)
defer ctx.Cleanup() defer ctx.Cleanup()
@@ -108,10 +98,10 @@ func TestRedis(t *testing.T) {
} }
defer ctx.Check(store.Close) defer ctx.Check(store.Close)
testOverlay(ctx, t, store) testCache(ctx, t, store)
} }
func TestBolt(t *testing.T) { func TestCache_Bolt(t *testing.T) {
ctx := testcontext.New(t) ctx := testcontext.New(t)
defer ctx.Cleanup() defer ctx.Cleanup()
@@ -121,123 +111,28 @@ func TestBolt(t *testing.T) {
} }
defer ctx.Check(client.Close) defer ctx.Check(client.Close)
testOverlay(ctx, t, client) testCache(ctx, t, client)
} }
func TestStore(t *testing.T) { func TestCache_Store(t *testing.T) {
ctx := testcontext.New(t) ctx := testcontext.New(t)
defer ctx.Cleanup() defer ctx.Cleanup()
testOverlay(ctx, t, teststore.New()) testCache(ctx, t, teststore.New())
} }
func TestRefresh(t *testing.T) { func TestCache_Refresh(t *testing.T) {
ctx := context.Background() ctx := testcontext.New(t)
defer ctx.Cleanup()
dhts, bootstrap := bootstrapTestNetwork(t, "127.0.0.1", "9999") planet, err := testplanet.New(t, 1, 30, 0)
dht := newTestKademlia(t, "127.0.0.1", "9999", dhts[rand.Intn(testNetSize)], bootstrap)
cache := &Cache{
DB: teststore.New(),
DHT: dht,
}
err := cache.Bootstrap(ctx)
assert.NoError(t, err)
err = cache.Refresh(ctx)
assert.NoError(t, err)
}
func newTestKademlia(t *testing.T, ip, port string, d dht.DHT, bootstrap pb.Node) *kademlia.Kademlia {
ctx := context.Background()
fid, err := node.NewFullIdentity(ctx, 12, 4)
assert.NoError(t, err)
bootstrapNodes := []pb.Node{bootstrap}
self := pb.Node{Id: fid.ID.String(), Address: &pb.NodeAddress{Address: net.JoinHostPort(ip, port)}}
routing, err := kademlia.NewRoutingTable(self, teststore.New(), teststore.New())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Check(planet.Shutdown)
kad, err := kademlia.NewKademliaWithRoutingTable(self, bootstrapNodes, fid, 5, routing) planet.Start(ctx)
if err != nil {
t.Fatal(utils.CombineErrors(err, routing.Close()))
}
return kad err = planet.Satellites[0].Overlay.Refresh(ctx)
} assert.NoError(t, err)
// bootstrapTestNetwork spins up an in-process Kademlia test network:
// one bootstrap node listening on ip:port, plus testNetSize additional DHT
// nodes on successive ports, each bootstrapped against the first node.
// Returns the DHT instances and the bootstrap node's routing-table record.
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
ctx := context.Background()
bid, err := node.NewFullIdentity(ctx, 12, 4)
assert.NoError(t, err)
dhts := []dht.DHT{}
// Parse the starting port; subsequent nodes use p+1, p+2, ...
p, err := strconv.Atoi(port)
pm := strconv.Itoa(p)
assert.NoError(t, err)
intro, err := kademlia.GetIntroNode(net.JoinHostPort(ip, pm))
intro.Id = "test"
assert.NoError(t, err)
ca, err := provider.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
self := pb.Node{Id: bid.ID.String(), Address: &pb.NodeAddress{Address: net.JoinHostPort(ip, port)}}
routing, err := kademlia.NewRoutingTable(self, teststore.New(), teststore.New())
if err != nil {
t.Fatal(err)
}
boot, err := kademlia.NewKademliaWithRoutingTable(self, []pb.Node{*intro}, identity, 5, routing)
if err != nil {
// Close the routing table too so its stores are not leaked.
t.Fatal(utils.CombineErrors(err, routing.Close()))
}
bootNode := routing.Local()
// NOTE(review): the goroutine assigns the outer `err` captured by the
// closure while the main goroutine keeps using it — a data race under
// `go test -race`; use a local variable inside the goroutine instead.
go func() {
err = boot.ListenAndServe()
assert.NoError(t, err)
}()
p++
err = boot.Bootstrap(context.Background())
assert.NoError(t, err)
// Start the remaining test nodes, each on the next port.
for i := 0; i < testNetSize; i++ {
gg := strconv.Itoa(p)
fid, err := node.NewFullIdentity(ctx, 12, 4)
assert.NoError(t, err)
self := pb.Node{Id: fid.ID.String(), Address: &pb.NodeAddress{Address: net.JoinHostPort(ip, gg)}}
routing, err := kademlia.NewRoutingTable(self, teststore.New(), teststore.New())
if err != nil {
t.Fatal(err)
}
dht, err := kademlia.NewKademliaWithRoutingTable(self, []pb.Node{bootNode}, fid, 5, routing)
if err != nil {
t.Fatal(utils.CombineErrors(err, routing.Close()))
}
p++
dhts = append(dhts, dht)
// NOTE(review): same shared-`err` capture issue as above.
go func() {
err = dht.ListenAndServe()
assert.NoError(t, err)
}()
err = dht.Bootstrap(context.Background())
assert.NoError(t, err)
}
return dhts, bootNode
} }

View File

@@ -38,7 +38,7 @@ func NewClient(identity *provider.FullIdentity) Client {
} }
// DialNode returns a grpc connection with tls to a node // DialNode returns a grpc connection with tls to a node
func (o *Transport) DialNode(ctx context.Context, node *pb.Node, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { func (transport *Transport) DialNode(ctx context.Context, node *pb.Node, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if node.Address == nil || node.Address.Address == "" { if node.Address == nil || node.Address.Address == "" {
@@ -46,27 +46,31 @@ func (o *Transport) DialNode(ctx context.Context, node *pb.Node, opts ...grpc.Di
} }
// add ID of node we are wanting to connect to // add ID of node we are wanting to connect to
dialOpt, err := o.identity.DialOption(node.GetId()) dialOpt, err := transport.identity.DialOption(node.GetId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return grpc.Dial(node.GetAddress().Address, append([]grpc.DialOption{dialOpt}, opts...)...)
options := append([]grpc.DialOption{dialOpt}, opts...)
return grpc.Dial(node.GetAddress().Address, options...)
} }
// DialAddress returns a grpc connection with tls to an IP address // DialAddress returns a grpc connection with tls to an IP address
func (o *Transport) DialAddress(ctx context.Context, address string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { func (transport *Transport) DialAddress(ctx context.Context, address string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
dialOpt, err := o.identity.DialOption("") dialOpt, err := transport.identity.DialOption("")
if err != nil { if err != nil {
return nil, err return nil, err
} }
return grpc.Dial(address, append([]grpc.DialOption{dialOpt}, opts...)...)
options := append([]grpc.DialOption{dialOpt}, opts...)
return grpc.Dial(address, options...)
} }
// Identity is a getter for the transport's identity // Identity is a getter for the transport's identity
func (o *Transport) Identity() *provider.FullIdentity { func (transport *Transport) Identity() *provider.FullIdentity {
return o.identity return transport.identity
} }
// Close implements io.closer, closing the transport connection(s) // Close implements io.closer, closing the transport connection(s)