Remove BKAD dependency from pkg/kademlia (#294)

* slowly but surely

* hardcode ID for tests so we can get predictable results

* skipping bad test

* removing tests of bkad

* wip

* new algorithm for worker

* clean up

* remove skipped test

* changes

* uncomment

* fixed conflicts

* maybe done ?

* cleanup

* boot bkad

* wip

* cleanup

* undo change

* fixes

* wip

* wip

* moving nodeID around

* wip

* wip

* fixes

* fixes after merge

* added TODO

* fixed tests post identity

* linter fixes

* wip

* PR review comments

* wip

* fixing tests

* fix tests

* force db directory

* bad test

* fixes race condition

* small cleanups

* adding db folder

* testing

* wip

* cleanup

* cleanup

* linters

* export Restrict

* add timeout

* testing

* linters

* forgot one

* moar fixes from master merge

* PR comments

* moar PR comments

* removed stun flag

* remove duplicate declaration

* remove old tests

* remove timeout

* fix tests

* missed one

* changed StringToID >> IDFromString

* PR comments

* stupid linter

* moved overlay mock

* fixed merge conflicts

* fixes

* linter
Dennis Coyle 2018-10-08 11:09:37 -04:00 committed by GitHub
parent 0b4a6188e3
commit dee2c137c8
46 changed files with 1287 additions and 777 deletions

.gitignore

@ -7,6 +7,8 @@
*.so
*.dylib
*.db
pkg/kademlia/db/*.db
db
# Test binary, build with `go test -c`
*.test
@ -43,4 +45,9 @@ protos/google/*
satellite_*
storagenode_*
uplink_*
*.svg
*.svg
big-upload-testfile
big-download-testfile
small-upload-testfile
small-download-testfile
/bin


@ -11,11 +11,12 @@ import (
"github.com/spf13/cobra"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/datarepair/checker"
"storj.io/storj/pkg/datarepair/repairer"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/miniogw"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/datarepair/checker"
"storj.io/storj/pkg/datarepair/repairer"
mock "storj.io/storj/pkg/overlay/mocks"
psserver "storj.io/storj/pkg/piecestore/rpc/server"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/process"
@ -32,8 +33,8 @@ type Satellite struct {
Kademlia kademlia.Config
PointerDB pointerdb.Config
Overlay overlay.Config
Checker checker.Config
Repairer repairer.Config
Checker checker.Config
Repairer repairer.Config
MockOverlay struct {
Enabled bool `default:"true" help:"if false, use real overlay"`
Host string `default:"" help:"if set, the mock overlay will return storage nodes with this host"`
@ -105,11 +106,11 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
runCfg.Satellite.Identity.Address)
var o provider.Responsibility = runCfg.Satellite.Overlay
if runCfg.Satellite.MockOverlay.Enabled {
o = overlay.MockConfig{Nodes: strings.Join(storagenodes, ",")}
o = mock.Config{Nodes: strings.Join(storagenodes, ",")}
}
errch <- runCfg.Satellite.Identity.Run(ctx,
runCfg.Satellite.Kademlia,
runCfg.Satellite.PointerDB,
runCfg.Satellite.Kademlia,
// runCfg.Satellite.Checker,
// runCfg.Satellite.Repairer,
o)
@ -122,5 +123,9 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
errch <- runCfg.Uplink.Run(ctx)
}()
return <-errch
for v := range errch {
err = fmt.Errorf("%s : %s", err, v)
}
return err
}


@ -15,6 +15,7 @@ import (
// "storj.io/storj/pkg/datarepair/checker"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay"
mock "storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/provider"
@ -43,7 +44,7 @@ var (
// Checker checker.Config
// Repairer repairer.Config
Overlay overlay.Config
MockOverlay overlay.MockConfig
MockOverlay mock.Config
}
setupCfg struct {
BasePath string `default:"$CONFDIR" help:"base path for setup"`

db/.keep


@ -0,0 +1,28 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package node
import (
"context"
"storj.io/storj/pkg/pb"
)
// MockClient is a mock implementation of a Node client
type MockClient struct {
response []*pb.Node
}
// Lookup is a mock of node.Client's Lookup method;
// it ignores the request and returns the response stored on the struct
func (mc *MockClient) Lookup(ctx context.Context, to pb.Node, find pb.Node) ([]*pb.Node, error) {
return mc.response, nil
}
// NewMockClient returns a pointer to a MockClient that serves the provided response
func NewMockClient(response []*pb.Node) *MockClient {
return &MockClient{
response: response,
}
}
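
For illustration, here is a minimal sketch of a test that swaps this mock in for a real node client. It is not part of the change; the test name and canned node IDs are hypothetical, and it assumes the testify assertions already used elsewhere in this diff.

package node

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"storj.io/storj/pkg/pb"
)

func TestMockClientLookup(t *testing.T) {
	want := []*pb.Node{{Id: "a"}, {Id: "b"}}
	mc := NewMockClient(want)

	// the mock ignores both arguments and returns the canned response
	got, err := mc.Lookup(context.Background(), pb.Node{}, pb.Node{Id: "target"})
	assert.NoError(t, err)
	assert.Equal(t, want, got)
}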

internal/test/utils.go

@ -0,0 +1,22 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package test
import (
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
// NewNodeStorageValue provides a convenient way to create a node as a storage.Value for testing purposes
func NewNodeStorageValue(t *testing.T, address string) storage.Value {
na := &pb.Node{Id: "", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: address}}
d, err := proto.Marshal(na)
assert.NoError(t, err)
return d
}
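
For example, the overlay tests later in this change seed their mock server with entries like storage.ListItem{Key: storage.Key(id.String()), Value: test.NewNodeStorageValue(t, "127.0.0.1:9090")}, so that lookups against the mock resolve the generated ID to 127.0.0.1:9090.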


@ -5,11 +5,11 @@ package kademlia
import (
"context"
"net"
"github.com/zeebo/errs"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
@ -38,44 +38,30 @@ type Config struct {
// Run implements provider.Responsibility
func (c Config) Run(ctx context.Context, server *provider.Provider) (
err error) {
defer mon.Task()(&ctx)(&err)
// TODO(jt): don't split the host/port
host, port, err := net.SplitHostPort(c.BootstrapAddr)
if err != nil {
return Error.Wrap(err)
}
// TODO(jt): an intro node shouldn't require an ID, and should only be an
// address
in, err := GetIntroNode("", host, port)
// TODO(coyle): I'm thinking we just remove this function and grab from the config.
in, err := GetIntroNode(c.BootstrapAddr)
if err != nil {
return err
}
// TODO(jt): don't split the host/port
host, port, err = net.SplitHostPort(c.TODOListenAddr)
if err != nil {
return Error.Wrap(err)
}
// TODO(jt): kademlia should register on server.GRPC() instead of listening
// itself
kad, err := NewKademlia(server.Identity().ID, []pb.Node{*in}, host, port)
in.Id = "foo"
kad, err := NewKademlia(server.Identity().ID, []pb.Node{*in}, c.TODOListenAddr, server.Identity())
if err != nil {
return err
}
defer func() { _ = kad.Disconnect() }()
// TODO(jt): ListenAndServe should probably be blocking and we should kick
// it off in a goroutine here
err = kad.ListenAndServe()
if err != nil {
return err
}
mn := node.NewServer(kad)
pb.RegisterNodesServer(server.GRPC(), mn)
// TODO(jt): Bootstrap should probably be blocking and we should kick it off
// in a goroutine here
err = kad.Bootstrap(ctx)
if err != nil {
if err = kad.Bootstrap(ctx); err != nil {
return err
}

pkg/kademlia/db/.keep


@ -5,242 +5,190 @@ package kademlia
import (
"context"
"crypto/rand"
"log"
"errors"
"fmt"
"net"
"strconv"
bkad "github.com/coyle/kademlia"
"github.com/zeebo/errs"
"google.golang.org/grpc"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
const (
alpha = 5
defaultIDLength = 256
defaultBucketSize = 20
defaultReplacementCacheSize = 5
)
// NodeErr is the class for all errors pertaining to node operations
var NodeErr = errs.Class("node error")
// BootstrapErr is the class for all errors pertaining to bootstrapping a node
var BootstrapErr = errs.Class("bootstrap node error")
//TODO: shouldn't default to TCP but not sure what to do yet
var defaultTransport = pb.NodeTransport_TCP
// NodeNotFound is returned when a lookup can not produce the requested node
var NodeNotFound = NodeErr.New("node not found")
type lookupOpts struct {
amount int
}
// Kademlia is an implementation of kademlia adhering to the DHT interface.
type Kademlia struct {
routingTable RoutingTable
alpha int // alpha is a system wide concurrency parameter
routingTable *RoutingTable
bootstrapNodes []pb.Node
ip string
port string
stun bool
dht *bkad.DHT
address string
nodeClient node.Client
identity *provider.FullIdentity
}
// NewKademlia returns a newly configured Kademlia instance
func NewKademlia(id dht.NodeID, bootstrapNodes []pb.Node, ip string, port string) (*Kademlia, error) {
if port == "" {
return nil, NodeErr.New("must specify port in request to NewKademlia")
}
ips, err := net.LookupIP(ip)
if err != nil {
return nil, err
}
if len(ips) <= 0 {
return nil, errs.New("Invalid IP")
}
ip = ips[0].String()
bnodes, err := convertProtoNodes(bootstrapNodes)
if err != nil {
return nil, err
}
bdht, err := bkad.NewDHT(&bkad.MemoryStore{}, &bkad.Options{
ID: id.Bytes(),
IP: ip,
Port: port,
BootstrapNodes: bnodes,
func NewKademlia(id dht.NodeID, bootstrapNodes []pb.Node, address string, identity *provider.FullIdentity) (*Kademlia, error) {
self := pb.Node{Id: id.String(), Address: &pb.NodeAddress{Address: address}}
rt, err := NewRoutingTable(&self, &RoutingOptions{
kpath: fmt.Sprintf("db/kbucket_%s.db", id.String()[:5]),
npath: fmt.Sprintf("db/nbucket_%s.db", id.String()[:5]),
idLength: defaultIDLength,
bucketSize: defaultBucketSize,
rcBucketSize: defaultReplacementCacheSize,
})
if err != nil {
return nil, err
return nil, BootstrapErr.Wrap(err)
}
rt := RoutingTable{
// ht: bdht.HT,
// dht: bdht,
for _, v := range bootstrapNodes {
ok, err := rt.addNode(&v)
if !ok || err != nil {
return nil, err
}
}
return &Kademlia{
k := &Kademlia{
alpha: alpha,
routingTable: rt,
bootstrapNodes: bootstrapNodes,
ip: ip,
port: port,
stun: true,
dht: bdht,
}, nil
address: address,
identity: identity,
}
nc, err := node.NewNodeClient(identity, self, k)
if err != nil {
return nil, BootstrapErr.Wrap(err)
}
k.nodeClient = nc
return k, nil
}
// Disconnect safely closes connections to the Kademlia network
func (k Kademlia) Disconnect() error {
return k.dht.Disconnect()
func (k *Kademlia) Disconnect() error {
// TODO(coyle)
return errors.New("TODO Disconnect")
}
// GetNodes returns all nodes from a starting node up to a maximum limit
// stored in the local routing table limiting the result by the specified restrictions
func (k Kademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
if start == "" {
start = k.dht.GetSelfID()
}
nn, err := k.dht.FindNodes(ctx, start, limit)
if err != nil {
return []*pb.Node{}, err
}
nodes := convertNetworkNodes(nn)
for _, r := range restrictions {
nodes = restrict(r, nodes)
}
return nodes, nil
func (k *Kademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
// TODO(coyle)
return []*pb.Node{}, errors.New("TODO GetNodes")
}
// GetRoutingTable provides the routing table for the Kademlia DHT
func (k *Kademlia) GetRoutingTable(ctx context.Context) (dht.RoutingTable, error) {
return &RoutingTable{
// ht: k.dht.HT,
// dht: k.dht,
}, nil
return k.routingTable, nil
}
// Bootstrap contacts one of a set of pre defined trusted nodes on the network and
// begins populating the local Kademlia node
func (k *Kademlia) Bootstrap(ctx context.Context) error {
return k.dht.Bootstrap()
// What I want to do here is do a normal lookup for myself
// so call lookup(ctx, nodeImLookingFor)
if len(k.bootstrapNodes) == 0 {
return BootstrapErr.New("no bootstrap nodes provided")
}
return k.lookup(ctx, node.IDFromString(k.routingTable.self.GetId()), lookupOpts{amount: 5})
}
func (k *Kademlia) lookup(ctx context.Context, target dht.NodeID, opts lookupOpts) error {
kb := k.routingTable.K()
// look in routing table for targetID
nodes, err := k.routingTable.FindNear(target, kb)
if err != nil {
return err
}
ctx, cf := context.WithCancel(ctx)
w := newWorker(ctx, k.routingTable, nodes, k.nodeClient, target, opts.amount)
w.SetCancellation(cf)
wch := make(chan *pb.Node, k.alpha)
// kick off go routine to fetch work and send on work channel
go w.getWork(ctx, wch)
// kick off alpha works to consume from work channel
for i := 0; i < k.alpha; i++ {
go w.work(ctx, wch)
}
<-ctx.Done()
return nil
}
// Ping checks that the provided node is still accessible on the network
func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
n, err := convertProtoNode(node)
if err != nil {
return pb.Node{}, err
}
ok, err := k.dht.Ping(n)
if err != nil {
return pb.Node{}, err
}
if !ok {
return pb.Node{}, NodeErr.New("node unavailable")
}
return node, nil
// TODO(coyle)
return pb.Node{}, nil
}
// FindNode looks up the provided NodeID first in the local Node, and if it is not found
// begins searching the network for the NodeID. Returns and error if node was not found
func (k *Kademlia) FindNode(ctx context.Context, ID dht.NodeID) (pb.Node, error) {
nodes, err := k.dht.FindNode(ID.Bytes())
if err != nil {
return pb.Node{}, err
}
for _, v := range nodes {
if string(v.ID) == ID.String() {
return pb.Node{Id: string(v.ID), Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: net.JoinHostPort(v.IP.String(), strconv.Itoa(v.Port)),
},
}, nil
}
}
return pb.Node{}, NodeErr.New("node not found")
//TODO(coyle)
return pb.Node{}, NodeErr.New("TODO FindNode")
}
// ListenAndServe connects the kademlia node to the network and listens for incoming requests
func (k *Kademlia) ListenAndServe() error {
if err := k.dht.CreateSocket(); err != nil {
identOpt, err := k.identity.ServerOption()
if err != nil {
return err
}
go func() {
if err := k.dht.Listen(); err != nil {
log.Printf("Failed to listen on the dht: %s\n", err)
}
}()
grpcServer := grpc.NewServer(identOpt)
mn := node.NewServer(k)
pb.RegisterNodesServer(grpcServer, mn)
lis, err := net.Listen("tcp", k.address)
if err != nil {
return err
}
if err := grpcServer.Serve(lis); err != nil {
return err
}
defer grpcServer.Stop()
return nil
}
func convertProtoNodes(n []pb.Node) ([]*bkad.NetworkNode, error) {
nn := make([]*bkad.NetworkNode, len(n))
for i, v := range n {
node, err := convertProtoNode(v)
if err != nil {
return nil, err
}
nn[i] = node
}
return nn, nil
}
func convertNetworkNodes(n []*bkad.NetworkNode) []*pb.Node {
nn := make([]*pb.Node, len(n))
for i, v := range n {
nn[i] = convertNetworkNode(v)
}
return nn
}
func convertNetworkNode(v *bkad.NetworkNode) *pb.Node {
return &pb.Node{
Id: string(v.ID),
Address: &pb.NodeAddress{Transport: defaultTransport, Address: net.JoinHostPort(v.IP.String(), strconv.Itoa(v.Port))},
}
}
func convertProtoNode(v pb.Node) (*bkad.NetworkNode, error) {
host, port, err := net.SplitHostPort(v.GetAddress().GetAddress())
if err != nil {
return nil, err
}
nn := bkad.NewNetworkNode(host, port)
nn.ID = []byte(v.GetId())
return nn, nil
}
// newID generates a new random ID.
// This purely to get things working. We shouldn't use this as the ID in the actual network
func newID() ([]byte, error) {
result := make([]byte, 20)
_, err := rand.Read(result)
return result, err
}
// GetIntroNode determines the best node to bootstrap a new node onto the network
func GetIntroNode(id, ip, port string) (*pb.Node, error) {
addr := "bootstrap.storj.io:8080"
if ip != "" && port != "" {
addr = net.JoinHostPort(ip, port)
}
if id == "" {
i, err := newID()
if err != nil {
return nil, err
}
id = string(i)
func GetIntroNode(addr string) (*pb.Node, error) {
if addr == "" {
addr = "bootstrap.storj.io:8080"
}
return &pb.Node{
Id: id,
Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: addr,
@ -248,7 +196,8 @@ func GetIntroNode(id, ip, port string) (*pb.Node, error) {
}, nil
}
func restrict(r pb.Restriction, n []*pb.Node) []*pb.Node {
// Restrict filters the provided nodes, dropping those that do not meet the minimum storage requirements
func Restrict(r pb.Restriction, n []*pb.Node) []*pb.Node {
oper := r.GetOperand()
op := r.GetOperator()
val := r.GetValue()
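
The lookup added above blocks on <-ctx.Done() and relies on the worker (pkg/kademlia/workers.go, later in this diff) to cancel the shared context once its queue drains. The following stripped-down sketch shows only that cancellation pattern; the names and timings are chosen for illustration and are not part of the change.

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	work := make(chan int, 5)

	// producer: hands out work, then schedules cancellation after a grace period,
	// mirroring getWork's time.AfterFunc(timeout, w.cancel)
	go func() {
		for i := 0; i < 3; i++ {
			work <- i
		}
		time.AfterFunc(250*time.Millisecond, cancel)
	}()

	// alpha consumers drain the channel until the context is cancelled, mirroring worker.work
	for i := 0; i < 2; i++ {
		go func(id int) {
			for {
				select {
				case <-ctx.Done():
					return
				case n := <-work:
					fmt.Println("worker", id, "processed item", n)
				}
			}
		}(i)
	}

	// the caller (here standing in for Kademlia.lookup) returns only once the producer fires cancel
	<-ctx.Done()
}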


@ -5,251 +5,163 @@ package kademlia
import (
"context"
"math/rand"
"strconv"
"net"
"testing"
"time"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
const (
testNetSize = 20
)
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
bid, err := newID()
assert.NoError(t, err)
bnid := NodeID(bid)
dhts := []dht.DHT{}
p, err := strconv.Atoi(port)
pm := strconv.Itoa(p)
assert.NoError(t, err)
intro, err := GetIntroNode(bnid.String(), ip, pm)
assert.NoError(t, err)
boot, err := NewKademlia(&bnid, []pb.Node{*intro}, ip, pm)
assert.NoError(t, err)
//added bootnode to dhts so it could be closed in defer as well
dhts = append(dhts, boot)
rt, err := boot.GetRoutingTable(context.Background())
assert.NoError(t, err)
bootNode := rt.Local()
err = boot.ListenAndServe()
assert.NoError(t, err)
p++
err = boot.Bootstrap(context.Background())
assert.NoError(t, err)
for i := 0; i < testNetSize; i++ {
gg := strconv.Itoa(p)
nid, err := newID()
assert.NoError(t, err)
id := NodeID(nid)
dht, err := NewKademlia(&id, []pb.Node{bootNode}, ip, gg)
assert.NoError(t, err)
p++
dhts = append(dhts, dht)
err = dht.ListenAndServe()
assert.NoError(t, err)
err = dht.Bootstrap(context.Background())
assert.NoError(t, err)
func TestNewKademlia(t *testing.T) {
cases := []struct {
id dht.NodeID
bn []pb.Node
addr string
expectedErr error
}{
{
id: func() *node.ID {
id, err := node.NewID()
assert.NoError(t, err)
return id
}(),
bn: []pb.Node{pb.Node{Id: "foo"}},
addr: "127.0.0.1:8080",
},
}
return dhts, bootNode
for _, v := range cases {
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
actual, err := NewKademlia(v.id, v.bn, v.addr, identity)
assert.Equal(t, v.expectedErr, err)
assert.Equal(t, actual.bootstrapNodes, v.bn)
assert.NotNil(t, actual.nodeClient)
assert.NotNil(t, actual.routingTable)
}
}
func newTestKademlia(t *testing.T, ip, port string, b pb.Node) *Kademlia {
i, err := newID()
func TestLookup(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
id := NodeID(i)
n := []pb.Node{b}
kad, err := NewKademlia(&id, n, ip, port)
assert.NoError(t, err)
return kad
srv, mns := newTestServer([]*pb.Node{&pb.Node{Id: "foo"}})
go func() { _ = srv.Serve(lis) }()
defer srv.Stop()
k := func() *Kademlia {
// make new identity
id, err := node.NewID()
assert.NoError(t, err)
id2, err := node.NewID()
assert.NoError(t, err)
// initialize kademlia
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
k, err := NewKademlia(id, []pb.Node{pb.Node{Id: id2.String(), Address: &pb.NodeAddress{Address: lis.Addr().String()}}}, lis.Addr().String(), identity)
assert.NoError(t, err)
return k
}()
cases := []struct {
k *Kademlia
target dht.NodeID
opts lookupOpts
expected *pb.Node
expectedErr error
}{
{
k: k,
target: func() *node.ID {
id, err := node.NewID()
assert.NoError(t, err)
mns.returnValue = []*pb.Node{&pb.Node{Id: id.String(), Address: &pb.NodeAddress{Address: "127.0.0.1:0"}}}
return id
}(),
opts: lookupOpts{amount: 5},
expected: &pb.Node{},
expectedErr: nil,
},
{
k: k,
target: func() *node.ID {
id, err := node.NewID()
assert.NoError(t, err)
return id
}(),
opts: lookupOpts{amount: 5},
expected: nil,
expectedErr: nil,
},
}
for _, v := range cases {
err := v.k.lookup(context.Background(), v.target, v.opts)
assert.Equal(t, v.expectedErr, err)
time.Sleep(1 * time.Second)
}
}
func TestBootstrap(t *testing.T) {
t.Skip()
dhts, bootNode := bootstrapTestNetwork(t, "127.0.0.1", "3000")
bn, s := testNode(t, []pb.Node{})
defer s.Stop()
defer func(d []dht.DHT) {
for _, v := range d {
_ = v.Disconnect()
}
}(dhts)
n1, s1 := testNode(t, []pb.Node{*bn.routingTable.self})
defer s1.Stop()
cases := []struct {
k *Kademlia
}{
{
k: newTestKademlia(t, "127.0.0.1", "2999", bootNode),
},
}
for _, v := range cases {
defer func() { assert.NoError(t, v.k.Disconnect()) }()
err := v.k.ListenAndServe()
assert.NoError(t, err)
err = v.k.Bootstrap(context.Background())
assert.NoError(t, err)
ctx := context.Background()
rt, err := dhts[0].GetRoutingTable(context.Background())
assert.NoError(t, err)
localID := rt.Local().Id
n := NodeID(localID)
node, err := v.k.FindNode(ctx, &n)
assert.NoError(t, err)
assert.NotEmpty(t, node)
assert.Equal(t, localID, node.Id)
assert.NoError(t, v.k.dht.Disconnect())
}
}
func TestGetNodes(t *testing.T) {
t.Skip()
dhts, bootNode := bootstrapTestNetwork(t, "127.0.0.1", "6001")
defer func(d []dht.DHT) {
for _, v := range d {
assert.NoError(t, v.Disconnect())
}
}(dhts)
cases := []struct {
k *Kademlia
limit int
expectedErr error
restrictions []pb.Restriction
}{
{
k: newTestKademlia(t, "127.0.0.1", "6000", bootNode),
limit: 10,
expectedErr: nil,
},
}
for _, v := range cases {
defer func() { assert.NoError(t, v.k.Disconnect()) }()
ctx := context.Background()
err := v.k.ListenAndServe()
assert.Equal(t, v.expectedErr, err)
time.Sleep(time.Second)
err = v.k.Bootstrap(ctx)
assert.NoError(t, err)
rt, err := v.k.GetRoutingTable(context.Background())
assert.NoError(t, err)
start := rt.Local().Id
nodes, err := v.k.GetNodes(ctx, start, v.limit, v.restrictions...)
assert.Equal(t, v.expectedErr, err)
assert.Len(t, nodes, v.limit)
assert.NoError(t, v.k.dht.Disconnect())
}
}
func TestFindNode(t *testing.T) {
t.Skip()
dhts, bootNode := bootstrapTestNetwork(t, "127.0.0.1", "5001")
defer func(d []dht.DHT) {
for _, v := range d {
assert.NoError(t, v.Disconnect())
}
}(dhts)
cases := []struct {
k *Kademlia
expectedErr error
}{
{
k: newTestKademlia(t, "127.0.0.1", "6000", bootNode),
expectedErr: nil,
},
}
for _, v := range cases {
defer func() { assert.NoError(t, v.k.Disconnect()) }()
ctx := context.Background()
go func() { assert.NoError(t, v.k.ListenAndServe()) }()
time.Sleep(time.Second)
assert.NoError(t, v.k.Bootstrap(ctx))
rt, err := dhts[rand.Intn(testNetSize)].GetRoutingTable(context.Background())
assert.NoError(t, err)
id := NodeID(rt.Local().Id)
node, err := v.k.FindNode(ctx, &id)
assert.Equal(t, v.expectedErr, err)
assert.NotZero(t, node)
assert.Equal(t, node.Id, id.String())
}
}
func TestPing(t *testing.T) {
t.Skip()
dhts, bootNode := bootstrapTestNetwork(t, "127.0.0.1", "4001")
defer func(d []dht.DHT) {
for _, v := range d {
assert.NoError(t, v.Disconnect())
}
}(dhts)
r := dhts[rand.Intn(testNetSize)]
rt, err := r.GetRoutingTable(context.Background())
addr := rt.Local().Address
err := n1.Bootstrap(context.Background())
assert.NoError(t, err)
cases := []struct {
k *Kademlia
input pb.Node
expectedErr error
}{
{
k: newTestKademlia(t, "127.0.0.1", "6000", bootNode),
input: pb.Node{
Id: rt.Local().Id,
Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: addr.Address,
},
},
expectedErr: nil,
},
}
n2, s2 := testNode(t, []pb.Node{*bn.routingTable.self})
defer s2.Stop()
for _, v := range cases {
defer func() { assert.NoError(t, v.k.Disconnect()) }()
ctx := context.Background()
go func() { assert.NoError(t, v.k.ListenAndServe()) }()
time.Sleep(time.Second)
err := v.k.Bootstrap(ctx)
assert.NoError(t, err)
err = n2.Bootstrap(context.Background())
assert.NoError(t, err)
time.Sleep(time.Second)
node, err := v.k.Ping(ctx, v.input)
assert.Equal(t, v.expectedErr, err)
assert.NotEmpty(t, node)
assert.Equal(t, v.input, node)
assert.NoError(t, v.k.dht.Disconnect())
}
nodeIDs, err := n2.routingTable.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
assert.Len(t, nodeIDs, 3)
}
func testNode(t *testing.T, bn []pb.Node) (*Kademlia, *grpc.Server) {
// new address
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
// new ID
id, err := node.NewID()
assert.NoError(t, err)
// New identity
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
// new kademlia
k, err := NewKademlia(id, bn, lis.Addr().String(), identity)
assert.NoError(t, err)
s := node.NewServer(k)
identOpt, err := identity.ServerOption()
assert.NoError(t, err)
grpcServer := grpc.NewServer(identOpt)
pb.RegisterNodesServer(grpcServer, s)
go func() { _ = grpcServer.Serve(lis) }()
return k, grpcServer
}


@ -1,37 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import base58 "github.com/jbenet/go-base58"
// NodeID is the unique identifier of a Node in the overlay network
type NodeID string
// String transforms the nodeID to a string type
func (n *NodeID) String() string {
return string(*n)
}
// Bytes transforms the nodeID to type []byte
func (n *NodeID) Bytes() []byte {
return []byte(*n)
}
// StringToNodeID trsansforms a string to a NodeID
func StringToNodeID(s string) *NodeID {
n := NodeID(s)
return &n
}
// NewID returns a pointer to a newly intialized NodeID
// TODO@ASK: this should be removed; superseded by `CASetupConfig.Create` / `IdentitySetupConfig.Create`
func NewID() (*NodeID, error) {
b, err := newID()
if err != nil {
return nil, err
}
bb := NodeID(base58.Encode(b))
return &bb, nil
}

pkg/kademlia/queue.go

@ -0,0 +1,60 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"math/big"
"storj.io/storj/pkg/pb"
)
// An Item is something we manage in a priority queue.
type Item struct {
value *pb.Node // The value of the item; arbitrary.
priority *big.Int // The priority of the item in the queue.
// The index is needed by update and is maintained by the heap.Interface methods.
index int // The index of the item in the heap.
}
// A PriorityQueue implements heap.Interface and holds Items.
type PriorityQueue []*Item
// Len returns the length of the priority queue
func (pq PriorityQueue) Len() int { return len(pq) }
// Less reports whether the item at index i has a smaller XOR priority than the item at index j
func (pq PriorityQueue) Less(i, j int) bool {
// sorting by XOR distance means the node popped next is the one closest to the target
if i := pq[i].priority.Cmp(pq[j].priority); i < 0 {
return true
}
return false
}
// Swap swaps the items at indexes i and j
func (pq PriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push appends an item to the end of the queue
// callers should go through heap.Push, or call heap.Init/heap.Fix afterwards, to keep the heap ordered
func (pq *PriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*Item)
item.index = n
*pq = append(*pq, item)
}
// Pop returns the item with the lowest priority
func (pq *PriorityQueue) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
item.index = -1 // for safety
*pq = old[0 : n-1]
return item
}
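
To see why the XOR priorities produce this ordering, take the 4-bit IDs from the test that follows: with target 0001, the node IDs 0010, 0100, 1001 and 1100 have XOR distances 0011 = 3, 0101 = 5, 1000 = 8 and 1101 = 13, so popping the heap returns the nodes in exactly that order (3, 5, 8, 13).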


@ -0,0 +1,62 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"container/heap"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/pb"
)
func TestPriorityQueue(t *testing.T) {
cases := []struct {
target *big.Int
nodes map[string]*pb.Node
pq PriorityQueue
expected []int
}{
{
target: func() *big.Int {
i, ok := new(big.Int).SetString("0001", 2)
assert.True(t, ok)
return i
}(),
nodes: map[string]*pb.Node{
"1001": &pb.Node{Id: "1001"},
"0100": &pb.Node{Id: "0100"},
"1100": &pb.Node{Id: "1100"},
"0010": &pb.Node{Id: "0010"},
},
pq: make(PriorityQueue, 4),
expected: []int{3, 5, 8, 13},
},
}
for _, v := range cases {
i := 0
for id, value := range v.nodes {
bn, ok := new(big.Int).SetString(id, 2)
assert.True(t, ok)
v.pq[i] = &Item{
value: value,
priority: new(big.Int).Xor(v.target, bn),
index: i,
}
i++
}
heap.Init(&v.pq)
i = 0
for v.pq.Len() > 0 {
item := heap.Pop(&v.pq).(*Item)
assert.Equal(t, big.NewInt(int64(v.expected[i])), item.priority)
i++
}
}
}


@ -142,26 +142,16 @@ func (rt *RoutingTable) GetBuckets() (k []dht.Bucket, err error) {
return bs, nil
}
// FindNear returns the node corresponding to the provided nodeID if present in the routing table
// otherwise returns all Nodes closest via XOR to the provided nodeID up to the provided limit
// FindNear returns all nodes from the routing table closest via XOR to the provided nodeID,
// up to the provided limit, plus the local node itself
func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*pb.Node, error) {
//if id is in the routing table
n, err := rt.nodeBucketDB.Get(id.Bytes())
if n != nil {
ns, err := unmarshalNodes(storage.Keys{id.Bytes()}, []storage.Value{n})
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not unmarshal node %s", err)
}
return ns, nil
}
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return []*pb.Node{}, RoutingErr.New("could not get key from rt %s", err)
}
// if id is not in the routing table
nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get node ids %s", err)
}
sortedIDs := sortByXOR(nodeIDs, id.Bytes())
if len(sortedIDs) >= limit {
sortedIDs = sortedIDs[:limit]
@ -170,10 +160,12 @@ func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*pb.Node, error) {
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get nodes %s", err)
}
unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
}
return unmarshaledNodes, nil
}
@ -184,6 +176,7 @@ func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return RoutingErr.New("could not get node %s", err)
}
if v != nil {
err = rt.updateNode(node)
if err != nil {
@ -191,6 +184,7 @@ func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
}
return nil
}
_, err = rt.addNode(node)
if err != nil {
return RoutingErr.New("could not add node %s", err)


@ -226,7 +226,12 @@ func (rt *RoutingTable) determineFurthestIDWithinK(nodeIDs storage.Keys) ([]byte
// xorTwoIds: helper, finds the xor distance between two byte slices
func xorTwoIds(id []byte, comparisonID []byte) []byte {
var xorArr []byte
for i := 0; i < len(id); i++ {
s := len(id)
if s > len(comparisonID) {
s = len(comparisonID)
}
for i := 0; i < s; i++ {
xor := id[i] ^ comparisonID[i]
xorArr = append(xorArr, xor)
}
@ -313,6 +318,7 @@ func (rt *RoutingTable) getNodesFromIDs(nodeIDs storage.Keys) (storage.Keys, []s
if err != nil {
return nodeIDs, nodes, RoutingErr.New("could not get node id %v, %s", v, err)
}
nodes = append(nodes, n)
}
return nodeIDs, nodes, nil
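
As a quick example of the new length guard in xorTwoIds above: xorTwoIds([]byte{0xF0, 0x0F}, []byte{0xF0}) now iterates only over the shared prefix and returns []byte{0x00}, where the previous loop over len(id) would have indexed past the end of the shorter slice and panicked.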


@ -10,6 +10,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -102,35 +103,35 @@ func TestFindNear(t *testing.T) {
assert.NoError(t, err)
cases := []struct {
testID string
node pb.Node
testID string
node pb.Node
expectedNodes []*pb.Node
limit int
limit int
}{
{testID: "limit 1 on node1: return node1",
node: *node1,
expectedNodes: []*pb.Node{node1},
limit: 1,
node: *node1,
expectedNodes: []*pb.Node{node1},
limit: 1,
},
{testID: "limit 2 on node3: return nodes2, node1",
node: *node3,
expectedNodes: []*pb.Node{node2, node1},
limit: 2,
node: *node3,
expectedNodes: []*pb.Node{node2, node1},
limit: 2,
},
{testID: "limit 1 on node3: return node2",
node: *node3,
expectedNodes: []*pb.Node{node2},
limit: 1,
node: *node3,
expectedNodes: []*pb.Node{node2},
limit: 1,
},
{testID: "limit 3 on node3: return node2, node1",
node: *node3,
expectedNodes: []*pb.Node{node2, node1},
limit: 3,
node: *node3,
expectedNodes: []*pb.Node{node2, node1},
limit: 3,
},
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
ns, err := rt.FindNear(StringToNodeID(c.node.Id), c.limit)
ns, err := rt.FindNear(node.IDFromString(c.node.Id), c.limit)
assert.NoError(t, err)
assert.Equal(t, c.expectedNodes, ns)
})
@ -147,9 +148,9 @@ func TestConnectionSuccess(t *testing.T) {
node1 := &pb.Node{Id: id, Address: address1}
node2 := &pb.Node{Id: id2, Address: address2}
cases := []struct {
testID string
node *pb.Node
id string
testID string
node *pb.Node
id string
address *pb.NodeAddress
}{
{testID: "Update Node",

pkg/kademlia/workers.go

@ -0,0 +1,203 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"container/heap"
"context"
"log"
"math/big"
"sync"
"time"
"github.com/zeebo/errs"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
)
var (
// WorkerError is the class of errors for the worker struct
WorkerError = errs.Class("worker error")
// default timeout is the minimum timeout for worker cancellation
// 250ms was the minimum value allowing current workers to finish work
// before returning
defaultTimeout = 250 * time.Millisecond
)
// worker pops work off a priority queue and does lookups on the work received
type worker struct {
contacted map[string]bool
pq PriorityQueue
mu *sync.Mutex
maxResponse time.Duration
cancel context.CancelFunc
nodeClient node.Client
find dht.NodeID
workInProgress int
k int
}
func newWorker(ctx context.Context, rt *RoutingTable, nodes []*pb.Node, nc node.Client, target dht.NodeID, k int) *worker {
t := new(big.Int).SetBytes(target.Bytes())
pq := func(nodes []*pb.Node) PriorityQueue {
pq := make(PriorityQueue, len(nodes))
for i, node := range nodes {
bnode := new(big.Int).SetBytes([]byte(node.GetId()))
pq[i] = &Item{
value: node,
priority: new(big.Int).Xor(t, bnode),
index: i,
}
}
heap.Init(&pq)
return pq
}(nodes)
return &worker{
contacted: map[string]bool{},
pq: pq,
mu: &sync.Mutex{},
maxResponse: 0 * time.Millisecond,
nodeClient: nc,
find: target,
workInProgress: 0,
k: k,
}
}
// create x workers
// have a worker that gets work off the queue
// send that work on a channel
// have workers get work available off channel
// after queue is empty and no work is in progress, close channel.
func (w *worker) work(ctx context.Context, ch chan *pb.Node) {
// grab uncontacted node from working set
// change status to inprogress
// ask node for target
// if node has target cancel ctx and send node
for {
select {
case <-ctx.Done():
return
case n := <-ch:
// network lookup for nodes
nodes := w.lookup(ctx, n)
// update our priority queue
w.update(nodes)
}
}
}
func (w *worker) getWork(ctx context.Context, ch chan *pb.Node) {
for {
if ctx.Err() != nil {
return
}
w.mu.Lock()
if w.pq.Len() <= 0 && w.workInProgress <= 0 {
w.mu.Unlock()
timeout := defaultTimeout
if timeout < (2 * w.maxResponse) {
timeout = 2 * w.maxResponse
}
time.AfterFunc(timeout, w.cancel)
return
}
if w.pq.Len() <= 0 {
w.mu.Unlock()
// if there is nothing left to get off the queue
// and the work-in-progress is not empty
// let's wait a bit for the workers to populate the queue
time.Sleep(50 * time.Millisecond)
continue
}
w.workInProgress++
ch <- w.pq.Pop().(*Item).value
w.mu.Unlock()
}
}
func (w *worker) lookup(ctx context.Context, node *pb.Node) []*pb.Node {
start := time.Now()
if node.GetAddress() == nil {
return nil
}
nodes, err := w.nodeClient.Lookup(ctx, *node, pb.Node{Id: w.find.String()})
if err != nil {
// TODO(coyle): I think we might want to do another look up on this node or update something
// but for now let's just log and ignore.
log.Printf("Error occurred during lookup for %s on %s :: error = %s", w.find.String(), node.GetId(), err.Error())
return []*pb.Node{}
}
// add node to the previously contacted list so we don't duplicate lookups
w.mu.Lock()
w.contacted[node.GetId()] = true
w.mu.Unlock()
latency := time.Since(start)
if latency > w.maxResponse {
w.maxResponse = latency
}
return nodes
}
func (w *worker) update(nodes []*pb.Node) {
t := new(big.Int).SetBytes(w.find.Bytes())
w.mu.Lock()
defer w.mu.Unlock()
for _, v := range nodes {
// if we have already done a lookup on this node we don't want to do it again for this lookup loop
if w.contacted[v.GetId()] {
continue
}
heap.Push(&w.pq, &Item{
value: v,
priority: new(big.Int).Xor(t, new(big.Int).SetBytes(w.find.Bytes())),
})
}
// reinitialize heap
heap.Init(&w.pq)
// only keep the k closest nodes
if len(w.pq) <= w.k {
w.workInProgress--
return
}
pq := PriorityQueue{}
for i := 0; i < w.k; i++ {
if len(w.pq) > 0 {
item := heap.Pop(&w.pq)
heap.Push(&pq, item)
}
}
// reinitialize heap
heap.Init(&pq)
// set w.pq to the new pq with the k closest nodes
w.pq = pq
w.workInProgress--
}
// SetCancellation adds the cancel function to the worker
func (w *worker) SetCancellation(cf context.CancelFunc) {
w.cancel = cf
}
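
As a concrete example of the cancellation timing in getWork: the timeout is the larger of the 250 ms default and twice the slowest observed lookup, so a run whose maxResponse was 400 ms schedules cancellation 800 ms after the queue empties, while one whose slowest response was 50 ms waits only the 250 ms floor.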


@ -0,0 +1,213 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information
package kademlia
import (
"context"
"net"
"sync/atomic"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/pkg/dht/mocks"
"storj.io/storj/pkg/node"
mock "storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
var (
ctx = context.Background()
)
func TestGetWork(t *testing.T) {
cases := []struct {
name string
worker *worker
expected *pb.Node
ch chan *pb.Node
}{
{
name: "test valid chore returned",
worker: newWorker(context.Background(), nil, []*pb.Node{&pb.Node{Id: "1001"}}, nil, node.IDFromString("1000"), 5),
expected: &pb.Node{Id: "1001"},
ch: make(chan *pb.Node, 2),
},
{
name: "test no chore left",
worker: func() *worker {
w := newWorker(context.Background(), nil, []*pb.Node{&pb.Node{Id: "foo"}}, nil, node.IDFromString("foo"), 5)
w.maxResponse = 0
w.pq.Pop()
assert.Len(t, w.pq, 0)
return w
}(),
expected: nil,
ch: make(chan *pb.Node, 2),
},
}
for _, v := range cases {
ctx, cf := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cf()
v.worker.cancel = cf
v.worker.getWork(ctx, v.ch)
if v.expected != nil {
actual := <-v.ch
assert.Equal(t, v.expected, actual)
} else {
assert.Len(t, v.ch, 0)
}
}
}
func TestWorkerLookup(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDHT := mock_dht.NewMockDHT(ctrl)
mockRT := mock_dht.NewMockRoutingTable(ctrl)
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
srv, mock := newTestServer(nil)
go func() { _ = srv.Serve(lis) }()
defer srv.Stop()
cases := []struct {
name string
worker *worker
work *pb.Node
expected []*pb.Node
}{
{
name: "test valid chore returned",
worker: func() *worker {
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
nc, err := node.NewNodeClient(identity, pb.Node{Id: "foo", Address: &pb.NodeAddress{Address: "127.0.0.1:0"}}, mockDHT)
assert.NoError(t, err)
mock.returnValue = []*pb.Node{&pb.Node{Id: "foo"}}
return newWorker(context.Background(), nil, []*pb.Node{&pb.Node{Id: "foo"}}, nc, node.IDFromString("foo"), 5)
}(),
work: &pb.Node{Id: "foo", Address: &pb.NodeAddress{Address: lis.Addr().String()}},
expected: []*pb.Node{&pb.Node{Id: "foo"}},
},
}
for _, v := range cases {
mockDHT.EXPECT().GetRoutingTable(gomock.Any()).Return(mockRT, nil)
mockRT.EXPECT().ConnectionSuccess(gomock.Any()).Return(nil)
actual := v.worker.lookup(context.Background(), v.work)
assert.Equal(t, v.expected, actual)
assert.Equal(t, int32(1), mock.queryCalled)
}
}
func TestUpdate(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockDHT := mock_dht.NewMockDHT(ctrl)
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
srv, _ := newTestServer(nil)
go func() { _ = srv.Serve(lis) }()
defer srv.Stop()
cases := []struct {
name string
worker *worker
input []*pb.Node
expectedQueueLength int
expected []*pb.Node
expectedErr error
}{
{
name: "test nil nodes",
worker: func() *worker {
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
nc, err := node.NewNodeClient(identity, pb.Node{Id: "foo", Address: &pb.NodeAddress{Address: ":7070"}}, mockDHT)
assert.NoError(t, err)
return newWorker(context.Background(), nil, []*pb.Node{&pb.Node{Id: "0000"}}, nc, node.IDFromString("foo"), 2)
}(),
expectedQueueLength: 1,
input: nil,
expectedErr: WorkerError.New("nodes must not be empty"),
expected: []*pb.Node{&pb.Node{Id: "0000"}},
},
{
name: "test combined less than k",
worker: func() *worker {
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
nc, err := node.NewNodeClient(identity, pb.Node{Id: "foo", Address: &pb.NodeAddress{Address: ":7070"}}, mockDHT)
assert.NoError(t, err)
return newWorker(context.Background(), nil, []*pb.Node{&pb.Node{Id: "0001"}}, nc, node.IDFromString("1100"), 2)
}(),
expectedQueueLength: 2,
expected: []*pb.Node{&pb.Node{Id: "0100"}, &pb.Node{Id: "1001"}},
input: []*pb.Node{&pb.Node{Id: "1001"}, &pb.Node{Id: "0100"}},
expectedErr: nil,
},
}
for _, v := range cases {
v.worker.update(v.input)
assert.Len(t, v.worker.pq, v.expectedQueueLength)
i := 0
for v.worker.pq.Len() > 0 {
assert.Equal(t, v.expected[i], v.worker.pq.Pop().(*Item).value)
i++
}
}
}
func newTestServer(nn []*pb.Node) (*grpc.Server, *mockNodeServer) {
ca, err := provider.NewCA(ctx, 12, 4)
if err != nil {
return nil, nil
}
identity, err := ca.NewIdentity()
if err != nil {
return nil, nil
}
identOpt, err := identity.ServerOption()
if err != nil {
return nil, nil
}
grpcServer := grpc.NewServer(identOpt)
mn := &mockNodeServer{queryCalled: 0}
pb.RegisterNodesServer(grpcServer, mn)
pb.RegisterOverlayServer(grpcServer, mock.NewOverlay(nn))
return grpcServer, mn
}
type mockNodeServer struct {
queryCalled int32
returnValue []*pb.Node
}
func (mn *mockNodeServer) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
atomic.AddInt32(&mn.queryCalled, 1)
return &pb.QueryResponse{Response: mn.returnValue}, nil
}


@ -8,6 +8,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pool"
"storj.io/storj/pkg/provider"
@ -18,9 +19,10 @@ import (
var NodeClientErr = errs.Class("node client error")
// NewNodeClient instantiates a node client
func NewNodeClient(identity *provider.FullIdentity, self pb.Node) (Client, error) {
func NewNodeClient(identity *provider.FullIdentity, self pb.Node, dht dht.DHT) (Client, error) {
client := transport.NewClient(identity)
return &Node{
dht: dht,
self: self,
tc: client,
cache: pool.NewConnectionPool(),

pkg/node/id.go

@ -0,0 +1,49 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package node
import (
"crypto/rand"
base58 "github.com/jbenet/go-base58"
)
// ID is the unique identifier of a Node in the overlay network
type ID string
// String transforms the ID to a string type
func (n *ID) String() string {
return string(*n)
}
// Bytes transforms the ID to type []byte
func (n *ID) Bytes() []byte {
return []byte(*n)
}
// IDFromString transforms a string to an ID
func IDFromString(s string) *ID {
n := ID(s)
return &n
}
// NewID returns a pointer to a newly initialized ID
// TODO@ASK: this should be removed; superseded by `CASetupConfig.Create` / `IdentitySetupConfig.Create`
func NewID() (*ID, error) {
b, err := newID()
if err != nil {
return nil, err
}
bb := ID(base58.Encode(b))
return &bb, nil
}
// newID generates a new random ID.
// This is purely to get things working. We shouldn't use this as the ID in the actual network
func newID() ([]byte, error) {
result := make([]byte, 20)
_, err := rand.Read(result)
return result, err
}
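
A small, hypothetical usage sketch of the relocated ID helpers (the program and its output are illustrative only):

package main

import (
	"bytes"
	"fmt"

	"storj.io/storj/pkg/node"
)

func main() {
	id, err := node.NewID() // random 20-byte ID, base58-encoded
	if err != nil {
		panic(err)
	}

	// IDFromString just wraps the string back into a node.ID
	same := node.IDFromString(id.String())
	fmt.Println(bytes.Equal(id.Bytes(), same.Bytes())) // true
}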


@ -1,7 +1,7 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
package node
import (
"testing"
@ -11,17 +11,17 @@ import (
func TestString(t *testing.T) {
expected := "test node"
node := NodeID(expected)
node := ID(expected)
result := node.String()
assert.Equal(t, expected, result)
}
func TestStringToNodeID(t *testing.T) {
func TestIDFromString(t *testing.T) {
str := "test node"
node := NodeID(str)
expected := StringToNodeID(str)
node := ID(str)
expected := IDFromString(str)
assert.Equal(t, expected.String(), node.String())
}


@ -5,9 +5,11 @@ package node
import (
"context"
"log"
"google.golang.org/grpc"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pool"
"storj.io/storj/pkg/transport"
@ -15,6 +17,7 @@ import (
// Node is the storj definition for a node in the network
type Node struct {
dht dht.DHT
self pb.Node
tc transport.Client
cache pool.Pool
@ -35,14 +38,28 @@ func (n *Node) Lookup(ctx context.Context, to pb.Node, find pb.Node) ([]*pb.Node
if err != nil {
return nil, err
}
if err := n.cache.Add(ctx, to.GetId(), c); err != nil {
log.Printf("Error %s occurred adding %s to cache", err, to.GetId())
}
conn = c
}
c := pb.NewNodesClient(conn)
resp, err := c.Query(ctx, &pb.QueryRequest{Sender: &n.self, Target: &find})
resp, err := c.Query(ctx, &pb.QueryRequest{Limit: 20, Sender: &n.self, Target: &find, Pingback: true})
if err != nil {
return nil, err
}
rt, err := n.dht.GetRoutingTable(ctx)
if err != nil {
return nil, err
}
if err := rt.ConnectionSuccess(&to); err != nil {
return nil, err
}
return resp.Response, nil
}


@ -8,10 +8,11 @@ import (
"net"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/dht/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
@ -26,9 +27,9 @@ func TestLookup(t *testing.T) {
expectedErr error
}{
{
self: pb.Node{Id: NewNodeID(t), Address: &pb.NodeAddress{Address: ":7070"}},
to: pb.Node{}, // filled after server has been started
find: pb.Node{Id: NewNodeID(t), Address: &pb.NodeAddress{Address: ":9090"}},
self: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":7070"}},
to: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":8080"}},
find: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":9090"}},
expectedErr: nil,
},
}
@ -43,13 +44,20 @@ func TestLookup(t *testing.T) {
assert.NoError(t, err)
go func() { assert.NoError(t, srv.Serve(lis)) }()
defer srv.Stop()
ctrl := gomock.NewController(t)
mdht := mock_dht.NewMockDHT(ctrl)
mrt := mock_dht.NewMockRoutingTable(ctrl)
mdht.EXPECT().GetRoutingTable(gomock.Any()).Return(mrt, nil)
mrt.EXPECT().ConnectionSuccess(gomock.Any()).Return(nil)
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
nc, err := NewNodeClient(identity, v.self)
nc, err := NewNodeClient(identity, v.self, mdht)
assert.NoError(t, err)
_, err = nc.Lookup(ctx, v.to, v.find)
@ -92,7 +100,7 @@ func (mn *mockNodeServer) Query(ctx context.Context, req *pb.QueryRequest) (*pb.
// NewNodeID returns the string representation of a dht node ID
func NewNodeID(t *testing.T) string {
id, err := kademlia.NewID()
id, err := NewID()
assert.NoError(t, err)
return id.String()


@ -6,38 +6,57 @@ package node
import (
"context"
"go.uber.org/zap"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
)
// Server implements the grpc Node Server
type Server struct {
dht dht.DHT
dht dht.DHT
logger *zap.Logger
}
// NewServer returns a newly instantiated Node Server
func NewServer(dht dht.DHT) *Server {
return &Server{
dht: dht,
logger: zap.L(),
}
}
// Query is a node to node communication query
func (s *Server) Query(ctx context.Context, req pb.QueryRequest) (pb.QueryResponse, error) {
func (s *Server) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
if s.logger == nil {
s.logger = zap.L()
}
rt, err := s.dht.GetRoutingTable(ctx)
if err != nil {
return pb.QueryResponse{}, NodeClientErr.New("could not get routing table %s", err)
return &pb.QueryResponse{}, NodeClientErr.New("could not get routing table %s", err)
}
_, err = s.dht.Ping(ctx, *req.Sender)
if err != nil {
err = rt.ConnectionFailed(req.Sender)
if req.GetPingback() {
_, err = s.dht.Ping(ctx, *req.Sender)
if err != nil {
return pb.QueryResponse{}, NodeClientErr.New("could not respond to connection failed %s", err)
err = rt.ConnectionFailed(req.Sender)
if err != nil {
s.logger.Error("could not respond to connection failed", zap.Error(err))
}
s.logger.Error("connection to node failed", zap.Error(err), zap.String("nodeID", req.Sender.Id))
}
err = rt.ConnectionSuccess(req.Sender)
if err != nil {
s.logger.Error("could not respond to connection success", zap.Error(err))
}
return pb.QueryResponse{}, NodeClientErr.New("connection to node %s failed", req.Sender.Id)
}
err = rt.ConnectionSuccess(req.Sender)
if err != nil {
return pb.QueryResponse{}, NodeClientErr.New("could not respond to connection success %s", err)
}
id := kademlia.StringToNodeID(req.Target.Id)
id := IDFromString(req.Target.Id)
nodes, err := rt.FindNear(id, int(req.Limit))
if err != nil {
return pb.QueryResponse{}, NodeClientErr.New("could not find near %s", err)
return &pb.QueryResponse{}, NodeClientErr.New("could not find near %s", err)
}
return pb.QueryResponse{Sender: req.Sender, Response: nodes}, nil
return &pb.QueryResponse{Sender: req.Sender, Response: nodes}, nil
}


@ -5,7 +5,6 @@ package node
import (
"context"
"errors"
"fmt"
"testing"
@ -37,7 +36,7 @@ func TestQuery(t *testing.T) {
findNear []*pb.Node
limit int
nearErr error
res pb.QueryResponse
res *pb.QueryResponse
err error
}{
{caseName: "ping success, return sender",
@ -50,7 +49,7 @@ func TestQuery(t *testing.T) {
findNear: []*pb.Node{target},
limit: 2,
nearErr: nil,
res: pb.QueryResponse{Sender: sender, Response: []*pb.Node{target}},
res: &pb.QueryResponse{Sender: sender, Response: []*pb.Node{target}},
err: nil,
},
{caseName: "ping success, return nearest",
@ -63,51 +62,12 @@ func TestQuery(t *testing.T) {
findNear: []*pb.Node{sender, node},
limit: 2,
nearErr: nil,
res: pb.QueryResponse{Sender: sender, Response: []*pb.Node{sender, node}},
res: &pb.QueryResponse{Sender: sender, Response: []*pb.Node{sender, node}},
err: nil,
},
{caseName: "ping success, connectionSuccess errors",
rt: mockRT,
getRTErr: nil,
pingNode: *sender,
pingErr: nil,
successErr: errors.New("connection fails error"),
failErr: nil,
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: pb.QueryResponse{},
err: errors.New("query error"),
},
{caseName: "ping fails, return error",
rt: mockRT,
getRTErr: nil,
pingNode: pb.Node{},
pingErr: errors.New("ping err"),
successErr: nil,
failErr: nil,
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: pb.QueryResponse{},
err: errors.New("query error"),
},
{caseName: "ping fails, connectionFailed errors",
rt: mockRT,
getRTErr: nil,
pingNode: pb.Node{},
pingErr: errors.New("ping err"),
successErr: nil,
failErr: errors.New("connection fails error"),
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: pb.QueryResponse{},
err: errors.New("query error"),
},
}
for i, v := range cases {
req := pb.QueryRequest{Sender: sender, Target: &pb.Node{Id: "B"}, Limit: int64(2)}
req := pb.QueryRequest{Pingback: true, Sender: sender, Target: &pb.Node{Id: "B"}, Limit: int64(2)}
mockDHT.EXPECT().GetRoutingTable(gomock.Any()).Return(v.rt, v.getRTErr)
mockDHT.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(v.pingNode, v.pingErr)
if v.pingErr != nil {
@ -118,7 +78,7 @@ func TestQuery(t *testing.T) {
mockRT.EXPECT().FindNear(gomock.Any(), v.limit).Return(v.findNear, v.nearErr)
}
}
res, err := s.Query(context.Background(), req)
res, err := s.Query(context.Background(), &req)
if !assert.Equal(t, v.res, res) {
fmt.Printf("case %s (%v) failed\n", v.caseName, i)
}


@ -13,7 +13,7 @@ import (
"go.uber.org/zap"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
@ -119,7 +119,7 @@ func (o *Cache) Put(nodeID string, value pb.Node) error {
return err
}
return o.DB.Put(kademlia.StringToNodeID(nodeID).Bytes(), data)
return o.DB.Put(node.IDFromString(nodeID).Bytes(), data)
}
// Bootstrap walks the initialized network and populates the cache
@ -130,17 +130,17 @@ func (o *Cache) Bootstrap(ctx context.Context) error {
}
for _, v := range nodes {
found, err := o.DHT.FindNode(ctx, kademlia.StringToNodeID(v.Id))
found, err := o.DHT.FindNode(ctx, node.IDFromString(v.Id))
if err != nil {
zap.Error(ErrNodeNotFound)
}
node, err := proto.Marshal(&found)
n, err := proto.Marshal(&found)
if err != nil {
return err
}
if err := o.DB.Put(kademlia.StringToNodeID(found.Id).Bytes(), node); err != nil {
if err := o.DB.Put(node.IDFromString(found.Id).Bytes(), n); err != nil {
return err
}
}
@ -158,7 +158,7 @@ func (o *Cache) Refresh(ctx context.Context) error {
return err
}
rid := kademlia.NodeID(r)
rid := node.ID(r)
near, err := o.DHT.GetNodes(ctx, rid.String(), 128)
if err != nil {
return err


@ -5,6 +5,7 @@ package overlay
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
@ -18,7 +19,9 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"
@ -44,18 +47,22 @@ const (
)
func newTestKademlia(t *testing.T, ip, port string, d dht.DHT, b pb.Node) *kademlia.Kademlia {
i, err := kademlia.NewID()
i, err := node.NewID()
assert.NoError(t, err)
id := *i
n := []pb.Node{b}
kad, err := kademlia.NewKademlia(&id, n, ip, port)
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
kad, err := kademlia.NewKademlia(&id, n, fmt.Sprintf("%s:%s", ip, port), identity)
assert.NoError(t, err)
return kad
}
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
bid, err := kademlia.NewID()
bid, err := node.NewID()
assert.NoError(t, err)
bnid := *bid
@ -64,10 +71,15 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
p, err := strconv.Atoi(port)
pm := strconv.Itoa(p)
assert.NoError(t, err)
intro, err := kademlia.GetIntroNode(bnid.String(), ip, pm)
intro, err := kademlia.GetIntroNode(fmt.Sprintf("%s:%s", ip, pm))
assert.NoError(t, err)
boot, err := kademlia.NewKademlia(&bnid, []pb.Node{*intro}, ip, pm)
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
boot, err := kademlia.NewKademlia(&bnid, []pb.Node{*intro}, fmt.Sprintf("%s:%s", ip, pm), identity)
assert.NoError(t, err)
rt, err := boot.GetRoutingTable(context.Background())
@ -83,11 +95,16 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
for i := 0; i < testNetSize; i++ {
gg := strconv.Itoa(p)
nid, err := kademlia.NewID()
nid, err := node.NewID()
assert.NoError(t, err)
id := *nid
dht, err := kademlia.NewKademlia(&id, []pb.Node{bootNode}, ip, gg)
ca, err := provider.NewCA(ctx, 12, 4)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
dht, err := kademlia.NewKademlia(&id, []pb.Node{bootNode}, fmt.Sprintf("%s:%s", ip, gg), identity)
assert.NoError(t, err)
p++


@ -13,7 +13,7 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/storage/redis/redisserver"
@ -212,9 +212,9 @@ func TestBulkLookupV2(t *testing.T) {
},
{testID: "valid ids",
nodeIDs: func() []dht.NodeID {
id1 := kademlia.StringToNodeID("n1")
id2 := kademlia.StringToNodeID("n2")
id3 := kademlia.StringToNodeID("n3")
id1 := node.IDFromString("n1")
id2 := node.IDFromString("n2")
id3 := node.IDFromString("n3")
return []dht.NodeID{id1, id2, id3}
}(),
responses: nodes,
@ -222,8 +222,8 @@ func TestBulkLookupV2(t *testing.T) {
},
{testID: "missing ids",
nodeIDs: func() []dht.NodeID {
id1 := kademlia.StringToNodeID("n4")
id2 := kademlia.StringToNodeID("n5")
id1 := node.IDFromString("n4")
id2 := node.IDFromString("n5")
return []dht.NodeID{id1, id2}
}(),
responses: []*pb.Node{nil, nil},
@ -231,10 +231,10 @@ func TestBulkLookupV2(t *testing.T) {
},
{testID: "random order and nil",
nodeIDs: func() []dht.NodeID {
id1 := kademlia.StringToNodeID("n1")
id2 := kademlia.StringToNodeID("n2")
id3 := kademlia.StringToNodeID("n3")
id4 := kademlia.StringToNodeID("n4")
id1 := node.IDFromString("n1")
id2 := node.IDFromString("n2")
id3 := node.IDFromString("n3")
id4 := node.IDFromString("n4")
return []dht.NodeID{id2, id1, id3, id4}
}(),
responses: func() []*pb.Node {


@ -2,7 +2,7 @@
// Source: storj.io/storj/pkg/overlay (interfaces: Client)
// Package mock_overlay is a generated GoMock package.
package mock_overlay
package mocks
import (
context "context"


@ -1,10 +1,11 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package overlay
package mocks
import (
"context"
"fmt"
"strings"
"github.com/zeebo/errs"
@ -13,24 +14,23 @@ import (
"storj.io/storj/pkg/provider"
)
// MockOverlay is a mocked overlay implementation
type MockOverlay struct {
// Overlay is a mocked overlay implementation
type Overlay struct {
nodes map[string]*pb.Node
}
// NewMockOverlay creates a new overlay mock
func NewMockOverlay(nodes []*pb.Node) *MockOverlay {
rv := &MockOverlay{nodes: map[string]*pb.Node{}}
// NewOverlay returns a newly initialized mock overlay
func NewOverlay(nodes []*pb.Node) *Overlay {
rv := &Overlay{nodes: map[string]*pb.Node{}}
for _, node := range nodes {
rv.nodes[node.Id] = node
}
return rv
}
// FindStorageNodes finds storage nodes based on the request
func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
req *pb.FindStorageNodesRequest) (resp *pb.FindStorageNodesResponse,
err error) {
// FindStorageNodes is the mock implementation
func (mo *Overlay) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesRequest) (resp *pb.FindStorageNodesResponse, err error) {
nodes := make([]*pb.Node, 0, len(mo.nodes))
for _, node := range mo.nodes {
nodes = append(nodes, node)
@ -43,13 +43,13 @@ func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
}
// Lookup finds a single storage node based on the request
func (mo *MockOverlay) Lookup(ctx context.Context, req *pb.LookupRequest) (
func (mo *Overlay) Lookup(ctx context.Context, req *pb.LookupRequest) (
*pb.LookupResponse, error) {
return &pb.LookupResponse{Node: mo.nodes[req.NodeID]}, nil
}
//BulkLookup finds multiple storage nodes based on the requests
func (mo *MockOverlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (
func (mo *Overlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (
*pb.LookupResponses, error) {
var responses []*pb.LookupResponse
for _, r := range reqs.Lookuprequest {
@ -61,18 +61,18 @@ func (mo *MockOverlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests)
return &pb.LookupResponses{Lookupresponse: responses}, nil
}
// MockConfig specifies static nodes for mock overlay
type MockConfig struct {
// Config specifies static nodes for mock overlay
type Config struct {
Nodes string `help:"a comma-separated list of <node-id>:<ip>:<port>" default:""`
}
// Run runs server with mock overlay
func (c MockConfig) Run(ctx context.Context, server *provider.Provider) error {
func (c Config) Run(ctx context.Context, server *provider.Provider) error {
var nodes []*pb.Node
for _, nodestr := range strings.Split(c.Nodes, ",") {
parts := strings.SplitN(nodestr, ":", 2)
if len(parts) != 2 {
return Error.New("malformed node config: %#v", nodestr)
return fmt.Errorf("malformed node config: %#v", nodestr)
}
id, addr := parts[0], parts[1]
nodes = append(nodes, &pb.Node{
@ -83,6 +83,6 @@ func (c MockConfig) Run(ctx context.Context, server *provider.Provider) error {
}})
}
pb.RegisterOverlayServer(server.GRPC(), NewMockOverlay(nodes))
pb.RegisterOverlayServer(server.GRPC(), NewOverlay(nodes))
return server.Run(ctx)
}
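A rough usage sketch for the relocated mock, assuming the package now lives under storj.io/storj/pkg/overlay/mocks (only the package rename is visible in this file) and using the pb field names that appear above; the node ID and address are made up.

// Sketch, not part of this commit.
package example

import (
	"context"
	"fmt"

	"storj.io/storj/pkg/overlay/mocks" // assumed import path
	"storj.io/storj/pkg/pb"
)

func lookupFromMock(ctx context.Context) error {
	// Seed the mock with one known node; the equivalent Config string form
	// would be "n1:127.0.0.1:7777" per the "<node-id>:<ip>:<port>" help text.
	mo := mocks.NewOverlay([]*pb.Node{{
		Id:      "n1",
		Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:7777"},
	}})

	resp, err := mo.Lookup(ctx, &pb.LookupRequest{NodeID: "n1"})
	if err != nil {
		return err
	}
	fmt.Println(resp.Node.Address.Address) // prints 127.0.0.1:7777
	return nil
}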

View File

@ -11,7 +11,8 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/internal/test"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -20,18 +21,18 @@ func TestFindStorageNodes(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
id, err := kademlia.NewID()
id, err := node.NewID()
assert.NoError(t, err)
id2, err := kademlia.NewID()
id2, err := node.NewID()
assert.NoError(t, err)
srv := NewMockServer([]storage.ListItem{
{
Key: storage.Key(id.String()),
Value: NewNodeAddressValue(t, "127.0.0.1:9090"),
Value: test.NewNodeStorageValue(t, "127.0.0.1:9090"),
}, {
Key: storage.Key(id2.String()),
Value: NewNodeAddressValue(t, "127.0.0.1:9090"),
Value: test.NewNodeStorageValue(t, "127.0.0.1:9090"),
},
})
assert.NotNil(t, srv)
@ -54,14 +55,14 @@ func TestOverlayLookup(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
id, err := kademlia.NewID()
id, err := node.NewID()
assert.NoError(t, err)
srv := NewMockServer([]storage.ListItem{
{
Key: storage.Key(id.String()),
Value: NewNodeAddressValue(t, "127.0.0.1:9090"),
Value: test.NewNodeStorageValue(t, "127.0.0.1:9090"),
},
})
go func() { assert.NoError(t, srv.Serve(lis)) }()
@ -80,15 +81,15 @@ func TestOverlayBulkLookup(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
id, err := kademlia.NewID()
id, err := node.NewID()
assert.NoError(t, err)
id2, err := kademlia.NewID()
id2, err := node.NewID()
assert.NoError(t, err)
srv := NewMockServer([]storage.ListItem{
{
Key: storage.Key(id.String()),
Value: NewNodeAddressValue(t, "127.0.0.1:9090"),
Value: test.NewNodeStorageValue(t, "127.0.0.1:9090"),
},
})
go func() { assert.NoError(t, srv.Serve(lis)) }()

View File

@ -4,10 +4,6 @@
package overlay
import (
"testing"
proto "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"google.golang.org/grpc"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
@ -43,12 +39,3 @@ func NewMockServer(items []storage.ListItem) *grpc.Server {
return grpcServer
}
// NewNodeAddressValue provides a convenient way to create a storage.Value for testing purposes
func NewNodeAddressValue(t *testing.T, address string) storage.Value {
na := &pb.Node{Id: "", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: address}}
d, err := proto.Marshal(na)
assert.NoError(t, err)
return d
}
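The replacement helper itself is not shown in this diff, only its new call sites (test.NewNodeStorageValue). Presumably it mirrors the function removed above; a hedged reconstruction of what internal/test likely contains:

// Hypothetical reconstruction, not taken from this commit.
package test

import (
	"testing"

	proto "github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/storage"
)

// NewNodeStorageValue marshals a pb.Node carrying the given address into a
// storage.Value, for seeding mock overlay caches in tests.
func NewNodeStorageValue(t *testing.T, address string) storage.Value {
	na := &pb.Node{Id: "", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: address}}
	d, err := proto.Marshal(na)
	assert.NoError(t, err)
	return d
}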

View File

@ -35,7 +35,7 @@ func (m *MetaStreamInfo) Reset() { *m = MetaStreamInfo{} }
func (m *MetaStreamInfo) String() string { return proto.CompactTextString(m) }
func (*MetaStreamInfo) ProtoMessage() {}
func (*MetaStreamInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_026f5a060e38d7ef, []int{0}
return fileDescriptor_3b5ea8fe65782bcc, []int{0}
}
func (m *MetaStreamInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetaStreamInfo.Unmarshal(m, b)
@ -108,9 +108,9 @@ func init() {
proto.RegisterType((*MetaStreamInfo)(nil), "streams.MetaStreamInfo")
}
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_026f5a060e38d7ef) }
func init() { proto.RegisterFile("meta.proto", fileDescriptor_3b5ea8fe65782bcc) }
var fileDescriptor_meta_026f5a060e38d7ef = []byte{
var fileDescriptor_3b5ea8fe65782bcc = []byte{
// 250 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x49, 0xfa, 0x4f, 0x86, 0xda, 0xea, 0x8a, 0xb0, 0xe8, 0x25, 0xe8, 0xc1, 0x20, 0xe2,

View File

@ -34,6 +34,7 @@ const (
var NodeTransport_name = map[int32]string{
0: "TCP",
}
var NodeTransport_value = map[string]int32{
"TCP": 0,
}
@ -41,8 +42,9 @@ var NodeTransport_value = map[string]int32{
func (x NodeTransport) String() string {
return proto.EnumName(NodeTransport_name, int32(x))
}
func (NodeTransport) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{0}
return fileDescriptor_61fc82527fbe24ad, []int{0}
}
// NodeType is an enum of possible node types
@ -57,6 +59,7 @@ var NodeType_name = map[int32]string{
0: "ADMIN",
1: "STORAGE",
}
var NodeType_value = map[string]int32{
"ADMIN": 0,
"STORAGE": 1,
@ -65,8 +68,9 @@ var NodeType_value = map[string]int32{
func (x NodeType) String() string {
return proto.EnumName(NodeType_name, int32(x))
}
func (NodeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{1}
return fileDescriptor_61fc82527fbe24ad, []int{1}
}
type Restriction_Operator int32
@ -86,6 +90,7 @@ var Restriction_Operator_name = map[int32]string{
3: "LTE",
4: "GTE",
}
var Restriction_Operator_value = map[string]int32{
"LT": 0,
"EQ": 1,
@ -97,8 +102,9 @@ var Restriction_Operator_value = map[string]int32{
func (x Restriction_Operator) String() string {
return proto.EnumName(Restriction_Operator_name, int32(x))
}
func (Restriction_Operator) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{13, 0}
return fileDescriptor_61fc82527fbe24ad, []int{13, 0}
}
type Restriction_Operand int32
@ -112,6 +118,7 @@ var Restriction_Operand_name = map[int32]string{
0: "freeBandwidth",
1: "freeDisk",
}
var Restriction_Operand_value = map[string]int32{
"freeBandwidth": 0,
"freeDisk": 1,
@ -120,8 +127,9 @@ var Restriction_Operand_value = map[string]int32{
func (x Restriction_Operand) String() string {
return proto.EnumName(Restriction_Operand_name, int32(x))
}
func (Restriction_Operand) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{13, 1}
return fileDescriptor_61fc82527fbe24ad, []int{13, 1}
}
// LookupRequest is is request message for the lookup rpc call
@ -136,7 +144,7 @@ func (m *LookupRequest) Reset() { *m = LookupRequest{} }
func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
func (*LookupRequest) ProtoMessage() {}
func (*LookupRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{0}
return fileDescriptor_61fc82527fbe24ad, []int{0}
}
func (m *LookupRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequest.Unmarshal(m, b)
@ -175,7 +183,7 @@ func (m *LookupResponse) Reset() { *m = LookupResponse{} }
func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
func (*LookupResponse) ProtoMessage() {}
func (*LookupResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{1}
return fileDescriptor_61fc82527fbe24ad, []int{1}
}
func (m *LookupResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponse.Unmarshal(m, b)
@ -214,7 +222,7 @@ func (m *LookupRequests) Reset() { *m = LookupRequests{} }
func (m *LookupRequests) String() string { return proto.CompactTextString(m) }
func (*LookupRequests) ProtoMessage() {}
func (*LookupRequests) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{2}
return fileDescriptor_61fc82527fbe24ad, []int{2}
}
func (m *LookupRequests) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequests.Unmarshal(m, b)
@ -253,7 +261,7 @@ func (m *LookupResponses) Reset() { *m = LookupResponses{} }
func (m *LookupResponses) String() string { return proto.CompactTextString(m) }
func (*LookupResponses) ProtoMessage() {}
func (*LookupResponses) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{3}
return fileDescriptor_61fc82527fbe24ad, []int{3}
}
func (m *LookupResponses) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponses.Unmarshal(m, b)
@ -292,7 +300,7 @@ func (m *FindStorageNodesResponse) Reset() { *m = FindStorageNodesRespon
func (m *FindStorageNodesResponse) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesResponse) ProtoMessage() {}
func (*FindStorageNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{4}
return fileDescriptor_61fc82527fbe24ad, []int{4}
}
func (m *FindStorageNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesResponse.Unmarshal(m, b)
@ -333,7 +341,7 @@ func (m *FindStorageNodesRequest) Reset() { *m = FindStorageNodesRequest
func (m *FindStorageNodesRequest) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesRequest) ProtoMessage() {}
func (*FindStorageNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{5}
return fileDescriptor_61fc82527fbe24ad, []int{5}
}
func (m *FindStorageNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesRequest.Unmarshal(m, b)
@ -387,7 +395,7 @@ func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{6}
return fileDescriptor_61fc82527fbe24ad, []int{6}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeAddress.Unmarshal(m, b)
@ -437,7 +445,7 @@ func (m *OverlayOptions) Reset() { *m = OverlayOptions{} }
func (m *OverlayOptions) String() string { return proto.CompactTextString(m) }
func (*OverlayOptions) ProtoMessage() {}
func (*OverlayOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{7}
return fileDescriptor_61fc82527fbe24ad, []int{7}
}
func (m *OverlayOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OverlayOptions.Unmarshal(m, b)
@ -503,7 +511,7 @@ func (m *NodeRep) Reset() { *m = NodeRep{} }
func (m *NodeRep) String() string { return proto.CompactTextString(m) }
func (*NodeRep) ProtoMessage() {}
func (*NodeRep) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{8}
return fileDescriptor_61fc82527fbe24ad, []int{8}
}
func (m *NodeRep) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRep.Unmarshal(m, b)
@ -536,7 +544,7 @@ func (m *NodeRestrictions) Reset() { *m = NodeRestrictions{} }
func (m *NodeRestrictions) String() string { return proto.CompactTextString(m) }
func (*NodeRestrictions) ProtoMessage() {}
func (*NodeRestrictions) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{9}
return fileDescriptor_61fc82527fbe24ad, []int{9}
}
func (m *NodeRestrictions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRestrictions.Unmarshal(m, b)
@ -585,7 +593,7 @@ func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{10}
return fileDescriptor_61fc82527fbe24ad, []int{10}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
@ -637,6 +645,7 @@ type QueryRequest struct {
Sender *Node `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"`
Target *Node `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"`
Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
Pingback bool `protobuf:"varint,4,opt,name=pingback,proto3" json:"pingback,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -646,7 +655,7 @@ func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{11}
return fileDescriptor_61fc82527fbe24ad, []int{11}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryRequest.Unmarshal(m, b)
@ -687,6 +696,13 @@ func (m *QueryRequest) GetLimit() int64 {
return 0
}
func (m *QueryRequest) GetPingback() bool {
if m != nil {
return m.Pingback
}
return false
}
type QueryResponse struct {
Sender *Node `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"`
Response []*Node `protobuf:"bytes,2,rep,name=response,proto3" json:"response,omitempty"`
@ -699,7 +715,7 @@ func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (m *QueryResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{12}
return fileDescriptor_61fc82527fbe24ad, []int{12}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryResponse.Unmarshal(m, b)
@ -746,7 +762,7 @@ func (m *Restriction) Reset() { *m = Restriction{} }
func (m *Restriction) String() string { return proto.CompactTextString(m) }
func (*Restriction) ProtoMessage() {}
func (*Restriction) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_ce394371f276e940, []int{13}
return fileDescriptor_61fc82527fbe24ad, []int{13}
}
func (m *Restriction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Restriction.Unmarshal(m, b)
@ -1016,61 +1032,62 @@ var _Nodes_serviceDesc = grpc.ServiceDesc{
Metadata: "overlay.proto",
}
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_overlay_ce394371f276e940) }
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_61fc82527fbe24ad) }
var fileDescriptor_overlay_ce394371f276e940 = []byte{
// 840 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x6d, 0x6f, 0xe3, 0x44,
0x10, 0xae, 0xf3, 0x9e, 0x49, 0x13, 0x7c, 0xa3, 0xa3, 0x35, 0x11, 0x9c, 0x7a, 0x0b, 0x27, 0x8e,
0x22, 0xe5, 0xa4, 0xdc, 0xa9, 0x52, 0x25, 0x50, 0xd5, 0xd2, 0x52, 0x9d, 0x08, 0x2d, 0xb7, 0xb5,
0x84, 0x84, 0xc4, 0x07, 0x27, 0xde, 0xcb, 0x99, 0x26, 0x5e, 0xb3, 0xbb, 0x3e, 0x08, 0xff, 0x08,
0x89, 0xdf, 0xc4, 0xef, 0xe0, 0x13, 0x42, 0xde, 0x5d, 0x3b, 0xb1, 0xdb, 0x9c, 0xb8, 0x4f, 0xf6,
0xcc, 0x3c, 0xf3, 0xec, 0xbc, 0x43, 0x9f, 0xbf, 0x65, 0x62, 0x11, 0xac, 0x46, 0x89, 0xe0, 0x8a,
0x63, 0xdb, 0x8a, 0xc3, 0x47, 0x73, 0xce, 0xe7, 0x0b, 0xf6, 0x4c, 0xab, 0xa7, 0xe9, 0xeb, 0x67,
0x61, 0x2a, 0x02, 0x15, 0xf1, 0xd8, 0x00, 0xc9, 0xe7, 0xd0, 0x9f, 0x70, 0x7e, 0x9b, 0x26, 0x94,
0xfd, 0x9a, 0x32, 0xa9, 0x70, 0x0f, 0x5a, 0x31, 0x0f, 0xd9, 0xcb, 0x73, 0xcf, 0x39, 0x70, 0x9e,
0x76, 0xa9, 0x95, 0xc8, 0x73, 0x18, 0xe4, 0x40, 0x99, 0xf0, 0x58, 0x32, 0x7c, 0x0c, 0x8d, 0xcc,
0xa6, 0x71, 0xbd, 0x71, 0x7f, 0x94, 0x47, 0x70, 0xc5, 0x43, 0x46, 0xb5, 0x89, 0x5c, 0xad, 0x9d,
0x34, 0xbb, 0xc4, 0xaf, 0xa0, 0xbf, 0xd0, 0x1a, 0x61, 0x34, 0x9e, 0x73, 0x50, 0x7f, 0xda, 0x1b,
0xef, 0x15, 0xde, 0x25, 0x3c, 0x2d, 0x83, 0x09, 0x85, 0x0f, 0xca, 0x41, 0x48, 0x3c, 0x81, 0x41,
0x8e, 0x31, 0x2a, 0xcb, 0xb8, 0x7f, 0x87, 0xd1, 0x98, 0x69, 0x05, 0x4e, 0x4e, 0xc0, 0xfb, 0x36,
0x8a, 0xc3, 0x1b, 0xc5, 0x45, 0x30, 0x67, 0x59, 0xf0, 0xb2, 0x48, 0xf1, 0x53, 0x68, 0x66, 0x79,
0x48, 0xcb, 0x59, 0xc9, 0xd1, 0xd8, 0xc8, 0x9f, 0x0e, 0xec, 0xdf, 0x65, 0x30, 0xd5, 0x7c, 0x04,
0xc0, 0xa7, 0xbf, 0xb0, 0x99, 0xba, 0x89, 0xfe, 0x30, 0x95, 0xaa, 0xd3, 0x0d, 0x0d, 0x9e, 0xc2,
0x60, 0xc6, 0x63, 0x25, 0x82, 0x99, 0x9a, 0xb0, 0x78, 0xae, 0xde, 0x78, 0x35, 0x5d, 0xcd, 0x8f,
0x46, 0xa6, 0x6f, 0xa3, 0xbc, 0x6f, 0xa3, 0x73, 0xdb, 0x37, 0x5a, 0x71, 0xc0, 0x2f, 0xa1, 0xc1,
0x13, 0x25, 0xbd, 0xba, 0x76, 0x5c, 0xa7, 0x7d, 0x6d, 0xbe, 0xd7, 0x49, 0xe6, 0x25, 0xa9, 0x06,
0x91, 0x9f, 0xa1, 0x97, 0xc5, 0x77, 0x1a, 0x86, 0x82, 0x49, 0x89, 0x2f, 0xa0, 0xab, 0x44, 0x10,
0xcb, 0x84, 0x0b, 0xa5, 0xa3, 0x1b, 0x6c, 0x74, 0x22, 0x03, 0xfa, 0xb9, 0x95, 0xae, 0x81, 0xe8,
0x41, 0x3b, 0x30, 0x04, 0x3a, 0xda, 0x2e, 0xcd, 0x45, 0xf2, 0xaf, 0x03, 0x83, 0xf2, 0xbb, 0x78,
0x0c, 0xb0, 0x0c, 0x7e, 0x9f, 0x04, 0x8a, 0xc5, 0xb3, 0x95, 0x9d, 0x95, 0x77, 0x64, 0xb7, 0x01,
0xc6, 0x23, 0xe8, 0x2f, 0xa3, 0x98, 0xb2, 0x24, 0x55, 0xda, 0x68, 0x6b, 0xe3, 0x96, 0xbb, 0xc0,
0x12, 0x5a, 0x86, 0x21, 0x81, 0xdd, 0x65, 0x14, 0xdf, 0x24, 0x8c, 0x85, 0xdf, 0x4d, 0x13, 0x53,
0x99, 0x3a, 0x2d, 0xe9, 0xb2, 0x31, 0x0f, 0x96, 0x3c, 0x8d, 0x95, 0xd7, 0xd0, 0x56, 0x2b, 0xe1,
0xd7, 0xb0, 0x2b, 0x98, 0x54, 0x22, 0x9a, 0xe9, 0xf0, 0xbd, 0xa6, 0x0d, 0xb8, 0xfc, 0xe4, 0x1a,
0x40, 0x4b, 0x70, 0xd2, 0x85, 0xb6, 0x0d, 0x8a, 0xf8, 0xe0, 0x56, 0xc1, 0xf8, 0x19, 0xf4, 0x5f,
0x0b, 0xc6, 0xce, 0x82, 0x38, 0xfc, 0x2d, 0x0a, 0xd5, 0x1b, 0x3b, 0x11, 0x65, 0x25, 0x0e, 0xa1,
0x93, 0x29, 0xce, 0x23, 0x79, 0xab, 0x53, 0xae, 0xd3, 0x42, 0x26, 0x7f, 0x39, 0xd0, 0xc8, 0x68,
0x71, 0x00, 0xb5, 0x28, 0xb4, 0x3b, 0x5a, 0x8b, 0x42, 0x1c, 0x95, 0x9b, 0xd2, 0x1b, 0x3f, 0x2c,
0xc5, 0x6c, 0x3b, 0x5e, 0xb4, 0x0a, 0x9f, 0x40, 0x43, 0xad, 0x12, 0xa6, 0x8b, 0x33, 0x18, 0x3f,
0x28, 0x77, 0x7d, 0x95, 0x30, 0xaa, 0xcd, 0x77, 0xea, 0xd1, 0x78, 0xbf, 0x7a, 0x08, 0xd8, 0x7d,
0x95, 0x32, 0xb1, 0xca, 0xf7, 0xe1, 0x09, 0xb4, 0x24, 0x8b, 0x43, 0x26, 0xee, 0xbf, 0x1a, 0xd6,
0x98, 0xc1, 0x54, 0x20, 0xe6, 0x4c, 0xd9, 0x5c, 0xaa, 0x30, 0x63, 0xc4, 0x87, 0xd0, 0x5c, 0x44,
0xcb, 0x48, 0xd9, 0x0e, 0x1b, 0x81, 0x04, 0xd0, 0xb7, 0x6f, 0xda, 0x2d, 0xfe, 0x9f, 0x8f, 0x7e,
0x01, 0x9d, 0xe2, 0x86, 0xd4, 0xee, 0xdb, 0xf7, 0xc2, 0x4c, 0xfe, 0x71, 0xa0, 0xb7, 0x91, 0x35,
0x1e, 0x43, 0x87, 0x27, 0x4c, 0x04, 0x8a, 0x0b, 0xbb, 0x46, 0x9f, 0x14, 0xae, 0x1b, 0xb8, 0xd1,
0xb5, 0x05, 0xd1, 0x02, 0x8e, 0x47, 0xd0, 0xd6, 0xff, 0x71, 0xa8, 0x73, 0x1d, 0x8c, 0x3f, 0xde,
0xee, 0x19, 0x87, 0x34, 0x07, 0x67, 0xb9, 0xbf, 0x0d, 0x16, 0x29, 0xcb, 0x73, 0xd7, 0x02, 0x79,
0x01, 0x9d, 0xfc, 0x0d, 0x6c, 0x41, 0x6d, 0xe2, 0xbb, 0x3b, 0xd9, 0xf7, 0xe2, 0x95, 0xeb, 0x64,
0xdf, 0x4b, 0xdf, 0xad, 0x61, 0x1b, 0xea, 0x13, 0xff, 0xc2, 0xad, 0x67, 0x3f, 0x97, 0xfe, 0x85,
0xdb, 0x20, 0x87, 0xd0, 0xb6, 0xfc, 0xf8, 0xa0, 0x32, 0xa1, 0xee, 0x0e, 0xee, 0xae, 0xc7, 0xd1,
0x75, 0x0e, 0x3d, 0xe8, 0x97, 0x0e, 0x43, 0xc6, 0xe2, 0x7f, 0xf3, 0x83, 0xbb, 0x73, 0x48, 0xa0,
0x93, 0x0f, 0x0f, 0x76, 0xa1, 0x79, 0x7a, 0xfe, 0xfd, 0xcb, 0x2b, 0x77, 0x07, 0x7b, 0xd0, 0xbe,
0xf1, 0xaf, 0xe9, 0xe9, 0xe5, 0x85, 0xeb, 0x8c, 0xff, 0x76, 0xa0, 0x6d, 0x0f, 0x04, 0x1e, 0x43,
0xcb, 0x9c, 0x66, 0xdc, 0x72, 0xfd, 0x87, 0xdb, 0x6e, 0x38, 0x9e, 0x00, 0x9c, 0xa5, 0x8b, 0x5b,
0xeb, 0xbe, 0x7f, 0xbf, 0xbb, 0x1c, 0x7a, 0x5b, 0xfc, 0x25, 0xfe, 0x08, 0x6e, 0xf5, 0x64, 0xe3,
0x41, 0x81, 0xde, 0x72, 0xcd, 0x87, 0x8f, 0xdf, 0x81, 0x30, 0xcc, 0xe3, 0x13, 0x68, 0x1a, 0xb6,
0x23, 0x68, 0xea, 0x29, 0xc4, 0x0f, 0x0b, 0xa7, 0xcd, 0x4d, 0x18, 0xee, 0x55, 0xd5, 0x86, 0xe0,
0xac, 0xf1, 0x53, 0x2d, 0x99, 0x4e, 0x5b, 0xfa, 0x32, 0x3e, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff,
0xd3, 0xc6, 0x96, 0x47, 0xd7, 0x07, 0x00, 0x00,
var fileDescriptor_61fc82527fbe24ad = []byte{
// 860 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x6d, 0x6f, 0x1b, 0x45,
0x10, 0xce, 0xf9, 0xdd, 0xe3, 0xd8, 0x5c, 0x47, 0x25, 0x39, 0x2c, 0xa8, 0xd2, 0x85, 0x8a, 0x12,
0x24, 0x57, 0x72, 0xab, 0x48, 0x91, 0x40, 0x51, 0x42, 0x42, 0x54, 0x61, 0x12, 0xba, 0xb1, 0x84,
0x84, 0xc4, 0x87, 0xb3, 0x6f, 0xeb, 0x1e, 0xb1, 0x6f, 0x8f, 0xdd, 0xbd, 0x82, 0xf9, 0x11, 0xfc,
0x0f, 0x24, 0x7e, 0x13, 0xbf, 0x83, 0x4f, 0x08, 0xed, 0xcb, 0x9d, 0x7d, 0x4e, 0x5c, 0xc1, 0xa7,
0xf3, 0xcc, 0x3c, 0x33, 0xfb, 0xec, 0x33, 0xb3, 0x63, 0xe8, 0xf2, 0xb7, 0x4c, 0xcc, 0xc3, 0xe5,
0x20, 0x15, 0x5c, 0x71, 0x6c, 0x3a, 0xb3, 0xff, 0x68, 0xc6, 0xf9, 0x6c, 0xce, 0x9e, 0x19, 0xf7,
0x24, 0x7b, 0xfd, 0x2c, 0xca, 0x44, 0xa8, 0x62, 0x9e, 0x58, 0x20, 0xf9, 0x14, 0xba, 0x23, 0xce,
0x6f, 0xb3, 0x94, 0xb2, 0x9f, 0x33, 0x26, 0x15, 0xee, 0x41, 0x23, 0xe1, 0x11, 0x7b, 0x79, 0x1e,
0x78, 0x07, 0xde, 0xd3, 0x36, 0x75, 0x16, 0x79, 0x0e, 0xbd, 0x1c, 0x28, 0x53, 0x9e, 0x48, 0x86,
0x8f, 0xa1, 0xa6, 0x63, 0x06, 0xd7, 0x19, 0x76, 0x07, 0x39, 0x83, 0x2b, 0x1e, 0x31, 0x6a, 0x42,
0xe4, 0x6a, 0x95, 0x64, 0xaa, 0x4b, 0xfc, 0x02, 0xba, 0x73, 0xe3, 0x11, 0xd6, 0x13, 0x78, 0x07,
0xd5, 0xa7, 0x9d, 0xe1, 0x5e, 0x91, 0x5d, 0xc2, 0xd3, 0x32, 0x98, 0x50, 0x78, 0xaf, 0x4c, 0x42,
0xe2, 0x09, 0xf4, 0x72, 0x8c, 0x75, 0xb9, 0x8a, 0xfb, 0x77, 0x2a, 0xda, 0x30, 0xdd, 0x80, 0x93,
0x13, 0x08, 0xbe, 0x8e, 0x93, 0xe8, 0x46, 0x71, 0x11, 0xce, 0x98, 0x26, 0x2f, 0x8b, 0x2b, 0x7e,
0x0c, 0x75, 0x7d, 0x0f, 0xe9, 0x6a, 0x6e, 0xdc, 0xd1, 0xc6, 0xc8, 0x1f, 0x1e, 0xec, 0xdf, 0xad,
0x60, 0xd5, 0x7c, 0x04, 0xc0, 0x27, 0x3f, 0xb1, 0xa9, 0xba, 0x89, 0x7f, 0xb3, 0x4a, 0x55, 0xe9,
0x9a, 0x07, 0x4f, 0xa1, 0x37, 0xe5, 0x89, 0x12, 0xe1, 0x54, 0x8d, 0x58, 0x32, 0x53, 0x6f, 0x82,
0x8a, 0x51, 0xf3, 0x83, 0x81, 0xed, 0xdb, 0x20, 0xef, 0xdb, 0xe0, 0xdc, 0xf5, 0x8d, 0x6e, 0x24,
0xe0, 0xe7, 0x50, 0xe3, 0xa9, 0x92, 0x41, 0xd5, 0x24, 0xae, 0xae, 0x7d, 0x6d, 0xbf, 0xd7, 0xa9,
0xce, 0x92, 0xd4, 0x80, 0xc8, 0x8f, 0xd0, 0xd1, 0xfc, 0x4e, 0xa3, 0x48, 0x30, 0x29, 0xf1, 0x05,
0xb4, 0x95, 0x08, 0x13, 0x99, 0x72, 0xa1, 0x0c, 0xbb, 0xde, 0x5a, 0x27, 0x34, 0x70, 0x9c, 0x47,
0xe9, 0x0a, 0x88, 0x01, 0x34, 0x43, 0x5b, 0xc0, 0xb0, 0x6d, 0xd3, 0xdc, 0x24, 0xff, 0x78, 0xd0,
0x2b, 0x9f, 0x8b, 0xc7, 0x00, 0x8b, 0xf0, 0xd7, 0x51, 0xa8, 0x58, 0x32, 0x5d, 0xba, 0x59, 0x79,
0xc7, 0xed, 0xd6, 0xc0, 0x78, 0x04, 0xdd, 0x45, 0x9c, 0x50, 0x96, 0x66, 0xca, 0x04, 0x9d, 0x36,
0x7e, 0xb9, 0x0b, 0x2c, 0xa5, 0x65, 0x18, 0x12, 0xd8, 0x5d, 0xc4, 0xc9, 0x4d, 0xca, 0x58, 0xf4,
0xcd, 0x24, 0xb5, 0xca, 0x54, 0x69, 0xc9, 0xa7, 0xc7, 0x3c, 0x5c, 0xf0, 0x2c, 0x51, 0x41, 0xcd,
0x44, 0x9d, 0x85, 0x5f, 0xc2, 0xae, 0x60, 0x52, 0x89, 0x78, 0x6a, 0xe8, 0x07, 0x75, 0x47, 0xb8,
0x7c, 0xe4, 0x0a, 0x40, 0x4b, 0x70, 0xd2, 0x86, 0xa6, 0x23, 0x45, 0xc6, 0xe0, 0x6f, 0x82, 0xf1,
0x13, 0xe8, 0xbe, 0x16, 0x8c, 0x9d, 0x85, 0x49, 0xf4, 0x4b, 0x1c, 0xa9, 0x37, 0x6e, 0x22, 0xca,
0x4e, 0xec, 0x43, 0x4b, 0x3b, 0xce, 0x63, 0x79, 0x6b, 0xae, 0x5c, 0xa5, 0x85, 0x4d, 0xfe, 0xf4,
0xa0, 0xa6, 0xcb, 0x62, 0x0f, 0x2a, 0x71, 0xe4, 0xde, 0x68, 0x25, 0x8e, 0x70, 0x50, 0x6e, 0x4a,
0x67, 0xf8, 0xb0, 0xc4, 0xd9, 0x75, 0xbc, 0x68, 0x15, 0x3e, 0x81, 0x9a, 0x5a, 0xa6, 0xcc, 0x88,
0xd3, 0x1b, 0x3e, 0x28, 0x77, 0x7d, 0x99, 0x32, 0x6a, 0xc2, 0x77, 0xf4, 0xa8, 0xfd, 0x3f, 0x3d,
0x7e, 0xf7, 0x60, 0xf7, 0x55, 0xc6, 0xc4, 0x32, 0x7f, 0x10, 0x4f, 0xa0, 0x21, 0x59, 0x12, 0x31,
0x71, 0xff, 0xda, 0x70, 0x41, 0x0d, 0x53, 0xa1, 0x98, 0x31, 0xe5, 0x2e, 0xb3, 0x09, 0xb3, 0x41,
0x7c, 0x08, 0xf5, 0x79, 0xbc, 0x88, 0x95, 0x6b, 0xb1, 0x35, 0xb4, 0x7e, 0x69, 0x9c, 0xcc, 0x26,
0xe1, 0xf4, 0xd6, 0xf0, 0x6d, 0xd1, 0xc2, 0x26, 0x21, 0x74, 0x1d, 0x1f, 0xf7, 0xc4, 0xff, 0x23,
0xa1, 0xcf, 0xa0, 0x55, 0x2c, 0x98, 0xca, 0x7d, 0xcb, 0xa0, 0x08, 0x93, 0xbf, 0x3d, 0xe8, 0xac,
0x49, 0x82, 0xc7, 0xd0, 0xe2, 0x29, 0x13, 0xa1, 0xe2, 0xc2, 0xbd, 0xb1, 0x8f, 0x8a, 0xd4, 0x35,
0xdc, 0xe0, 0xda, 0x81, 0x68, 0x01, 0xc7, 0x23, 0x68, 0x9a, 0xdf, 0x49, 0x64, 0x74, 0xe8, 0x0d,
0x3f, 0xdc, 0x9e, 0x99, 0x44, 0x34, 0x07, 0x6b, 0x5d, 0xde, 0x86, 0xf3, 0x8c, 0xe5, 0xba, 0x18,
0x83, 0xbc, 0x80, 0x56, 0x7e, 0x06, 0x36, 0xa0, 0x32, 0x1a, 0xfb, 0x3b, 0xfa, 0x7b, 0xf1, 0xca,
0xf7, 0xf4, 0xf7, 0x72, 0xec, 0x57, 0xb0, 0x09, 0xd5, 0xd1, 0xf8, 0xc2, 0xaf, 0xea, 0x1f, 0x97,
0xe3, 0x0b, 0xbf, 0x46, 0x0e, 0xa1, 0xe9, 0xea, 0xe3, 0x83, 0x8d, 0xf1, 0xf5, 0x77, 0x70, 0x77,
0x35, 0xab, 0xbe, 0x77, 0x18, 0x40, 0xb7, 0xb4, 0x35, 0x74, 0x95, 0xf1, 0x57, 0xdf, 0xf9, 0x3b,
0x87, 0x04, 0x5a, 0xf9, 0x64, 0x61, 0x1b, 0xea, 0xa7, 0xe7, 0xdf, 0xbe, 0xbc, 0xf2, 0x77, 0xb0,
0x03, 0xcd, 0x9b, 0xf1, 0x35, 0x3d, 0xbd, 0xbc, 0xf0, 0xbd, 0xe1, 0x5f, 0x1e, 0x34, 0xdd, 0xf6,
0xc0, 0x63, 0x68, 0xd8, 0xbd, 0x8d, 0x5b, 0xfe, 0x1a, 0xfa, 0xdb, 0x16, 0x3c, 0x9e, 0x00, 0x9c,
0x65, 0xf3, 0x5b, 0x97, 0xbe, 0x7f, 0x7f, 0xba, 0xec, 0x07, 0x5b, 0xf2, 0x25, 0x7e, 0x0f, 0xfe,
0xe6, 0x3e, 0xc7, 0x83, 0x02, 0xbd, 0x65, 0xd5, 0xf7, 0x1f, 0xbf, 0x03, 0x61, 0x2b, 0x0f, 0x4f,
0xa0, 0x6e, 0xab, 0x1d, 0x41, 0xdd, 0x4c, 0x21, 0xbe, 0x5f, 0x24, 0xad, 0xbf, 0x92, 0xfe, 0xde,
0xa6, 0xdb, 0x16, 0x38, 0xab, 0xfd, 0x50, 0x49, 0x27, 0x93, 0x86, 0x59, 0x9b, 0xcf, 0xff, 0x0d,
0x00, 0x00, 0xff, 0xff, 0x34, 0x43, 0x8e, 0x71, 0xf4, 0x07, 0x00, 0x00,
}

View File

@ -102,6 +102,7 @@ message QueryRequest {
overlay.Node sender = 1;
overlay.Node target = 2;
int64 limit = 3;
bool pingback = 4;
}
message QueryResponse {
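The hunk above adds a pingback flag to QueryRequest; the regenerated overlay.pb.go earlier in this diff grows the matching Pingback field and GetPingback accessor. A small hedged sketch of constructing such a request (values are placeholders, and the flag's semantics are not documented in this change):

// Sketch, not part of this commit.
package example

import "storj.io/storj/pkg/pb"

func newQuery(self, target *pb.Node) *pb.QueryRequest {
	return &pb.QueryRequest{
		Sender:   self,
		Target:   target,
		Limit:    20,   // placeholder
		Pingback: true, // field added by this change
	}
}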

View File

@ -35,7 +35,7 @@ func (m *PayerBandwidthAllocation) Reset() { *m = PayerBandwidthAllocati
func (m *PayerBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation) ProtoMessage() {}
func (*PayerBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{0}
return fileDescriptor_569d535d76469daf, []int{0}
}
func (m *PayerBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation.Unmarshal(m, b)
@ -84,7 +84,7 @@ func (m *PayerBandwidthAllocation_Data) Reset() { *m = PayerBandwidthAll
func (m *PayerBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation_Data) ProtoMessage() {}
func (*PayerBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{0, 0}
return fileDescriptor_569d535d76469daf, []int{0, 0}
}
func (m *PayerBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation_Data.Unmarshal(m, b)
@ -151,7 +151,7 @@ func (m *RenterBandwidthAllocation) Reset() { *m = RenterBandwidthAlloca
func (m *RenterBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation) ProtoMessage() {}
func (*RenterBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{1}
return fileDescriptor_569d535d76469daf, []int{1}
}
func (m *RenterBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation.Unmarshal(m, b)
@ -197,7 +197,7 @@ func (m *RenterBandwidthAllocation_Data) Reset() { *m = RenterBandwidthA
func (m *RenterBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation_Data) ProtoMessage() {}
func (*RenterBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{1, 0}
return fileDescriptor_569d535d76469daf, []int{1, 0}
}
func (m *RenterBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation_Data.Unmarshal(m, b)
@ -243,7 +243,7 @@ func (m *PieceStore) Reset() { *m = PieceStore{} }
func (m *PieceStore) String() string { return proto.CompactTextString(m) }
func (*PieceStore) ProtoMessage() {}
func (*PieceStore) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{2}
return fileDescriptor_569d535d76469daf, []int{2}
}
func (m *PieceStore) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore.Unmarshal(m, b)
@ -290,7 +290,7 @@ func (m *PieceStore_PieceData) Reset() { *m = PieceStore_PieceData{} }
func (m *PieceStore_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceStore_PieceData) ProtoMessage() {}
func (*PieceStore_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{2, 0}
return fileDescriptor_569d535d76469daf, []int{2, 0}
}
func (m *PieceStore_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore_PieceData.Unmarshal(m, b)
@ -342,7 +342,7 @@ func (m *PieceId) Reset() { *m = PieceId{} }
func (m *PieceId) String() string { return proto.CompactTextString(m) }
func (*PieceId) ProtoMessage() {}
func (*PieceId) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{3}
return fileDescriptor_569d535d76469daf, []int{3}
}
func (m *PieceId) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceId.Unmarshal(m, b)
@ -382,7 +382,7 @@ func (m *PieceSummary) Reset() { *m = PieceSummary{} }
func (m *PieceSummary) String() string { return proto.CompactTextString(m) }
func (*PieceSummary) ProtoMessage() {}
func (*PieceSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{4}
return fileDescriptor_569d535d76469daf, []int{4}
}
func (m *PieceSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceSummary.Unmarshal(m, b)
@ -435,7 +435,7 @@ func (m *PieceRetrieval) Reset() { *m = PieceRetrieval{} }
func (m *PieceRetrieval) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval) ProtoMessage() {}
func (*PieceRetrieval) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{5}
return fileDescriptor_569d535d76469daf, []int{5}
}
func (m *PieceRetrieval) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval.Unmarshal(m, b)
@ -482,7 +482,7 @@ func (m *PieceRetrieval_PieceData) Reset() { *m = PieceRetrieval_PieceDa
func (m *PieceRetrieval_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval_PieceData) ProtoMessage() {}
func (*PieceRetrieval_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{5, 0}
return fileDescriptor_569d535d76469daf, []int{5, 0}
}
func (m *PieceRetrieval_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval_PieceData.Unmarshal(m, b)
@ -535,7 +535,7 @@ func (m *PieceRetrievalStream) Reset() { *m = PieceRetrievalStream{} }
func (m *PieceRetrievalStream) String() string { return proto.CompactTextString(m) }
func (*PieceRetrievalStream) ProtoMessage() {}
func (*PieceRetrievalStream) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{6}
return fileDescriptor_569d535d76469daf, []int{6}
}
func (m *PieceRetrievalStream) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrievalStream.Unmarshal(m, b)
@ -580,7 +580,7 @@ func (m *PieceDelete) Reset() { *m = PieceDelete{} }
func (m *PieceDelete) String() string { return proto.CompactTextString(m) }
func (*PieceDelete) ProtoMessage() {}
func (*PieceDelete) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{7}
return fileDescriptor_569d535d76469daf, []int{7}
}
func (m *PieceDelete) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDelete.Unmarshal(m, b)
@ -618,7 +618,7 @@ func (m *PieceDeleteSummary) Reset() { *m = PieceDeleteSummary{} }
func (m *PieceDeleteSummary) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteSummary) ProtoMessage() {}
func (*PieceDeleteSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{8}
return fileDescriptor_569d535d76469daf, []int{8}
}
func (m *PieceDeleteSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDeleteSummary.Unmarshal(m, b)
@ -657,7 +657,7 @@ func (m *PieceStoreSummary) Reset() { *m = PieceStoreSummary{} }
func (m *PieceStoreSummary) String() string { return proto.CompactTextString(m) }
func (*PieceStoreSummary) ProtoMessage() {}
func (*PieceStoreSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{9}
return fileDescriptor_569d535d76469daf, []int{9}
}
func (m *PieceStoreSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStoreSummary.Unmarshal(m, b)
@ -701,7 +701,7 @@ func (m *StatsReq) Reset() { *m = StatsReq{} }
func (m *StatsReq) String() string { return proto.CompactTextString(m) }
func (*StatsReq) ProtoMessage() {}
func (*StatsReq) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{10}
return fileDescriptor_569d535d76469daf, []int{10}
}
func (m *StatsReq) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatsReq.Unmarshal(m, b)
@ -733,7 +733,7 @@ func (m *StatSummary) Reset() { *m = StatSummary{} }
func (m *StatSummary) String() string { return proto.CompactTextString(m) }
func (*StatSummary) ProtoMessage() {}
func (*StatSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_1cd776e1a43644fb, []int{11}
return fileDescriptor_569d535d76469daf, []int{11}
}
func (m *StatSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatSummary.Unmarshal(m, b)
@ -1057,9 +1057,9 @@ var _PieceStoreRoutes_serviceDesc = grpc.ServiceDesc{
Metadata: "piecestore.proto",
}
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_piecestore_1cd776e1a43644fb) }
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_569d535d76469daf) }
var fileDescriptor_piecestore_1cd776e1a43644fb = []byte{
var fileDescriptor_569d535d76469daf = []byte{
// 679 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdb, 0x4e, 0xdb, 0x4c,
0x10, 0xc6, 0xce, 0x89, 0x4c, 0x02, 0x3f, 0x2c, 0x08, 0x39, 0x16, 0xfc, 0x8a, 0x0c, 0x42, 0x11,

View File

@ -33,6 +33,7 @@ const (
var RedundancyScheme_SchemeType_name = map[int32]string{
0: "RS",
}
var RedundancyScheme_SchemeType_value = map[string]int32{
"RS": 0,
}
@ -40,8 +41,9 @@ var RedundancyScheme_SchemeType_value = map[string]int32{
func (x RedundancyScheme_SchemeType) String() string {
return proto.EnumName(RedundancyScheme_SchemeType_name, int32(x))
}
func (RedundancyScheme_SchemeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{0, 0}
return fileDescriptor_75fef806d28fc810, []int{0, 0}
}
type EncryptionScheme_EncryptionType int32
@ -55,6 +57,7 @@ var EncryptionScheme_EncryptionType_name = map[int32]string{
0: "AESGCM",
1: "SECRETBOX",
}
var EncryptionScheme_EncryptionType_value = map[string]int32{
"AESGCM": 0,
"SECRETBOX": 1,
@ -63,8 +66,9 @@ var EncryptionScheme_EncryptionType_value = map[string]int32{
func (x EncryptionScheme_EncryptionType) String() string {
return proto.EnumName(EncryptionScheme_EncryptionType_name, int32(x))
}
func (EncryptionScheme_EncryptionType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{1, 0}
return fileDescriptor_75fef806d28fc810, []int{1, 0}
}
type Pointer_DataType int32
@ -78,6 +82,7 @@ var Pointer_DataType_name = map[int32]string{
0: "INLINE",
1: "REMOTE",
}
var Pointer_DataType_value = map[string]int32{
"INLINE": 0,
"REMOTE": 1,
@ -86,8 +91,9 @@ var Pointer_DataType_value = map[string]int32{
func (x Pointer_DataType) String() string {
return proto.EnumName(Pointer_DataType_name, int32(x))
}
func (Pointer_DataType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{4, 0}
return fileDescriptor_75fef806d28fc810, []int{4, 0}
}
type RedundancyScheme struct {
@ -107,7 +113,7 @@ func (m *RedundancyScheme) Reset() { *m = RedundancyScheme{} }
func (m *RedundancyScheme) String() string { return proto.CompactTextString(m) }
func (*RedundancyScheme) ProtoMessage() {}
func (*RedundancyScheme) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{0}
return fileDescriptor_75fef806d28fc810, []int{0}
}
func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RedundancyScheme.Unmarshal(m, b)
@ -182,7 +188,7 @@ func (m *EncryptionScheme) Reset() { *m = EncryptionScheme{} }
func (m *EncryptionScheme) String() string { return proto.CompactTextString(m) }
func (*EncryptionScheme) ProtoMessage() {}
func (*EncryptionScheme) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{1}
return fileDescriptor_75fef806d28fc810, []int{1}
}
func (m *EncryptionScheme) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EncryptionScheme.Unmarshal(m, b)
@ -235,7 +241,7 @@ func (m *RemotePiece) Reset() { *m = RemotePiece{} }
func (m *RemotePiece) String() string { return proto.CompactTextString(m) }
func (*RemotePiece) ProtoMessage() {}
func (*RemotePiece) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{2}
return fileDescriptor_75fef806d28fc810, []int{2}
}
func (m *RemotePiece) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemotePiece.Unmarshal(m, b)
@ -283,7 +289,7 @@ func (m *RemoteSegment) Reset() { *m = RemoteSegment{} }
func (m *RemoteSegment) String() string { return proto.CompactTextString(m) }
func (*RemoteSegment) ProtoMessage() {}
func (*RemoteSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{3}
return fileDescriptor_75fef806d28fc810, []int{3}
}
func (m *RemoteSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemoteSegment.Unmarshal(m, b)
@ -348,7 +354,7 @@ func (m *Pointer) Reset() { *m = Pointer{} }
func (m *Pointer) String() string { return proto.CompactTextString(m) }
func (*Pointer) ProtoMessage() {}
func (*Pointer) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{4}
return fileDescriptor_75fef806d28fc810, []int{4}
}
func (m *Pointer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Pointer.Unmarshal(m, b)
@ -431,7 +437,7 @@ func (m *PutRequest) Reset() { *m = PutRequest{} }
func (m *PutRequest) String() string { return proto.CompactTextString(m) }
func (*PutRequest) ProtoMessage() {}
func (*PutRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{5}
return fileDescriptor_75fef806d28fc810, []int{5}
}
func (m *PutRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutRequest.Unmarshal(m, b)
@ -485,7 +491,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{6}
return fileDescriptor_75fef806d28fc810, []int{6}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetRequest.Unmarshal(m, b)
@ -537,7 +543,7 @@ func (m *ListRequest) Reset() { *m = ListRequest{} }
func (m *ListRequest) String() string { return proto.CompactTextString(m) }
func (*ListRequest) ProtoMessage() {}
func (*ListRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{7}
return fileDescriptor_75fef806d28fc810, []int{7}
}
func (m *ListRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListRequest.Unmarshal(m, b)
@ -617,7 +623,7 @@ func (m *PutResponse) Reset() { *m = PutResponse{} }
func (m *PutResponse) String() string { return proto.CompactTextString(m) }
func (*PutResponse) ProtoMessage() {}
func (*PutResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{8}
return fileDescriptor_75fef806d28fc810, []int{8}
}
func (m *PutResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutResponse.Unmarshal(m, b)
@ -650,7 +656,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{9}
return fileDescriptor_75fef806d28fc810, []int{9}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetResponse.Unmarshal(m, b)
@ -697,7 +703,7 @@ func (m *ListResponse) Reset() { *m = ListResponse{} }
func (m *ListResponse) String() string { return proto.CompactTextString(m) }
func (*ListResponse) ProtoMessage() {}
func (*ListResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{10}
return fileDescriptor_75fef806d28fc810, []int{10}
}
func (m *ListResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse.Unmarshal(m, b)
@ -744,7 +750,7 @@ func (m *ListResponse_Item) Reset() { *m = ListResponse_Item{} }
func (m *ListResponse_Item) String() string { return proto.CompactTextString(m) }
func (*ListResponse_Item) ProtoMessage() {}
func (*ListResponse_Item) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{10, 0}
return fileDescriptor_75fef806d28fc810, []int{10, 0}
}
func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse_Item.Unmarshal(m, b)
@ -797,7 +803,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{11}
return fileDescriptor_75fef806d28fc810, []int{11}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
@ -842,7 +848,7 @@ func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_5845982d5b04ba4c, []int{12}
return fileDescriptor_75fef806d28fc810, []int{12}
}
func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
@ -1061,9 +1067,9 @@ var _PointerDB_serviceDesc = grpc.ServiceDesc{
Metadata: "pointerdb.proto",
}
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_5845982d5b04ba4c) }
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_75fef806d28fc810) }
var fileDescriptor_pointerdb_5845982d5b04ba4c = []byte{
var fileDescriptor_75fef806d28fc810 = []byte{
// 1013 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xdb, 0x6e, 0xdb, 0x46,
0x10, 0x35, 0x75, 0xe7, 0xc8, 0xb2, 0xd9, 0x45, 0xea, 0x30, 0x72, 0x8a, 0x18, 0x2c, 0x5a, 0xb8,

View File

@ -8,8 +8,7 @@ import (
"github.com/mr-tron/base58/base58"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
)
func TestNewPieceID(t *testing.T) {
@ -27,7 +26,7 @@ func TestNewPieceID(t *testing.T) {
func TestDerivePieceID(t *testing.T) {
pid := NewPieceID()
nid, err := kademlia.NewID()
nid, err := node.NewID()
assert.NoError(t, err)
did, err := pid.Derive(nid.Bytes())

View File

@ -61,6 +61,7 @@ func NewClient(identity *provider.FullIdentity, address string, APIKey []byte) (
if err != nil {
return nil, err
}
c, err := clientConnection(address, dialOpt)
if err != nil {

View File

@ -106,6 +106,8 @@ func (s *Server) Put(ctx context.Context, req *pb.PutRequest) (resp *pb.PutRespo
func (s *Server) Get(ctx context.Context, req *pb.GetRequest) (resp *pb.GetResponse, err error) {
defer mon.Task()(&ctx)(&err)
s.logger.Debug("entering pointerdb get")
if err = s.validateAuth(req.GetAPIKey()); err != nil {
return nil, err
}

View File

@ -15,8 +15,11 @@ type ConnectionPool struct {
}
// NewConnectionPool initializes a new in memory pool
func NewConnectionPool() Pool {
return &ConnectionPool{}
func NewConnectionPool() *ConnectionPool {
return &ConnectionPool{
cache: make(map[string]interface{}),
mu: sync.RWMutex{},
}
}
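A brief usage sketch for the now-initialized pool, assuming the Add/Get signatures exercised by the test changes below and an import path of storj.io/storj/pkg/pool; the key and value are placeholders.

// Sketch, not part of this commit.
package example

import (
	"context"
	"fmt"

	"storj.io/storj/pkg/pool" // assumed import path
)

func usePool(ctx context.Context) error {
	p := pool.NewConnectionPool() // cache map and mutex are initialized here now

	if err := p.Add(ctx, "node-1", "some-client"); err != nil {
		return err
	}
	v, err := p.Get(ctx, "node-1")
	if err != nil {
		return err
	}
	fmt.Println(v) // prints some-client
	return nil
}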
// Add takes a node ID as the key and a node client as the value to store

View File

@ -5,6 +5,7 @@ package pool
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/assert"
@ -15,14 +16,19 @@ type TestFoo struct {
}
func TestGet(t *testing.T) {
ctx := context.Background()
cases := []struct {
pool ConnectionPool
pool *ConnectionPool
key string
expected TestFoo
expectedError error
}{
{
pool: ConnectionPool{cache: map[string]interface{}{"foo": TestFoo{called: "hoot"}}},
pool: func() *ConnectionPool {
p := NewConnectionPool()
assert.NoError(t, p.Add(ctx, "foo", TestFoo{called: "hoot"}))
return p
}(),
key: "foo",
expected: TestFoo{called: "hoot"},
expectedError: nil,
@ -31,7 +37,7 @@ func TestGet(t *testing.T) {
for i := range cases {
v := &cases[i]
test, err := v.pool.Get(context.Background(), v.key)
test, err := v.pool.Get(ctx, v.key)
assert.Equal(t, v.expectedError, err)
assert.Equal(t, v.expected, test)
}
@ -46,7 +52,9 @@ func TestAdd(t *testing.T) {
expectedError error
}{
{
pool: ConnectionPool{cache: map[string]interface{}{}},
pool: ConnectionPool{
mu: sync.RWMutex{},
cache: map[string]interface{}{}},
key: "foo",
value: TestFoo{called: "hoot"},
expected: TestFoo{called: "hoot"},
@ -74,7 +82,9 @@ func TestRemove(t *testing.T) {
expectedError error
}{
{
pool: ConnectionPool{cache: map[string]interface{}{"foo": TestFoo{called: "hoot"}}},
pool: ConnectionPool{
mu: sync.RWMutex{},
cache: map[string]interface{}{"foo": TestFoo{called: "hoot"}}},
key: "foo",
expected: nil,
expectedError: nil,

View File

@ -239,7 +239,6 @@ func (ic IdentityConfig) Run(ctx context.Context,
return err
}
defer func() { _ = s.Close() }()
zap.S().Infof("Node %s started", s.Identity().ID)
return s.Run(ctx)

View File

@ -41,7 +41,7 @@ func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{0}
return fileDescriptor_a368771650b1cdca, []int{0}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
@ -125,7 +125,7 @@ func (m *NodeStats) Reset() { *m = NodeStats{} }
func (m *NodeStats) String() string { return proto.CompactTextString(m) }
func (*NodeStats) ProtoMessage() {}
func (*NodeStats) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{1}
return fileDescriptor_a368771650b1cdca, []int{1}
}
func (m *NodeStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeStats.Unmarshal(m, b)
@ -186,7 +186,7 @@ func (m *CreateRequest) Reset() { *m = CreateRequest{} }
func (m *CreateRequest) String() string { return proto.CompactTextString(m) }
func (*CreateRequest) ProtoMessage() {}
func (*CreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{2}
return fileDescriptor_a368771650b1cdca, []int{2}
}
func (m *CreateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateRequest.Unmarshal(m, b)
@ -232,7 +232,7 @@ func (m *CreateResponse) Reset() { *m = CreateResponse{} }
func (m *CreateResponse) String() string { return proto.CompactTextString(m) }
func (*CreateResponse) ProtoMessage() {}
func (*CreateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{3}
return fileDescriptor_a368771650b1cdca, []int{3}
}
func (m *CreateResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateResponse.Unmarshal(m, b)
@ -272,7 +272,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{4}
return fileDescriptor_a368771650b1cdca, []int{4}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetRequest.Unmarshal(m, b)
@ -318,7 +318,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{5}
return fileDescriptor_a368771650b1cdca, []int{5}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetResponse.Unmarshal(m, b)
@ -358,7 +358,7 @@ func (m *UpdateRequest) Reset() { *m = UpdateRequest{} }
func (m *UpdateRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateRequest) ProtoMessage() {}
func (*UpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{6}
return fileDescriptor_a368771650b1cdca, []int{6}
}
func (m *UpdateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateRequest.Unmarshal(m, b)
@ -404,7 +404,7 @@ func (m *UpdateResponse) Reset() { *m = UpdateResponse{} }
func (m *UpdateResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateResponse) ProtoMessage() {}
func (*UpdateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{7}
return fileDescriptor_a368771650b1cdca, []int{7}
}
func (m *UpdateResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateResponse.Unmarshal(m, b)
@ -444,7 +444,7 @@ func (m *UpdateBatchRequest) Reset() { *m = UpdateBatchRequest{} }
func (m *UpdateBatchRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateBatchRequest) ProtoMessage() {}
func (*UpdateBatchRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{8}
return fileDescriptor_a368771650b1cdca, []int{8}
}
func (m *UpdateBatchRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateBatchRequest.Unmarshal(m, b)
@ -490,7 +490,7 @@ func (m *UpdateBatchResponse) Reset() { *m = UpdateBatchResponse{} }
func (m *UpdateBatchResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateBatchResponse) ProtoMessage() {}
func (*UpdateBatchResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_statdb_a251acf1433ffe9d, []int{9}
return fileDescriptor_a368771650b1cdca, []int{9}
}
func (m *UpdateBatchResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateBatchResponse.Unmarshal(m, b)
@ -709,9 +709,9 @@ var _StatDB_serviceDesc = grpc.ServiceDesc{
Metadata: "statdb.proto",
}
func init() { proto.RegisterFile("statdb.proto", fileDescriptor_statdb_a251acf1433ffe9d) }
func init() { proto.RegisterFile("statdb.proto", fileDescriptor_a368771650b1cdca) }
var fileDescriptor_statdb_a251acf1433ffe9d = []byte{
var fileDescriptor_a368771650b1cdca = []byte{
// 499 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6a, 0xdb, 0x40,
0x10, 0x45, 0x91, 0xac, 0xc4, 0x23, 0x39, 0x90, 0x71, 0x9b, 0x0a, 0x97, 0x82, 0xaa, 0x50, 0xea,

View File

@ -16,7 +16,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
@ -95,7 +95,7 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.
if err != nil {
return Meta{}, err
}
var path paths.Path
var pointer *pb.Pointer
if !remoteSized {
@ -263,8 +263,8 @@ func (s *segmentStore) Delete(ctx context.Context, path paths.Path) (err error)
func (s *segmentStore) lookupNodes(ctx context.Context, seg *pb.RemoteSegment) (nodes []*pb.Node, err error) {
// Get list of all nodes IDs storing a piece from the segment
var nodeIds []dht.NodeID
for _, p := range seg.GetRemotePieces() {
nodeIds = append(nodeIds, kademlia.StringToNodeID(p.GetNodeId()))
for _, p := range seg.RemotePieces {
nodeIds = append(nodeIds, node.IDFromString(p.GetNodeId()))
}
// Lookup the node info from node IDs
n, err := s.oc.BulkLookup(ctx, nodeIds)
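For other call sites making the same switch: kademlia.StringToNodeID is gone, and string-to-ID conversion now lives in pkg/node. A minimal illustrative helper (not from this commit):

// Sketch, not part of this commit.
package example

import (
	"storj.io/storj/pkg/dht"
	"storj.io/storj/pkg/node"
)

// toNodeIDs converts raw string IDs into dht.NodeID values via the new
// pkg/node helper (formerly kademlia.StringToNodeID).
func toNodeIDs(raw []string) []dht.NodeID {
	ids := make([]dht.NodeID, 0, len(raw))
	for _, s := range raw {
		ids = append(ids, node.IDFromString(s))
	}
	return ids
}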

View File

@ -1,5 +1,5 @@
#!/bin/bash
FILES=$(find $PWD -type f ! -path '*vendor/*' \( -iname '*.go' ! -iname "*.pb.go" \))
#!/bin/bash
FILES=$(find $PWD -type f ! -path '*vendor/*' \( -iname '*.go' ! -iname "*.pb.go" \))
for i in $FILES
do
if ! grep -q 'Copyright' <<< "$(head -n 2 "$i")"