Flatten proto definitions into a single package (#360)

* protos: move streams to pb
* protos: move overlay to pb
* protos: move pointerdb to pb
* protos: move piecestore to pb
* fix statdb import naming
Egon Elbre 2018-09-18 07:39:06 +03:00 committed by GitHub
parent f6c48dcb60
commit b6b6111173
78 changed files with 563 additions and 578 deletions
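
Every file below follows the same pattern: the per-domain generated packages under storj.io/storj/protos/... are replaced by the single storj.io/storj/pkg/pb package, and type references are renamed accordingly. As a minimal sketch of the new style (the package and function names here are hypothetical, but the pb types all appear in this diff):

package example

import "storj.io/storj/pkg/pb"

// newNode builds a node value using the flattened package: pb.Node replaces
// proto.Node, pb.NodeAddress replaces proto.NodeAddress, and so on.
func newNode(id, addr string) pb.Node {
	return pb.Node{
		Id:      id,
		Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: addr},
	}
}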

View File

@ -13,7 +13,7 @@ import (
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/process"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
var (
@ -90,13 +90,13 @@ func cmdAdd(cmd *cobra.Command, args []string) (err error) {
for i, a := range nodes {
zap.S().Infof("adding node ID: %s; Address: %s", i, a)
err := c.Put(i, proto.Node{
err := c.Put(i, pb.Node{
Id: i,
Address: &proto.NodeAddress{
Address: &pb.NodeAddress{
Transport: 0,
Address: a,
},
Restrictions: &proto.NodeRestrictions{
Restrictions: &pb.NodeRestrictions{
FreeBandwidth: 2000000000,
FreeDisk: 2000000000,
},

View File

@ -17,9 +17,9 @@ import (
"github.com/zeebo/errs"
"google.golang.org/grpc"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/rpc/client"
"storj.io/storj/pkg/provider"
pb "storj.io/storj/protos/piecestore"
)
var ctx = context.Background()

View File

@ -12,6 +12,6 @@ You can change the port number with a flag if necessary: `-port=<port-number>`
Afterward, you can use [Bolter](https://github.com/hasit/bolter) or a similar BoltDB viewer to make sure your pointer entries were changed as expected.
If changes are made to `storj.io/storj/protos/pointerdb/pointerdb.proto`, the protobuf file will need to be regenerated by running `go generate` inside `protos/pointerdb`.
If changes are made to `storj.io/storj/pkg/pb/pointerdb.proto`, the protobuf file will need to be regenerated by running `go generate` inside `pkg/pb`.
Tests for this example code can be found in `storj.io/storj/pkg/pointerdb/client_test.go`.
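
For reference, regeneration like this is typically driven by a go:generate directive placed next to the .proto files; a sketch under the assumption that `pkg/pb` uses the standard protoc Go plugin (the exact flags in the repository may differ):

//go:generate protoc --go_out=plugins=grpc:. pointerdb.proto

Running `go generate` inside `pkg/pb` would then invoke protoc and rewrite the generated Go bindings.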

View File

@ -15,10 +15,10 @@ import (
"google.golang.org/grpc/status"
p "storj.io/storj/pkg/paths"
client "storj.io/storj/pkg/pointerdb/pdbclient"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb/pdbclient"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storage/meta"
proto "storj.io/storj/protos/pointerdb"
)
var (
@ -48,7 +48,7 @@ func main() {
os.Exit(1)
}
APIKey := []byte("abc123")
pdbclient, err := client.NewClient(identity, pointerdbClientPort, APIKey)
client, err := pdbclient.NewClient(identity, pointerdbClientPort, APIKey)
if err != nil {
logger.Error("Failed to dial: ", zap.Error(err))
@ -60,13 +60,13 @@ func main() {
// Example parameters to pass into API calls
var path = p.New("fold1/fold2/fold3/file.txt")
pointer := &proto.Pointer{
Type: proto.Pointer_INLINE,
pointer := &pb.Pointer{
Type: pb.Pointer_INLINE,
InlineSegment: []byte("popcorn"),
}
// Example Put1
err = pdbclient.Put(ctx, path, pointer)
err = client.Put(ctx, path, pointer)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("couldn't put pointer in db", zap.Error(err))
@ -75,7 +75,7 @@ func main() {
}
// Example Put2
err = pdbclient.Put(ctx, p.New("fold1/fold2"), pointer)
err = client.Put(ctx, p.New("fold1/fold2"), pointer)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("couldn't put pointer in db", zap.Error(err))
@ -84,7 +84,7 @@ func main() {
}
// Example Get
getRes, err := pdbclient.Get(ctx, path)
getRes, err := client.Get(ctx, path)
if err != nil {
logger.Error("couldn't GET pointer from db", zap.Error(err))
@ -96,7 +96,7 @@ func main() {
// Example List with pagination
prefix := p.New("fold1")
items, more, err := pdbclient.List(ctx, prefix, nil, nil, true, 1, meta.None)
items, more, err := client.List(ctx, prefix, nil, nil, true, 1, meta.None)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to list file paths", zap.Error(err))
@ -109,7 +109,7 @@ func main() {
}
// Example Delete
err = pdbclient.Delete(ctx, path)
err = client.Delete(ctx, path)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("Error in deleteing file from db", zap.Error(err))

View File

@ -7,7 +7,7 @@ import (
"context"
"time"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// NodeID is the unique identifier used for Nodes in the DHT
@ -18,28 +18,28 @@ type NodeID interface {
// DHT is the interface for the DHT in the Storj network
type DHT interface {
GetNodes(ctx context.Context, start string, limit int, restrictions ...proto.Restriction) ([]*proto.Node, error)
GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error)
GetRoutingTable(ctx context.Context) (RoutingTable, error)
Bootstrap(ctx context.Context) error
Ping(ctx context.Context, node proto.Node) (proto.Node, error)
FindNode(ctx context.Context, ID NodeID) (proto.Node, error)
Ping(ctx context.Context, node pb.Node) (pb.Node, error)
FindNode(ctx context.Context, ID NodeID) (pb.Node, error)
Disconnect() error
}
// RoutingTable contains information on nodes we have locally
type RoutingTable interface {
// local params
Local() proto.Node
Local() pb.Node
K() int
CacheSize() int
GetBucket(id string) (bucket Bucket, ok bool)
GetBuckets() ([]Bucket, error)
FindNear(id NodeID, limit int) ([]*proto.Node, error)
FindNear(id NodeID, limit int) ([]*pb.Node, error)
ConnectionSuccess(node *proto.Node) error
ConnectionFailed(node *proto.Node) error
ConnectionSuccess(node *pb.Node) error
ConnectionFailed(node *pb.Node) error
// these are for refreshing
SetBucketTimestamp(id string, now time.Time) error
@ -48,8 +48,8 @@ type RoutingTable interface {
// Bucket is a set of methods to act on kademlia k buckets
type Bucket interface {
Routing() []proto.Node
Cache() []proto.Node
Routing() []pb.Node
Cache() []pb.Node
Midpoint() string
Nodes() []*proto.Node
Nodes() []*pb.Node
}
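
For callers, the change is mechanical; a hedged usage sketch against the updated DHT interface (the wrapper function itself is hypothetical):

package example

import (
	"context"

	"storj.io/storj/pkg/dht"
	"storj.io/storj/pkg/pb"
)

// pingNode shows a caller updated for the flattened package: DHT methods now
// accept and return pb.Node values instead of proto.Node.
func pingNode(ctx context.Context, d dht.DHT, target pb.Node) (pb.Node, error) {
	return d.Ping(ctx, target)
}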

View File

@ -11,7 +11,7 @@ import (
gomock "github.com/golang/mock/gomock"
dht "storj.io/storj/pkg/dht"
overlay "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// MockDHT is a mock of DHT interface
@ -62,9 +62,9 @@ func (mr *MockDHTMockRecorder) Disconnect() *gomock.Call {
}
// FindNode mocks base method
func (m *MockDHT) FindNode(arg0 context.Context, arg1 dht.NodeID) (overlay.Node, error) {
func (m *MockDHT) FindNode(arg0 context.Context, arg1 dht.NodeID) (pb.Node, error) {
ret := m.ctrl.Call(m, "FindNode", arg0, arg1)
ret0, _ := ret[0].(overlay.Node)
ret0, _ := ret[0].(pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -75,13 +75,13 @@ func (mr *MockDHTMockRecorder) FindNode(arg0, arg1 interface{}) *gomock.Call {
}
// GetNodes mocks base method
func (m *MockDHT) GetNodes(arg0 context.Context, arg1 string, arg2 int, arg3 ...overlay.Restriction) ([]*overlay.Node, error) {
func (m *MockDHT) GetNodes(arg0 context.Context, arg1 string, arg2 int, arg3 ...pb.Restriction) ([]*pb.Node, error) {
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetNodes", varargs...)
ret0, _ := ret[0].([]*overlay.Node)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -106,9 +106,9 @@ func (mr *MockDHTMockRecorder) GetRoutingTable(arg0 interface{}) *gomock.Call {
}
// Ping mocks base method
func (m *MockDHT) Ping(arg0 context.Context, arg1 overlay.Node) (overlay.Node, error) {
func (m *MockDHT) Ping(arg0 context.Context, arg1 pb.Node) (pb.Node, error) {
ret := m.ctrl.Call(m, "Ping", arg0, arg1)
ret0, _ := ret[0].(overlay.Node)
ret0, _ := ret[0].(pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -154,7 +154,7 @@ func (mr *MockRoutingTableMockRecorder) CacheSize() *gomock.Call {
}
// ConnectionFailed mocks base method
func (m *MockRoutingTable) ConnectionFailed(arg0 *overlay.Node) error {
func (m *MockRoutingTable) ConnectionFailed(arg0 *pb.Node) error {
ret := m.ctrl.Call(m, "ConnectionFailed", arg0)
ret0, _ := ret[0].(error)
return ret0
@ -166,7 +166,7 @@ func (mr *MockRoutingTableMockRecorder) ConnectionFailed(arg0 interface{}) *gomo
}
// ConnectionSuccess mocks base method
func (m *MockRoutingTable) ConnectionSuccess(arg0 *overlay.Node) error {
func (m *MockRoutingTable) ConnectionSuccess(arg0 *pb.Node) error {
ret := m.ctrl.Call(m, "ConnectionSuccess", arg0)
ret0, _ := ret[0].(error)
return ret0
@ -178,9 +178,9 @@ func (mr *MockRoutingTableMockRecorder) ConnectionSuccess(arg0 interface{}) *gom
}
// FindNear mocks base method
func (m *MockRoutingTable) FindNear(arg0 dht.NodeID, arg1 int) ([]*overlay.Node, error) {
func (m *MockRoutingTable) FindNear(arg0 dht.NodeID, arg1 int) ([]*pb.Node, error) {
ret := m.ctrl.Call(m, "FindNear", arg0, arg1)
ret0, _ := ret[0].([]*overlay.Node)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -242,9 +242,9 @@ func (mr *MockRoutingTableMockRecorder) K() *gomock.Call {
}
// Local mocks base method
func (m *MockRoutingTable) Local() overlay.Node {
func (m *MockRoutingTable) Local() pb.Node {
ret := m.ctrl.Call(m, "Local")
ret0, _ := ret[0].(overlay.Node)
ret0, _ := ret[0].(pb.Node)
return ret0
}

View File

@ -3,21 +3,21 @@
package kademlia
import proto "storj.io/storj/protos/overlay"
import "storj.io/storj/pkg/pb"
// KBucket implements the Bucket interface
type KBucket struct {
nodes []*proto.Node
nodes []*pb.Node
}
// Routing __ (TODO) still not entirely sure what the bucket methods are supposed to do
func (b *KBucket) Routing() []proto.Node {
return []proto.Node{}
func (b *KBucket) Routing() []pb.Node {
return []pb.Node{}
}
// Cache __ (TODO) still not entirely sure what the bucket methods are supposed to do
func (b *KBucket) Cache() []proto.Node {
return []proto.Node{}
func (b *KBucket) Cache() []pb.Node {
return []pb.Node{}
}
// Midpoint __ (TODO) still not entirely sure what the bucket methods are supposed to do
@ -26,6 +26,6 @@ func (b *KBucket) Midpoint() string {
}
// Nodes returns the set of all nodes in a bucket
func (b *KBucket) Nodes() []*proto.Node {
func (b *KBucket) Nodes() []*pb.Node {
return b.nodes
}

View File

@ -10,8 +10,8 @@ import (
"github.com/zeebo/errs"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
var (
@ -59,7 +59,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
}
// TODO(jt): kademlia should register on server.GRPC() instead of listening
// itself
kad, err := NewKademlia(server.Identity().ID, []proto.Node{*in}, host, port)
kad, err := NewKademlia(server.Identity().ID, []pb.Node{*in}, host, port)
if err != nil {
return err
}

View File

@ -14,19 +14,19 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/dht"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// NodeErr is the class for all errors pertaining to node operations
var NodeErr = errs.Class("node error")
//TODO: shouldn't default to TCP but not sure what to do yet
var defaultTransport = proto.NodeTransport_TCP
var defaultTransport = pb.NodeTransport_TCP
// Kademlia is an implementation of kademlia adhering to the DHT interface.
type Kademlia struct {
routingTable RoutingTable
bootstrapNodes []proto.Node
bootstrapNodes []pb.Node
ip string
port string
stun bool
@ -34,7 +34,7 @@ type Kademlia struct {
}
// NewKademlia returns a newly configured Kademlia instance
func NewKademlia(id dht.NodeID, bootstrapNodes []proto.Node, ip string, port string) (*Kademlia, error) {
func NewKademlia(id dht.NodeID, bootstrapNodes []pb.Node, ip string, port string) (*Kademlia, error) {
if port == "" {
return nil, NodeErr.New("must specify port in request to NewKademlia")
}
@ -88,14 +88,14 @@ func (k Kademlia) Disconnect() error {
// GetNodes returns all nodes from a starting node up to a maximum limit
// stored in the local routing table, limiting the result by the specified restrictions
func (k Kademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...proto.Restriction) ([]*proto.Node, error) {
func (k Kademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
if start == "" {
start = k.dht.GetSelfID()
}
nn, err := k.dht.FindNodes(ctx, start, limit)
if err != nil {
return []*proto.Node{}, err
return []*pb.Node{}, err
}
nodes := convertNetworkNodes(nn)
@ -122,41 +122,41 @@ func (k *Kademlia) Bootstrap(ctx context.Context) error {
}
// Ping checks that the provided node is still accessible on the network
func (k *Kademlia) Ping(ctx context.Context, node proto.Node) (proto.Node, error) {
func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
n, err := convertProtoNode(node)
if err != nil {
return proto.Node{}, err
return pb.Node{}, err
}
ok, err := k.dht.Ping(n)
if err != nil {
return proto.Node{}, err
return pb.Node{}, err
}
if !ok {
return proto.Node{}, NodeErr.New("node unavailable")
return pb.Node{}, NodeErr.New("node unavailable")
}
return node, nil
}
// FindNode looks up the provided NodeID first in the local Node, and if it is not found
// begins searching the network for the NodeID. Returns an error if the node was not found
func (k *Kademlia) FindNode(ctx context.Context, ID dht.NodeID) (proto.Node, error) {
func (k *Kademlia) FindNode(ctx context.Context, ID dht.NodeID) (pb.Node, error) {
nodes, err := k.dht.FindNode(ID.Bytes())
if err != nil {
return proto.Node{}, err
return pb.Node{}, err
}
for _, v := range nodes {
if string(v.ID) == ID.String() {
return proto.Node{Id: string(v.ID), Address: &proto.NodeAddress{
return pb.Node{Id: string(v.ID), Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: net.JoinHostPort(v.IP.String(), strconv.Itoa(v.Port)),
},
}, nil
}
}
return proto.Node{}, NodeErr.New("node not found")
return pb.Node{}, NodeErr.New("node not found")
}
// ListenAndServe connects the kademlia node to the network and listens for incoming requests
@ -174,7 +174,7 @@ func (k *Kademlia) ListenAndServe() error {
return nil
}
func convertProtoNodes(n []proto.Node) ([]*bkad.NetworkNode, error) {
func convertProtoNodes(n []pb.Node) ([]*bkad.NetworkNode, error) {
nn := make([]*bkad.NetworkNode, len(n))
for i, v := range n {
node, err := convertProtoNode(v)
@ -187,8 +187,8 @@ func convertProtoNodes(n []proto.Node) ([]*bkad.NetworkNode, error) {
return nn, nil
}
func convertNetworkNodes(n []*bkad.NetworkNode) []*proto.Node {
nn := make([]*proto.Node, len(n))
func convertNetworkNodes(n []*bkad.NetworkNode) []*pb.Node {
nn := make([]*pb.Node, len(n))
for i, v := range n {
nn[i] = convertNetworkNode(v)
}
@ -196,14 +196,14 @@ func convertNetworkNodes(n []*bkad.NetworkNode) []*proto.Node {
return nn
}
func convertNetworkNode(v *bkad.NetworkNode) *proto.Node {
return &proto.Node{
func convertNetworkNode(v *bkad.NetworkNode) *pb.Node {
return &pb.Node{
Id: string(v.ID),
Address: &proto.NodeAddress{Transport: defaultTransport, Address: net.JoinHostPort(v.IP.String(), strconv.Itoa(v.Port))},
Address: &pb.NodeAddress{Transport: defaultTransport, Address: net.JoinHostPort(v.IP.String(), strconv.Itoa(v.Port))},
}
}
func convertProtoNode(v proto.Node) (*bkad.NetworkNode, error) {
func convertProtoNode(v pb.Node) (*bkad.NetworkNode, error) {
host, port, err := net.SplitHostPort(v.GetAddress().GetAddress())
if err != nil {
return nil, err
@ -224,7 +224,7 @@ func newID() ([]byte, error) {
}
// GetIntroNode determines the best node to bootstrap a new node onto the network
func GetIntroNode(id, ip, port string) (*proto.Node, error) {
func GetIntroNode(id, ip, port string) (*pb.Node, error) {
addr := "bootstrap.storj.io:8080"
if ip != "" && port != "" {
addr = ip + ":" + port
@ -239,52 +239,52 @@ func GetIntroNode(id, ip, port string) (*proto.Node, error) {
id = string(i)
}
return &proto.Node{
return &pb.Node{
Id: id,
Address: &proto.NodeAddress{
Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: addr,
},
}, nil
}
func restrict(r proto.Restriction, n []*proto.Node) []*proto.Node {
func restrict(r pb.Restriction, n []*pb.Node) []*pb.Node {
oper := r.GetOperand()
op := r.GetOperator()
val := r.GetValue()
var comp int64
results := []*proto.Node{}
results := []*pb.Node{}
for _, v := range n {
switch oper {
case proto.Restriction_freeBandwidth:
case pb.Restriction_freeBandwidth:
comp = v.GetRestrictions().GetFreeBandwidth()
case proto.Restriction_freeDisk:
case pb.Restriction_freeDisk:
comp = v.GetRestrictions().GetFreeDisk()
}
switch op {
case proto.Restriction_EQ:
case pb.Restriction_EQ:
if comp != val {
results = append(results, v)
continue
}
case proto.Restriction_LT:
case pb.Restriction_LT:
if comp < val {
results = append(results, v)
continue
}
case proto.Restriction_LTE:
case pb.Restriction_LTE:
if comp <= val {
results = append(results, v)
continue
}
case proto.Restriction_GT:
case pb.Restriction_GT:
if comp > val {
results = append(results, v)
continue
}
case proto.Restriction_GTE:
case pb.Restriction_GTE:
if comp >= val {
results = append(results, v)
continue

View File

@ -11,16 +11,16 @@ import (
"time"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"github.com/stretchr/testify/assert"
"storj.io/storj/protos/overlay"
)
const (
testNetSize = 20
)
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Node) {
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
bid, err := newID()
assert.NoError(t, err)
@ -33,7 +33,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
intro, err := GetIntroNode(bnid.String(), ip, pm)
assert.NoError(t, err)
boot, err := NewKademlia(&bnid, []overlay.Node{*intro}, ip, pm)
boot, err := NewKademlia(&bnid, []pb.Node{*intro}, ip, pm)
assert.NoError(t, err)
//added bootnode to dhts so it could be closed in defer as well
@ -56,7 +56,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
assert.NoError(t, err)
id := NodeID(nid)
dht, err := NewKademlia(&id, []overlay.Node{bootNode}, ip, gg)
dht, err := NewKademlia(&id, []pb.Node{bootNode}, ip, gg)
assert.NoError(t, err)
p++
@ -71,11 +71,11 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
return dhts, bootNode
}
func newTestKademlia(t *testing.T, ip, port string, b overlay.Node) *Kademlia {
func newTestKademlia(t *testing.T, ip, port string, b pb.Node) *Kademlia {
i, err := newID()
assert.NoError(t, err)
id := NodeID(i)
n := []overlay.Node{b}
n := []pb.Node{b}
kad, err := NewKademlia(&id, n, ip, port)
assert.NoError(t, err)
@ -136,7 +136,7 @@ func TestGetNodes(t *testing.T) {
k *Kademlia
limit int
expectedErr error
restrictions []overlay.Restriction
restrictions []pb.Restriction
}{
{
k: newTestKademlia(t, "127.0.0.1", "6000", bootNode),
@ -221,14 +221,14 @@ func TestPing(t *testing.T) {
cases := []struct {
k *Kademlia
input overlay.Node
input pb.Node
expectedErr error
}{
{
k: newTestKademlia(t, "127.0.0.1", "6000", bootNode),
input: overlay.Node{
input: pb.Node{
Id: rt.Local().Id,
Address: &overlay.NodeAddress{
Address: &pb.NodeAddress{
Transport: defaultTransport,
Address: addr.Address,
},

View File

@ -4,11 +4,11 @@
package kademlia
import (
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
func (rt *RoutingTable) addToReplacementCache(kadBucketID storage.Key, node *proto.Node) {
func (rt *RoutingTable) addToReplacementCache(kadBucketID storage.Key, node *pb.Node) {
bucketID := string(kadBucketID)
nodes := rt.replacementCache[bucketID]
nodes = append(nodes, node)

View File

@ -7,7 +7,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
func TestAddToReplacementCache(t *testing.T) {
@ -16,7 +17,7 @@ func TestAddToReplacementCache(t *testing.T) {
kadBucketID := []byte{255, 255}
node1 := mockNode(string([]byte{233, 255}))
rt.addToReplacementCache(kadBucketID, node1)
assert.Equal(t, []*proto.Node{node1}, rt.replacementCache[string(kadBucketID)])
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[string(kadBucketID)])
kadBucketID2 := []byte{127, 255}
node2 := mockNode(string([]byte{100, 255}))
node3 := mockNode(string([]byte{90, 255}))
@ -24,8 +25,8 @@ func TestAddToReplacementCache(t *testing.T) {
rt.addToReplacementCache(kadBucketID2, node2)
rt.addToReplacementCache(kadBucketID2, node3)
assert.Equal(t, []*proto.Node{node1}, rt.replacementCache[string(kadBucketID)])
assert.Equal(t, []*proto.Node{node2, node3}, rt.replacementCache[string(kadBucketID2)])
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[string(kadBucketID)])
assert.Equal(t, []*pb.Node{node2, node3}, rt.replacementCache[string(kadBucketID2)])
rt.addToReplacementCache(kadBucketID2, node4)
assert.Equal(t, []*proto.Node{node3, node4}, rt.replacementCache[string(kadBucketID2)])
assert.Equal(t, []*pb.Node{node3, node4}, rt.replacementCache[string(kadBucketID2)])
}

View File

@ -13,8 +13,8 @@ import (
"go.uber.org/zap"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/utils"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/storelogger"
@ -32,12 +32,12 @@ var RoutingErr = errs.Class("routing table error")
// RoutingTable implements the RoutingTable interface
type RoutingTable struct {
self *proto.Node
self *pb.Node
kadBucketDB storage.KeyValueStore
nodeBucketDB storage.KeyValueStore
transport *proto.NodeTransport
transport *pb.NodeTransport
mutex *sync.Mutex
replacementCache map[string][]*proto.Node
replacementCache map[string][]*pb.Node
idLength int // kbucket and node id bit length (SHA256) = 256
bucketSize int // max number of nodes stored in a kbucket = 20 (k)
rcBucketSize int // replacementCache bucket max length
@ -53,7 +53,7 @@ type RoutingOptions struct {
}
// NewRoutingTable returns a newly configured instance of a RoutingTable
func NewRoutingTable(localNode *proto.Node, options *RoutingOptions) (*RoutingTable, error) {
func NewRoutingTable(localNode *pb.Node, options *RoutingOptions) (*RoutingTable, error) {
kdb, err := boltdb.New(options.kpath, KademliaBucket)
if err != nil {
return nil, RoutingErr.New("could not create kadBucketDB: %s", err)
@ -63,7 +63,7 @@ func NewRoutingTable(localNode *proto.Node, options *RoutingOptions) (*RoutingTa
if err != nil {
return nil, RoutingErr.New("could not create nodeBucketDB: %s", err)
}
rp := make(map[string][]*proto.Node)
rp := make(map[string][]*pb.Node)
rt := &RoutingTable{
self: localNode,
kadBucketDB: storelogger.New(zap.L(), kdb),
@ -90,7 +90,7 @@ func (rt *RoutingTable) Close() error {
}
// Local returns the local node's ID
func (rt *RoutingTable) Local() proto.Node {
func (rt *RoutingTable) Local() pb.Node {
return *rt.self
}
@ -144,23 +144,23 @@ func (rt *RoutingTable) GetBuckets() (k []dht.Bucket, err error) {
// FindNear returns the node corresponding to the provided nodeID if present in the routing table
// otherwise returns all Nodes closest via XOR to the provided nodeID, up to the provided limit
func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*proto.Node, error) {
func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*pb.Node, error) {
//if id is in the routing table
n, err := rt.nodeBucketDB.Get(id.Bytes())
if n != nil {
ns, err := unmarshalNodes(storage.Keys{id.Bytes()}, []storage.Value{n})
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not unmarshal node %s", err)
return []*pb.Node{}, RoutingErr.New("could not unmarshal node %s", err)
}
return ns, nil
}
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return []*proto.Node{}, RoutingErr.New("could not get key from rt %s", err)
return []*pb.Node{}, RoutingErr.New("could not get key from rt %s", err)
}
// if id is not in the routing table
nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not get node ids %s", err)
return []*pb.Node{}, RoutingErr.New("could not get node ids %s", err)
}
sortedIDs := sortByXOR(nodeIDs, id.Bytes())
if len(sortedIDs) >= limit {
@ -168,18 +168,18 @@ func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*proto.Node, error
}
ids, serializedNodes, err := rt.getNodesFromIDs(sortedIDs)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not get nodes %s", err)
return []*pb.Node{}, RoutingErr.New("could not get nodes %s", err)
}
unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
return []*pb.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
}
return unmarshaledNodes, nil
}
// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(node *proto.Node) error {
func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
v, err := rt.nodeBucketDB.Get(storage.Key(node.Id))
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return RoutingErr.New("could not get node %s", err)
@ -200,7 +200,7 @@ func (rt *RoutingTable) ConnectionSuccess(node *proto.Node) error {
// ConnectionFailed removes a node from the routing table when
// a connection fails for the node on the network
func (rt *RoutingTable) ConnectionFailed(node *proto.Node) error {
func (rt *RoutingTable) ConnectionFailed(node *pb.Node) error {
nodeID := storage.Key(node.Id)
bucketID, err := rt.getKBucketID(nodeID)
if err != nil {

View File

@ -9,16 +9,16 @@ import (
"math/rand"
"time"
pb "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/proto"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
// addNode attempts to add a new contact to the routing table
// Requires node not already in table
// Returns true if node was added successfully
func (rt *RoutingTable) addNode(node *proto.Node) (bool, error) {
func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
rt.mutex.Lock()
defer rt.mutex.Unlock()
nodeKey := storage.Key(node.Id)
@ -100,7 +100,7 @@ func (rt *RoutingTable) addNode(node *proto.Node) (bool, error) {
// updateNode will update the node information given that
// the node is already in the routing table.
func (rt *RoutingTable) updateNode(node *proto.Node) error {
func (rt *RoutingTable) updateNode(node *pb.Node) error {
marshaledNode, err := marshalNode(*node)
if err != nil {
return err
@ -141,17 +141,17 @@ func (rt *RoutingTable) removeNode(kadBucketID storage.Key, nodeID storage.Key)
return nil
}
// marshalNode: helper, sanitizes proto Node for db insertion
func marshalNode(node proto.Node) ([]byte, error) {
// marshalNode: helper, sanitizes Node for db insertion
func marshalNode(node pb.Node) ([]byte, error) {
node.Id = "-"
nodeVal, err := pb.Marshal(&node)
nodeVal, err := proto.Marshal(&node)
if err != nil {
return nil, RoutingErr.New("could not marshal proto node: %s", err)
return nil, RoutingErr.New("could not marshal node: %s", err)
}
return nodeVal, nil
}
// putNode: helper, adds or updates proto Node and ID to nodeBucketDB
// putNode: helper, adds or updates Node and ID to nodeBucketDB
func (rt *RoutingTable) putNode(nodeKey storage.Key, nodeValue storage.Value) error {
err := rt.nodeBucketDB.Put(nodeKey, nodeValue)
if err != nil {
@ -318,15 +318,15 @@ func (rt *RoutingTable) getNodesFromIDs(nodeIDs storage.Keys) (storage.Keys, []s
return nodeIDs, nodes, nil
}
// unmarshalNodes: helper, returns slice of reconstructed proto node pointers given a map of nodeIDs:serialized nodes
func unmarshalNodes(nodeIDs storage.Keys, nodes []storage.Value) ([]*proto.Node, error) {
// unmarshalNodes: helper, returns slice of reconstructed node pointers given a map of nodeIDs:serialized nodes
func unmarshalNodes(nodeIDs storage.Keys, nodes []storage.Value) ([]*pb.Node, error) {
if len(nodeIDs) != len(nodes) {
return []*proto.Node{}, RoutingErr.New("length mismatch between nodeIDs and nodes")
return []*pb.Node{}, RoutingErr.New("length mismatch between nodeIDs and nodes")
}
var unmarshaled []*proto.Node
var unmarshaled []*pb.Node
for i, n := range nodes {
node := &proto.Node{}
err := pb.Unmarshal(n, node)
node := &pb.Node{}
err := proto.Unmarshal(n, node)
if err != nil {
return unmarshaled, RoutingErr.New("could not unmarshal node %s", err)
}
@ -336,19 +336,19 @@ func unmarshalNodes(nodeIDs storage.Keys, nodes []storage.Value) ([]*proto.Node,
return unmarshaled, nil
}
// getUnmarshaledNodesFromBucket: helper, gets proto nodes within kbucket
func (rt *RoutingTable) getUnmarshaledNodesFromBucket(bucketID storage.Key) ([]*proto.Node, error) {
// getUnmarshaledNodesFromBucket: helper, gets nodes within kbucket
func (rt *RoutingTable) getUnmarshaledNodesFromBucket(bucketID storage.Key) ([]*pb.Node, error) {
nodeIDs, err := rt.getNodeIDsWithinKBucket(bucketID)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not get nodeIds within kbucket %s", err)
return []*pb.Node{}, RoutingErr.New("could not get nodeIds within kbucket %s", err)
}
ids, serializedNodes, err := rt.getNodesFromIDs(nodeIDs)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not get node values %s", err)
return []*pb.Node{}, RoutingErr.New("could not get node values %s", err)
}
unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
if err != nil {
return []*proto.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
return []*pb.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
}
return unmarshaledNodes, nil
}

View File

@ -10,9 +10,10 @@ import (
"testing"
"time"
pb "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -34,7 +35,7 @@ func createRoutingTable(t *testing.T, localNodeID []byte) (*RoutingTable, func()
if localNodeID == nil {
localNodeID = []byte("AA")
}
localNode := &proto.Node{Id: string(localNodeID)}
localNode := &pb.Node{Id: string(localNodeID)}
options := &RoutingOptions{
kpath: filepath.Join(tempdir, "Kadbucket"),
npath: filepath.Join(tempdir, "Nodebucket"),
@ -55,8 +56,8 @@ func createRoutingTable(t *testing.T, localNodeID []byte) (*RoutingTable, func()
}
}
func mockNode(id string) *proto.Node {
var node proto.Node
func mockNode(id string) *pb.Node {
var node pb.Node
node.Id = id
return &node
}
@ -265,7 +266,7 @@ func TestUpdateNode(t *testing.T) {
x := unmarshaled[0].Address
assert.Nil(t, x)
node.Address = &proto.NodeAddress{Address: "BB"}
node.Address = &pb.NodeAddress{Address: "BB"}
err = rt.updateNode(node)
assert.NoError(t, err)
val, err = rt.nodeBucketDB.Get(storage.Key(node.Id))
@ -512,11 +513,11 @@ func TestGetNodesFromIDs(t *testing.T) {
nodeIDA := []byte(nodeA.Id)
nodeIDB := []byte(nodeB.Id)
nodeIDC := []byte(nodeC.Id)
a, err := pb.Marshal(nodeA)
a, err := proto.Marshal(nodeA)
assert.NoError(t, err)
b, err := pb.Marshal(nodeB)
b, err := proto.Marshal(nodeB)
assert.NoError(t, err)
c, err := pb.Marshal(nodeC)
c, err := proto.Marshal(nodeC)
assert.NoError(t, err)
rt, cleanup := createRoutingTable(t, nodeIDA)
defer cleanup()
@ -541,11 +542,11 @@ func TestUnmarshalNodes(t *testing.T) {
nodeIDA := []byte(nodeA.Id)
nodeIDB := []byte(nodeB.Id)
nodeIDC := []byte(nodeC.Id)
a, err := pb.Marshal(nodeA)
a, err := proto.Marshal(nodeA)
assert.NoError(t, err)
b, err := pb.Marshal(nodeB)
b, err := proto.Marshal(nodeB)
assert.NoError(t, err)
c, err := pb.Marshal(nodeC)
c, err := proto.Marshal(nodeC)
assert.NoError(t, err)
rt, cleanup := createRoutingTable(t, nodeIDA)
defer cleanup()
@ -558,9 +559,9 @@ func TestUnmarshalNodes(t *testing.T) {
assert.NoError(t, err)
nodes, err := unmarshalNodes(ids, values)
assert.NoError(t, err)
expected := []*proto.Node{nodeA, nodeB, nodeC}
expected := []*pb.Node{nodeA, nodeB, nodeC}
for i, v := range expected {
assert.True(t, pb.Equal(v, nodes[i]))
assert.True(t, proto.Equal(v, nodes[i]))
}
}
@ -577,10 +578,10 @@ func TestGetUnmarshaledNodesFromBucket(t *testing.T) {
_, err = rt.addNode(nodeC)
assert.NoError(t, err)
nodes, err := rt.getUnmarshaledNodesFromBucket(bucketID)
expected := []*proto.Node{nodeA, nodeB, nodeC}
expected := []*pb.Node{nodeA, nodeB, nodeC}
assert.NoError(t, err)
for i, v := range expected {
assert.True(t, pb.Equal(v, nodes[i]))
assert.True(t, proto.Equal(v, nodes[i]))
}
}

View File

@ -7,10 +7,10 @@ import (
"testing"
"time"
pb "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -52,18 +52,18 @@ func TestGetBucket(t *testing.T) {
ok bool
}{
{nodeID: node.Id,
expected: &KBucket{nodes: []*proto.Node{node, node2}},
expected: &KBucket{nodes: []*pb.Node{node, node2}},
ok: true,
},
{nodeID: node2.Id,
expected: &KBucket{nodes: []*proto.Node{node, node2}},
expected: &KBucket{nodes: []*pb.Node{node, node2}},
ok: true,
},
}
for i, v := range cases {
b, e := rt.GetBucket(node2.Id)
for j, w := range v.expected.nodes {
if !assert.True(t, pb.Equal(w, b.Nodes()[j])) {
if !assert.True(t, proto.Equal(w, b.Nodes()[j])) {
t.Logf("case %v failed expected: ", i)
}
}
@ -81,12 +81,12 @@ func TestGetBuckets(t *testing.T) {
ok, err := rt.addNode(node2)
assert.True(t, ok)
assert.NoError(t, err)
expected := []*proto.Node{node, node2}
expected := []*pb.Node{node, node2}
buckets, err := rt.GetBuckets()
assert.NoError(t, err)
for _, v := range buckets {
for j, w := range v.Nodes() {
assert.True(t, pb.Equal(expected[j], w))
assert.True(t, proto.Equal(expected[j], w))
}
}
}
@ -99,23 +99,23 @@ func TestFindNear(t *testing.T) {
ok, err := rt.addNode(node2)
assert.True(t, ok)
assert.NoError(t, err)
expected := []*proto.Node{node}
expected := []*pb.Node{node}
nodes, err := rt.FindNear(StringToNodeID(node.Id), 1)
assert.NoError(t, err)
assert.Equal(t, expected, nodes)
node3 := mockNode("CC")
expected = []*proto.Node{node2, node}
expected = []*pb.Node{node2, node}
nodes, err = rt.FindNear(StringToNodeID(node3.Id), 2)
assert.NoError(t, err)
assert.Equal(t, expected, nodes)
expected = []*proto.Node{node2}
expected = []*pb.Node{node2}
nodes, err = rt.FindNear(StringToNodeID(node3.Id), 1)
assert.NoError(t, err)
assert.Equal(t, expected, nodes)
expected = []*proto.Node{node2, node}
expected = []*pb.Node{node2, node}
nodes, err = rt.FindNear(StringToNodeID(node3.Id), 3)
assert.NoError(t, err)
assert.Equal(t, expected, nodes)
@ -127,10 +127,10 @@ func TestConnectionSuccess(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte(id))
defer cleanup()
id2 := "BB"
address1 := &proto.NodeAddress{Address: "a"}
address2 := &proto.NodeAddress{Address: "b"}
node1 := &proto.Node{Id: id, Address: address1}
node2 := &proto.Node{Id: id2, Address: address2}
address1 := &pb.NodeAddress{Address: "a"}
address2 := &pb.NodeAddress{Address: "b"}
node1 := &pb.Node{Id: id, Address: address1}
node2 := &pb.Node{Id: id2, Address: address2}
//Updates node
err := rt.ConnectionSuccess(node1)

View File

@ -7,7 +7,7 @@ import (
"context"
"storj.io/storj/pkg/dht"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// NewMockKademlia returns a newly initialized MockKademlia struct
@ -18,12 +18,12 @@ func NewMockKademlia() *MockKademlia {
// MockKademlia is a mock implementation of the DHT interface used solely for testing
type MockKademlia struct {
RoutingTable dht.RoutingTable
Nodes []*proto.Node
Nodes []*pb.Node
}
// GetNodes increments the GetNodesCalled field on MockKademlia
// returns the Nodes field on MockKademlia
func (k *MockKademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...proto.Restriction) ([]*proto.Node, error) {
func (k *MockKademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
return k.Nodes, nil
}
@ -40,14 +40,14 @@ func (k *MockKademlia) Bootstrap(ctx context.Context) error {
}
// Ping increments the PingCalled field on MockKademlia
func (k *MockKademlia) Ping(ctx context.Context, node proto.Node) (proto.Node, error) {
func (k *MockKademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
return node, nil
}
// FindNode increments the FindNodeCalled field on MockKademlia
//
// returns the local kademlia node
func (k *MockKademlia) FindNode(ctx context.Context, ID dht.NodeID) (proto.Node, error) {
func (k *MockKademlia) FindNode(ctx context.Context, ID dht.NodeID) (pb.Node, error) {
return k.RoutingTable.Local(), nil
}

View File

@ -8,17 +8,17 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pool"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/transport"
proto "storj.io/storj/protos/overlay"
)
//NodeClientErr is the class for all errors pertaining to node client operations
var NodeClientErr = errs.Class("node client error")
// NewNodeClient instantiates a node client
func NewNodeClient(identity *provider.FullIdentity, self proto.Node) (Client, error) {
func NewNodeClient(identity *provider.FullIdentity, self pb.Node) (Client, error) {
client := transport.NewClient(identity)
return &Node{
self: self,
@ -29,5 +29,5 @@ func NewNodeClient(identity *provider.FullIdentity, self proto.Node) (Client, er
// Client is the Node client communication interface
type Client interface {
Lookup(ctx context.Context, to proto.Node, find proto.Node) ([]*proto.Node, error)
Lookup(ctx context.Context, to pb.Node, find pb.Node) ([]*pb.Node, error)
}

View File

@ -7,20 +7,21 @@ import (
"context"
"google.golang.org/grpc"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pool"
"storj.io/storj/pkg/transport"
proto "storj.io/storj/protos/overlay"
)
// Node is the storj definition for a node in the network
type Node struct {
self proto.Node
self pb.Node
tc transport.Client
cache pool.Pool
}
// Lookup queries nodes looking for a particular node in the network
func (n *Node) Lookup(ctx context.Context, to proto.Node, find proto.Node) ([]*proto.Node, error) {
func (n *Node) Lookup(ctx context.Context, to pb.Node, find pb.Node) ([]*pb.Node, error) {
v, err := n.cache.Get(ctx, to.GetId())
if err != nil {
return nil, err
@ -37,8 +38,8 @@ func (n *Node) Lookup(ctx context.Context, to proto.Node, find proto.Node) ([]*p
conn = c
}
c := proto.NewNodesClient(conn)
resp, err := c.Query(ctx, &proto.QueryRequest{Sender: &n.self, Target: &find})
c := pb.NewNodesClient(conn)
resp, err := c.Query(ctx, &pb.QueryRequest{Sender: &n.self, Target: &find})
if err != nil {
return nil, err
}

View File

@ -12,23 +12,23 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
var ctx = context.Background()
func TestLookup(t *testing.T) {
cases := []struct {
self proto.Node
to proto.Node
find proto.Node
self pb.Node
to pb.Node
find pb.Node
expectedErr error
}{
{
self: proto.Node{Id: NewNodeID(t), Address: &proto.NodeAddress{Address: ":7070"}},
to: proto.Node{}, // filled after server has been started
find: proto.Node{Id: NewNodeID(t), Address: &proto.NodeAddress{Address: ":9090"}},
self: pb.Node{Id: NewNodeID(t), Address: &pb.NodeAddress{Address: ":7070"}},
to: pb.Node{}, // filled after server has been started
find: pb.Node{Id: NewNodeID(t), Address: &pb.NodeAddress{Address: ":9090"}},
expectedErr: nil,
},
}
@ -37,7 +37,7 @@ func TestLookup(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
v.to = proto.Node{Id: NewNodeID(t), Address: &proto.NodeAddress{Address: lis.Addr().String()}}
v.to = pb.Node{Id: NewNodeID(t), Address: &pb.NodeAddress{Address: lis.Addr().String()}}
srv, mock, err := newTestServer(ctx)
assert.NoError(t, err)
@ -75,7 +75,7 @@ func newTestServer(ctx context.Context) (*grpc.Server, *mockNodeServer, error) {
grpcServer := grpc.NewServer(identOpt)
mn := &mockNodeServer{queryCalled: 0}
proto.RegisterNodesServer(grpcServer, mn)
pb.RegisterNodesServer(grpcServer, mn)
return grpcServer, mn, nil
@ -85,9 +85,9 @@ type mockNodeServer struct {
queryCalled int
}
func (mn *mockNodeServer) Query(ctx context.Context, req *proto.QueryRequest) (*proto.QueryResponse, error) {
func (mn *mockNodeServer) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
mn.queryCalled++
return &proto.QueryResponse{}, nil
return &pb.QueryResponse{}, nil
}
// NewNodeID returns the string representation of a dht node ID

View File

@ -8,7 +8,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// Server implements the grpc Node Server
@ -17,27 +17,27 @@ type Server struct {
}
// Query is a node to node communication query
func (s *Server) Query(ctx context.Context, req proto.QueryRequest) (proto.QueryResponse, error) {
func (s *Server) Query(ctx context.Context, req pb.QueryRequest) (pb.QueryResponse, error) {
rt, err := s.dht.GetRoutingTable(ctx)
if err != nil {
return proto.QueryResponse{}, NodeClientErr.New("could not get routing table %s", err)
return pb.QueryResponse{}, NodeClientErr.New("could not get routing table %s", err)
}
_, err = s.dht.Ping(ctx, *req.Sender)
if err != nil {
err = rt.ConnectionFailed(req.Sender)
if err != nil {
return proto.QueryResponse{}, NodeClientErr.New("could not respond to connection failed %s", err)
return pb.QueryResponse{}, NodeClientErr.New("could not respond to connection failed %s", err)
}
return proto.QueryResponse{}, NodeClientErr.New("connection to node %s failed", req.Sender.Id)
return pb.QueryResponse{}, NodeClientErr.New("connection to node %s failed", req.Sender.Id)
}
err = rt.ConnectionSuccess(req.Sender)
if err != nil {
return proto.QueryResponse{}, NodeClientErr.New("could not respond to connection success %s", err)
return pb.QueryResponse{}, NodeClientErr.New("could not respond to connection success %s", err)
}
id := kademlia.StringToNodeID(req.Target.Id)
nodes, err := rt.FindNear(id, int(req.Limit))
if err != nil {
return proto.QueryResponse{}, NodeClientErr.New("could not find near %s", err)
return pb.QueryResponse{}, NodeClientErr.New("could not find near %s", err)
}
return proto.QueryResponse{Sender: req.Sender, Response: nodes}, nil
return pb.QueryResponse{Sender: req.Sender, Response: nodes}, nil
}

View File

@ -14,7 +14,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/dht/mocks"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
func TestQuery(t *testing.T) {
@ -23,21 +23,21 @@ func TestQuery(t *testing.T) {
mockDHT := mock_dht.NewMockDHT(ctrl)
mockRT := mock_dht.NewMockRoutingTable(ctrl)
s := &Server{dht: mockDHT}
sender := &proto.Node{Id: "A"}
target := &proto.Node{Id: "B"}
node := &proto.Node{Id: "C"}
sender := &pb.Node{Id: "A"}
target := &pb.Node{Id: "B"}
node := &pb.Node{Id: "C"}
cases := []struct {
caseName string
rt dht.RoutingTable
getRTErr error
pingNode proto.Node
pingNode pb.Node
pingErr error
successErr error
failErr error
findNear []*proto.Node
findNear []*pb.Node
limit int
nearErr error
res proto.QueryResponse
res pb.QueryResponse
err error
}{
{caseName: "ping success, return sender",
@ -47,10 +47,10 @@ func TestQuery(t *testing.T) {
pingErr: nil,
successErr: nil,
failErr: nil,
findNear: []*proto.Node{target},
findNear: []*pb.Node{target},
limit: 2,
nearErr: nil,
res: proto.QueryResponse{Sender: sender, Response: []*proto.Node{target}},
res: pb.QueryResponse{Sender: sender, Response: []*pb.Node{target}},
err: nil,
},
{caseName: "ping success, return nearest",
@ -60,10 +60,10 @@ func TestQuery(t *testing.T) {
pingErr: nil,
successErr: nil,
failErr: nil,
findNear: []*proto.Node{sender, node},
findNear: []*pb.Node{sender, node},
limit: 2,
nearErr: nil,
res: proto.QueryResponse{Sender: sender, Response: []*proto.Node{sender, node}},
res: pb.QueryResponse{Sender: sender, Response: []*pb.Node{sender, node}},
err: nil,
},
{caseName: "ping success, connectionSuccess errors",
@ -73,41 +73,41 @@ func TestQuery(t *testing.T) {
pingErr: nil,
successErr: errors.New("connection fails error"),
failErr: nil,
findNear: []*proto.Node{},
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: proto.QueryResponse{},
res: pb.QueryResponse{},
err: errors.New("query error"),
},
{caseName: "ping fails, return error",
rt: mockRT,
getRTErr: nil,
pingNode: proto.Node{},
pingNode: pb.Node{},
pingErr: errors.New("ping err"),
successErr: nil,
failErr: nil,
findNear: []*proto.Node{},
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: proto.QueryResponse{},
res: pb.QueryResponse{},
err: errors.New("query error"),
},
{caseName: "ping fails, connectionFailed errors",
rt: mockRT,
getRTErr: nil,
pingNode: proto.Node{},
pingNode: pb.Node{},
pingErr: errors.New("ping err"),
successErr: nil,
failErr: errors.New("connection fails error"),
findNear: []*proto.Node{},
findNear: []*pb.Node{},
limit: 2,
nearErr: nil,
res: proto.QueryResponse{},
res: pb.QueryResponse{},
err: errors.New("query error"),
},
}
for i, v := range cases {
req := proto.QueryRequest{Sender: sender, Target: &proto.Node{Id: "B"}, Limit: int64(2)}
req := pb.QueryRequest{Sender: sender, Target: &pb.Node{Id: "B"}, Limit: int64(2)}
mockDHT.EXPECT().GetRoutingTable(gomock.Any()).Return(v.rt, v.getRTErr)
mockDHT.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(v.pingNode, v.pingErr)
if v.pingErr != nil {

View File

@ -14,7 +14,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"
@ -65,7 +65,7 @@ func NewBoltOverlayCache(dbPath string, DHT dht.DHT) (*Cache, error) {
}
// Get looks up the provided nodeID from the overlay cache
func (o *Cache) Get(ctx context.Context, key string) (*overlay.Node, error) {
func (o *Cache) Get(ctx context.Context, key string) (*pb.Node, error) {
b, err := o.DB.Get([]byte(key))
if err != nil {
return nil, err
@ -75,7 +75,7 @@ func (o *Cache) Get(ctx context.Context, key string) (*overlay.Node, error) {
return nil, nil
}
na := &overlay.Node{}
na := &pb.Node{}
if err := proto.Unmarshal(b, na); err != nil {
return nil, err
}
@ -84,7 +84,7 @@ func (o *Cache) Get(ctx context.Context, key string) (*overlay.Node, error) {
}
// GetAll looks up the provided nodeIDs from the overlay cache
func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*overlay.Node, error) {
func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*pb.Node, error) {
if len(keys) == 0 {
return nil, OverlayError.New("no keys provided")
}
@ -96,13 +96,13 @@ func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*overlay.Node, err
if err != nil {
return nil, err
}
var ns []*overlay.Node
var ns []*pb.Node
for _, v := range vs {
if v == nil {
ns = append(ns, nil)
continue
}
na := &overlay.Node{}
na := &pb.Node{}
err := proto.Unmarshal(v, na)
if err != nil {
return nil, OverlayError.New("could not unmarshal non-nil node: %v", err)
@ -113,7 +113,7 @@ func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*overlay.Node, err
}
// Put adds a nodeID to the redis cache with a binary representation of proto defined Node
func (o *Cache) Put(nodeID string, value overlay.Node) error {
func (o *Cache) Put(nodeID string, value pb.Node) error {
data, err := proto.Marshal(&value)
if err != nil {
return err

View File

@ -18,7 +18,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"
@ -32,8 +32,8 @@ var (
)
type dbClient int
type responses map[dbClient]*overlay.Node
type responsesB map[dbClient][]*overlay.Node
type responses map[dbClient]*pb.Node
type responsesB map[dbClient][]*pb.Node
type errors map[dbClient]*errs.Class
const (
@ -43,18 +43,18 @@ const (
testNetSize = 30
)
func newTestKademlia(t *testing.T, ip, port string, d dht.DHT, b overlay.Node) *kademlia.Kademlia {
func newTestKademlia(t *testing.T, ip, port string, d dht.DHT, b pb.Node) *kademlia.Kademlia {
i, err := kademlia.NewID()
assert.NoError(t, err)
id := *i
n := []overlay.Node{b}
n := []pb.Node{b}
kad, err := kademlia.NewKademlia(&id, n, ip, port)
assert.NoError(t, err)
return kad
}
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Node) {
func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, pb.Node) {
bid, err := kademlia.NewID()
assert.NoError(t, err)
@ -67,7 +67,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
intro, err := kademlia.GetIntroNode(bnid.String(), ip, pm)
assert.NoError(t, err)
boot, err := kademlia.NewKademlia(&bnid, []overlay.Node{*intro}, ip, pm)
boot, err := kademlia.NewKademlia(&bnid, []pb.Node{*intro}, ip, pm)
assert.NoError(t, err)
rt, err := boot.GetRoutingTable(context.Background())
@ -87,7 +87,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
assert.NoError(t, err)
id := *nid
dht, err := kademlia.NewKademlia(&id, []overlay.Node{bootNode}, ip, gg)
dht, err := kademlia.NewKademlia(&id, []pb.Node{bootNode}, ip, gg)
assert.NoError(t, err)
p++
@ -115,7 +115,7 @@ var (
expectedTimesCalled: 1,
key: "foo",
expectedResponses: func() responses {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
return responses{
mock: na,
bolt: na,
@ -130,7 +130,7 @@ var (
data: []storage.ListItem{{
Key: storage.Key("foo"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -143,7 +143,7 @@ var (
expectedTimesCalled: 1,
key: "error",
expectedResponses: func() responses {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
return responses{
mock: nil,
bolt: na,
@ -158,7 +158,7 @@ var (
data: []storage.ListItem{{
Key: storage.Key("error"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -184,7 +184,7 @@ var (
data: []storage.ListItem{{
Key: storage.Key("foo"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -206,8 +206,8 @@ var (
expectedTimesCalled: 1,
keys: []string{"key1"},
expectedResponses: func() responsesB {
n1 := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
ns := []*overlay.Node{n1}
n1 := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
ns := []*pb.Node{n1}
return responsesB{
mock: ns,
bolt: ns,
@ -223,7 +223,7 @@ var (
{
Key: storage.Key("key1"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -237,9 +237,9 @@ var (
expectedTimesCalled: 1,
keys: []string{"key1", "key2"},
expectedResponses: func() responsesB {
n1 := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
n2 := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9998"}}
ns := []*overlay.Node{n1, n2}
n1 := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
n2 := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9998"}}
ns := []*pb.Node{n1, n2}
return responsesB{
mock: ns,
bolt: ns,
@ -255,7 +255,7 @@ var (
{
Key: storage.Key("key1"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -265,7 +265,7 @@ var (
}, {
Key: storage.Key("key2"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9998"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9998"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -279,8 +279,8 @@ var (
expectedTimesCalled: 1,
keys: []string{"key1", "key3"},
expectedResponses: func() responsesB {
n1 := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
ns := []*overlay.Node{n1, nil}
n1 := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
ns := []*pb.Node{n1, nil}
return responsesB{
mock: ns,
bolt: ns,
@ -296,7 +296,7 @@ var (
{
Key: storage.Key("key1"),
Value: func() storage.Value {
na := &overlay.Node{Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
na := &pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}}
d, err := proto.Marshal(na)
if err != nil {
panic(err)
@ -310,7 +310,7 @@ var (
expectedTimesCalled: 1,
keys: []string{"", ""},
expectedResponses: func() responsesB {
ns := []*overlay.Node{nil, nil}
ns := []*pb.Node{nil, nil}
return responsesB{
mock: ns,
bolt: ns,
@ -344,7 +344,7 @@ var (
testID string
expectedTimesCalled int
key string
value overlay.Node
value pb.Node
expectedErrors errors
data []storage.ListItem
}{
@ -352,7 +352,7 @@ var (
testID: "valid Put",
expectedTimesCalled: 1,
key: "foo",
value: overlay.Node{Id: "foo", Address: &overlay.NodeAddress{Transport: overlay.NodeTransport_TCP, Address: "127.0.0.1:9999"}},
value: pb.Node{Id: "foo", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:9999"}},
expectedErrors: errors{
mock: nil,
bolt: nil,
@ -473,7 +473,7 @@ func TestRedisPut(t *testing.T) {
v, err := db.Get([]byte(c.key))
assert.NoError(t, err)
na := &overlay.Node{}
na := &pb.Node{}
assert.NoError(t, proto.Unmarshal(v, na))
assert.True(t, proto.Equal(na, &c.value))
})
@ -520,7 +520,7 @@ func TestBoltPut(t *testing.T) {
v, err := db.Get([]byte(c.key))
assert.NoError(t, err)
na := &overlay.Node{}
na := &pb.Node{}
assert.NoError(t, proto.Unmarshal(v, na))
assert.True(t, proto.Equal(na, &c.value))
@ -593,7 +593,7 @@ func TestMockPut(t *testing.T) {
v, err := db.Get(storage.Key(c.key))
assert.NoError(t, err)
na := &overlay.Node{}
na := &pb.Node{}
assert.NoError(t, proto.Unmarshal(v, na))
assert.True(t, proto.Equal(na, &c.value))
})

View File

@ -9,8 +9,8 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
// Client is the interface that defines an overlay client.
@ -26,14 +26,14 @@ var ClientError = errs.Class("Client Error")
//Client implements the Overlay Client interface
type Client interface {
Choose(ctx context.Context, limit int, space int64) ([]*proto.Node, error)
Lookup(ctx context.Context, nodeID dht.NodeID) (*proto.Node, error)
BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*proto.Node, error)
Choose(ctx context.Context, limit int, space int64) ([]*pb.Node, error)
Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error)
BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error)
}
// Overlay is the overlay concrete implementation of the client interface
type Overlay struct {
client proto.OverlayClient
client pb.OverlayClient
}
// NewOverlayClient returns a new initialized Overlay Client
@ -56,10 +56,10 @@ func NewOverlayClient(identity *provider.FullIdentity, address string) (*Overlay
var _ Client = (*Overlay)(nil)
// Choose implements the client.Choose interface
func (o *Overlay) Choose(ctx context.Context, amount int, space int64) ([]*proto.Node, error) {
func (o *Overlay) Choose(ctx context.Context, amount int, space int64) ([]*pb.Node, error) {
// TODO(coyle): We will also need to communicate with the reputation service here
resp, err := o.client.FindStorageNodes(ctx, &proto.FindStorageNodesRequest{
Opts: &proto.OverlayOptions{Amount: int64(amount), Restrictions: &proto.NodeRestrictions{
resp, err := o.client.FindStorageNodes(ctx, &pb.FindStorageNodesRequest{
Opts: &pb.OverlayOptions{Amount: int64(amount), Restrictions: &pb.NodeRestrictions{
FreeDisk: space,
}},
})
@ -71,8 +71,8 @@ func (o *Overlay) Choose(ctx context.Context, amount int, space int64) ([]*proto
}
// Lookup provides a Node with the given ID
func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*proto.Node, error) {
resp, err := o.client.Lookup(ctx, &proto.LookupRequest{NodeID: nodeID.String()})
func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error) {
resp, err := o.client.Lookup(ctx, &pb.LookupRequest{NodeID: nodeID.String()})
if err != nil {
return nil, err
}
@ -81,10 +81,10 @@ func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*proto.Node, e
}
//BulkLookup provides a list of Nodes with the given IDs
func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*proto.Node, error) {
var reqs proto.LookupRequests
func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error) {
var reqs pb.LookupRequests
for _, v := range nodeIDs {
reqs.Lookuprequest = append(reqs.Lookuprequest, &proto.LookupRequest{NodeID: v.String()})
reqs.Lookuprequest = append(reqs.Lookuprequest, &pb.LookupRequest{NodeID: v.String()})
}
resp, err := o.client.BulkLookup(ctx, &reqs)
@ -92,7 +92,7 @@ func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*prot
return nil, ClientError.Wrap(err)
}
var nodes []*proto.Node
var nodes []*pb.Node
for _, v := range resp.Lookupresponse {
nodes = append(nodes, v.Node)
}
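
For orientation, a minimal sketch (not part of the commit) of a caller driving the renamed client; the satellite address is a hypothetical placeholder and identity is assumed to be a *provider.FullIdentity:

// Assumed imports: "context", "fmt", "storj.io/storj/pkg/overlay", "storj.io/storj/pkg/provider".
func exampleChoose(ctx context.Context, identity *provider.FullIdentity) error {
	// NewOverlayClient and Choose are the methods shown above, now built on pb types.
	oc, err := overlay.NewOverlayClient(identity, "satellite.example:7777") // hypothetical address
	if err != nil {
		return err
	}
	// Ask for two nodes with at least 1 GiB of free disk each.
	nodes, err := oc.Choose(ctx, 2, 1<<30)
	if err != nil {
		return err
	}
	for _, n := range nodes {
		fmt.Println(n.GetId(), n.GetAddress().GetAddress())
	}
	return nil
}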

View File

@ -14,8 +14,8 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/storage/redis/redisserver"
)
@ -191,10 +191,10 @@ func TestBulkLookupV2(t *testing.T) {
assert.NotNil(t, oc)
assert.NotEmpty(t, oc.client)
n1 := &proto.Node{Id: "n1"}
n2 := &proto.Node{Id: "n2"}
n3 := &proto.Node{Id: "n3"}
nodes := []*proto.Node{n1, n2, n3}
n1 := &pb.Node{Id: "n1"}
n2 := &pb.Node{Id: "n2"}
n3 := &pb.Node{Id: "n3"}
nodes := []*pb.Node{n1, n2, n3}
for _, n := range nodes {
assert.NoError(t, s.cache.Put(n.Id, *n))
}
@ -202,7 +202,7 @@ func TestBulkLookupV2(t *testing.T) {
cases := []struct {
testID string
nodeIDs []dht.NodeID
responses []*proto.Node
responses []*pb.Node
errors *errs.Class
}{
{testID: "empty id",
@ -226,7 +226,7 @@ func TestBulkLookupV2(t *testing.T) {
id2 := kademlia.StringToNodeID("n5")
return []dht.NodeID{id1, id2}
}(),
responses: []*proto.Node{nil, nil},
responses: []*pb.Node{nil, nil},
errors: nil,
},
{testID: "random order and nil",
@ -237,8 +237,8 @@ func TestBulkLookupV2(t *testing.T) {
id4 := kademlia.StringToNodeID("n4")
return []dht.NodeID{id2, id1, id3, id4}
}(),
responses: func() []*proto.Node {
return []*proto.Node{nodes[1], nodes[0], nodes[2], nil}
responses: func() []*pb.Node {
return []*pb.Node{nodes[1], nodes[0], nodes[2], nil}
}(),
errors: nil,
},
@ -273,7 +273,7 @@ func newServer(ctx context.Context, redisAddr string) (*grpc.Server, *Server, er
}
s := &Server{cache: cache}
proto.RegisterOverlayServer(grpcServer, s)
pb.RegisterOverlayServer(grpcServer, s)
return grpcServer, s, nil
}
@ -295,7 +295,7 @@ func newTestServer(ctx context.Context) (*grpc.Server, *mockOverlayServer, error
grpcServer := grpc.NewServer(identOpt)
mo := &mockOverlayServer{lookupCalled: 0, FindStorageNodesCalled: 0}
proto.RegisterOverlayServer(grpcServer, mo)
pb.RegisterOverlayServer(grpcServer, mo)
return grpcServer, mo, nil
@ -307,17 +307,17 @@ type mockOverlayServer struct {
FindStorageNodesCalled int
}
func (o *mockOverlayServer) Lookup(ctx context.Context, req *proto.LookupRequest) (*proto.LookupResponse, error) {
func (o *mockOverlayServer) Lookup(ctx context.Context, req *pb.LookupRequest) (*pb.LookupResponse, error) {
o.lookupCalled++
return &proto.LookupResponse{}, nil
return &pb.LookupResponse{}, nil
}
func (o *mockOverlayServer) FindStorageNodes(ctx context.Context, req *proto.FindStorageNodesRequest) (*proto.FindStorageNodesResponse, error) {
func (o *mockOverlayServer) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesRequest) (*pb.FindStorageNodesResponse, error) {
o.FindStorageNodesCalled++
return &proto.FindStorageNodesResponse{}, nil
return &pb.FindStorageNodesResponse{}, nil
}
func (o *mockOverlayServer) BulkLookup(ctx context.Context, reqs *proto.LookupRequests) (*proto.LookupResponses, error) {
func (o *mockOverlayServer) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (*pb.LookupResponses, error) {
o.bulkLookupCalled++
return &proto.LookupResponses{}, nil
return &pb.LookupResponses{}, nil
}

View File

@ -14,9 +14,9 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/utils"
proto "storj.io/storj/protos/overlay"
)
var (
@ -95,7 +95,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
}
}()
proto.RegisterOverlayServer(server.GRPC(), &Server{
pb.RegisterOverlayServer(server.GRPC(), &Server{
dht: kad,
cache: cache,

View File

@ -5,23 +5,22 @@ package overlay
import (
"context"
// "fmt"
"strings"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
// MockOverlay is a mocked overlay implementation
type MockOverlay struct {
nodes map[string]*proto.Node
nodes map[string]*pb.Node
}
// NewMockOverlay creates a new overlay mock
func NewMockOverlay(nodes []*proto.Node) *MockOverlay {
rv := &MockOverlay{nodes: map[string]*proto.Node{}}
func NewMockOverlay(nodes []*pb.Node) *MockOverlay {
rv := &MockOverlay{nodes: map[string]*pb.Node{}}
for _, node := range nodes {
rv.nodes[node.Id] = node
}
@ -30,9 +29,9 @@ func NewMockOverlay(nodes []*proto.Node) *MockOverlay {
// FindStorageNodes finds storage nodes based on the request
func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
req *proto.FindStorageNodesRequest) (resp *proto.FindStorageNodesResponse,
req *pb.FindStorageNodesRequest) (resp *pb.FindStorageNodesResponse,
err error) {
nodes := make([]*proto.Node, 0, len(mo.nodes))
nodes := make([]*pb.Node, 0, len(mo.nodes))
for _, node := range mo.nodes {
nodes = append(nodes, node)
}
@ -40,25 +39,25 @@ func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
return nil, errs.New("not enough farmers exist")
}
nodes = nodes[:req.Opts.GetAmount()]
return &proto.FindStorageNodesResponse{Nodes: nodes}, nil
return &pb.FindStorageNodesResponse{Nodes: nodes}, nil
}
// Lookup finds a single storage node based on the request
func (mo *MockOverlay) Lookup(ctx context.Context, req *proto.LookupRequest) (
*proto.LookupResponse, error) {
return &proto.LookupResponse{Node: mo.nodes[req.NodeID]}, nil
func (mo *MockOverlay) Lookup(ctx context.Context, req *pb.LookupRequest) (
*pb.LookupResponse, error) {
return &pb.LookupResponse{Node: mo.nodes[req.NodeID]}, nil
}
//BulkLookup finds multiple storage nodes based on the requests
func (mo *MockOverlay) BulkLookup(ctx context.Context, reqs *proto.LookupRequests) (
*proto.LookupResponses, error) {
var responses []*proto.LookupResponse
func (mo *MockOverlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (
*pb.LookupResponses, error) {
var responses []*pb.LookupResponse
for _, r := range reqs.Lookuprequest {
n := *mo.nodes[r.NodeID]
resp := &proto.LookupResponse{Node: &n}
resp := &pb.LookupResponse{Node: &n}
responses = append(responses, resp)
}
return &proto.LookupResponses{Lookupresponse: responses}, nil
}
return &pb.LookupResponses{Lookupresponse: responses}, nil
}
// MockConfig specifies static nodes for mock overlay
@ -68,21 +67,21 @@ type MockConfig struct {
// Run runs server with mock overlay
func (c MockConfig) Run(ctx context.Context, server *provider.Provider) error {
var nodes []*proto.Node
var nodes []*pb.Node
for _, nodestr := range strings.Split(c.Nodes, ",") {
parts := strings.SplitN(nodestr, ":", 2)
if len(parts) != 2 {
return Error.New("malformed node config: %#v", nodestr)
}
id, addr := parts[0], parts[1]
nodes = append(nodes, &proto.Node{
nodes = append(nodes, &pb.Node{
Id: id,
Address: &proto.NodeAddress{
Transport: proto.NodeTransport_TCP,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP,
Address: addr,
}})
}
proto.RegisterOverlayServer(server.GRPC(), NewMockOverlay(nodes))
pb.RegisterOverlayServer(server.GRPC(), NewMockOverlay(nodes))
return server.Run(ctx)
}
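
Two equivalent ways to stand this mock up, sketched with hypothetical IDs and addresses; note that MockConfig.Run splits each entry on the first colon only, so host:port addresses survive intact:

// Hedged sketch, not part of the commit. Seed the mock overlay directly:
nodes := []*pb.Node{
	{Id: "n1", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:7070"}},
	{Id: "n2", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:7071"}},
}
mock := NewMockOverlay(nodes)
_ = mock

// ...or express the same nodes as the static MockConfig string parsed by Run:
cfg := MockConfig{Nodes: "n1:127.0.0.1:7070,n2:127.0.0.1:7071"}
_ = cfg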

View File

@ -6,10 +6,11 @@ package mock_overlay
import (
context "context"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
dht "storj.io/storj/pkg/dht"
overlay "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// MockClient is a mock of Client interface
@ -36,9 +37,9 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
}
// BulkLookup mocks base method
func (m *MockClient) BulkLookup(arg0 context.Context, arg1 []dht.NodeID) ([]*overlay.Node, error) {
func (m *MockClient) BulkLookup(arg0 context.Context, arg1 []dht.NodeID) ([]*pb.Node, error) {
ret := m.ctrl.Call(m, "BulkLookup", arg0, arg1)
ret0, _ := ret[0].([]*overlay.Node)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -49,9 +50,9 @@ func (mr *MockClientMockRecorder) BulkLookup(arg0, arg1 interface{}) *gomock.Cal
}
// Choose mocks base method
func (m *MockClient) Choose(arg0 context.Context, arg1 int, arg2 int64) ([]*overlay.Node, error) {
func (m *MockClient) Choose(arg0 context.Context, arg1 int, arg2 int64) ([]*pb.Node, error) {
ret := m.ctrl.Call(m, "Choose", arg0, arg1, arg2)
ret0, _ := ret[0].([]*overlay.Node)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -62,9 +63,9 @@ func (mr *MockClientMockRecorder) Choose(arg0, arg1, arg2 interface{}) *gomock.C
}
// Lookup mocks base method
func (m *MockClient) Lookup(arg0 context.Context, arg1 dht.NodeID) (*overlay.Node, error) {
func (m *MockClient) Lookup(arg0 context.Context, arg1 dht.NodeID) (*pb.Node, error) {
ret := m.ctrl.Call(m, "Lookup", arg0, arg1)
ret0, _ := ret[0].(*overlay.Node)
ret0, _ := ret[0].(*pb.Node)
ret1, _ := ret[1].(error)
return ret0, ret1
}

View File

@ -12,7 +12,7 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/kademlia"
proto "storj.io/storj/protos/overlay" // naming proto to avoid confusion with this package
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -43,7 +43,7 @@ func TestFindStorageNodes(t *testing.T) {
c, err := NewClient(address, grpc.WithInsecure())
assert.NoError(t, err)
r, err := c.FindStorageNodes(context.Background(), &proto.FindStorageNodesRequest{Opts: &proto.OverlayOptions{Amount: 2}})
r, err := c.FindStorageNodes(context.Background(), &pb.FindStorageNodesRequest{Opts: &pb.OverlayOptions{Amount: 2}})
assert.NoError(t, err)
assert.NotNil(t, r)
@ -71,7 +71,7 @@ func TestOverlayLookup(t *testing.T) {
c, err := NewClient(address, grpc.WithInsecure())
assert.NoError(t, err)
r, err := c.Lookup(context.Background(), &proto.LookupRequest{NodeID: id.String()})
r, err := c.Lookup(context.Background(), &pb.LookupRequest{NodeID: id.String()})
assert.NoError(t, err)
assert.NotNil(t, r)
}
@ -98,9 +98,9 @@ func TestOverlayBulkLookup(t *testing.T) {
c, err := NewClient(address, grpc.WithInsecure())
assert.NoError(t, err)
req1 := &proto.LookupRequest{NodeID: id.String()}
req2 := &proto.LookupRequest{NodeID: id2.String()}
rs := &proto.LookupRequests{Lookuprequest: []*proto.LookupRequest{req1, req2}}
req1 := &pb.LookupRequest{NodeID: id.String()}
req2 := &pb.LookupRequest{NodeID: id2.String()}
rs := &pb.LookupRequests{Lookuprequest: []*pb.LookupRequest{req1, req2}}
r, err := c.BulkLookup(context.Background(), rs)
assert.NoError(t, err)
assert.NotNil(t, r)

View File

@ -6,8 +6,8 @@ package overlay
import (
"context"
"fmt"
protob "github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"go.uber.org/zap"
@ -16,7 +16,7 @@ import (
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/dht"
proto "storj.io/storj/protos/overlay" // naming proto to avoid confusion with this package
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -32,7 +32,7 @@ type Server struct {
}
// Lookup finds the address of a node in our overlay network
func (o *Server) Lookup(ctx context.Context, req *proto.LookupRequest) (*proto.LookupResponse, error) {
func (o *Server) Lookup(ctx context.Context, req *pb.LookupRequest) (*pb.LookupResponse, error) {
na, err := o.cache.Get(ctx, req.NodeID)
if err != nil {
@ -40,13 +40,13 @@ func (o *Server) Lookup(ctx context.Context, req *proto.LookupRequest) (*proto.L
return nil, err
}
return &proto.LookupResponse{
return &pb.LookupResponse{
Node: na,
}, nil
}
//BulkLookup finds the addresses of nodes in our overlay network
func (o *Server) BulkLookup(ctx context.Context, reqs *proto.LookupRequests) (*proto.LookupResponses, error) {
func (o *Server) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (*pb.LookupResponses, error) {
ns, err := o.cache.GetAll(ctx, lookupRequestsToNodeIDs(reqs))
if err != nil {
@ -56,7 +56,7 @@ func (o *Server) BulkLookup(ctx context.Context, reqs *proto.LookupRequests) (*p
}
// FindStorageNodes searches the overlay network for nodes that meet the provided requirements
func (o *Server) FindStorageNodes(ctx context.Context, req *proto.FindStorageNodesRequest) (resp *proto.FindStorageNodesResponse, err error) {
func (o *Server) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesRequest) (resp *pb.FindStorageNodesResponse, err error) {
opts := req.GetOpts()
maxNodes := opts.GetAmount()
restrictions := opts.GetRestrictions()
@ -64,9 +64,9 @@ func (o *Server) FindStorageNodes(ctx context.Context, req *proto.FindStorageNod
restrictedSpace := restrictions.GetFreeDisk()
var start storage.Key
result := []*proto.Node{}
result := []*pb.Node{}
for {
var nodes []*proto.Node
var nodes []*pb.Node
nodes, start, err = o.populate(ctx, start, maxNodes, restrictedBandwidth, restrictedSpace)
if err != nil {
return nil, Error.Wrap(err)
@ -92,21 +92,21 @@ func (o *Server) FindStorageNodes(ctx context.Context, req *proto.FindStorageNod
result = result[:maxNodes]
}
return &proto.FindStorageNodesResponse{
return &pb.FindStorageNodesResponse{
Nodes: result,
}, nil
}
func (o *Server) getNodes(ctx context.Context, keys storage.Keys) ([]*proto.Node, error) {
func (o *Server) getNodes(ctx context.Context, keys storage.Keys) ([]*pb.Node, error) {
values, err := o.cache.DB.GetAll(keys)
if err != nil {
return nil, Error.Wrap(err)
}
nodes := []*proto.Node{}
nodes := []*pb.Node{}
for _, v := range values {
n := &proto.Node{}
if err := protob.Unmarshal(v, n); err != nil {
n := &pb.Node{}
if err := proto.Unmarshal(v, n); err != nil {
return nil, Error.Wrap(err)
}
@ -117,7 +117,7 @@ func (o *Server) getNodes(ctx context.Context, keys storage.Keys) ([]*proto.Node
}
func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, restrictedBandwidth, restrictedSpace int64) ([]*proto.Node, storage.Key, error) {
func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, restrictedBandwidth, restrictedSpace int64) ([]*pb.Node, storage.Key, error) {
limit := int(maxNodes * 2)
keys, err := o.cache.DB.List(starting, limit)
if err != nil {
@ -127,10 +127,10 @@ func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, r
if len(keys) <= 0 {
o.logger.Info("No Keys returned from List operation")
return []*proto.Node{}, starting, nil
return []*pb.Node{}, starting, nil
}
result := []*proto.Node{}
result := []*pb.Node{}
nodes, err := o.getNodes(ctx, keys)
if err != nil {
o.logger.Error("Error getting nodes", zap.Error(err))
@ -155,7 +155,7 @@ func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, r
}
//lookupRequestsToNodeIDs returns the nodeIDs from the LookupRequests
func lookupRequestsToNodeIDs(reqs *proto.LookupRequests) []string {
func lookupRequestsToNodeIDs(reqs *pb.LookupRequests) []string {
var ids []string
for _, v := range reqs.Lookuprequest {
ids = append(ids, v.NodeID)
@ -164,11 +164,11 @@ func lookupRequestsToNodeIDs(reqs *proto.LookupRequests) []string {
}
//nodesToLookupResponses returns LookupResponses from the nodes
func nodesToLookupResponses(nodes []*proto.Node) *proto.LookupResponses {
var rs []*proto.LookupResponse
func nodesToLookupResponses(nodes []*pb.Node) *pb.LookupResponses {
var rs []*pb.LookupResponse
for _, v := range nodes {
r := &proto.LookupResponse{Node: v}
r := &pb.LookupResponse{Node: v}
rs = append(rs, r)
}
return &proto.LookupResponses{Lookupresponse: rs}
return &pb.LookupResponses{Lookupresponse: rs}
}
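
A quick illustration of the helper pair above, sketched with hypothetical IDs: both helpers preserve input order, which is what lets BulkLookup pair each response with its request.

reqs := &pb.LookupRequests{Lookuprequest: []*pb.LookupRequest{
	{NodeID: "n2"}, {NodeID: "n1"},
}}
ids := lookupRequestsToNodeIDs(reqs) // []string{"n2", "n1"}, same order as the requests
resps := nodesToLookupResponses([]*pb.Node{{Id: "n2"}, {Id: "n1"}})
// resps.Lookupresponse[0].Node.Id == "n2"; indices line up with ids
_, _ = ids, resps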

View File

@ -9,13 +9,13 @@ import (
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// NewServer creates a new Overlay Service Server
func NewServer(k *kademlia.Kademlia, cache *Cache, l *zap.Logger, m *monkit.Registry) *grpc.Server {
grpcServer := grpc.NewServer()
proto.RegisterOverlayServer(grpcServer, &Server{
pb.RegisterOverlayServer(grpcServer, &Server{
dht: k,
cache: cache,
logger: l,
@ -27,11 +27,11 @@ func NewServer(k *kademlia.Kademlia, cache *Cache, l *zap.Logger, m *monkit.Regi
// NewClient connects to the grpc server at the provided address with the provided options
// returns a new instance of an overlay Client
func NewClient(serverAddr string, opts ...grpc.DialOption) (proto.OverlayClient, error) {
func NewClient(serverAddr string, opts ...grpc.DialOption) (pb.OverlayClient, error) {
conn, err := grpc.Dial(serverAddr, opts...)
if err != nil {
return nil, err
}
return proto.NewOverlayClient(conn), nil
return pb.NewOverlayClient(conn), nil
}

View File

@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
proto "storj.io/storj/protos/overlay" // naming proto to avoid confusion with this package
"storj.io/storj/pkg/pb"
)
func TestNewServer(t *testing.T) {
@ -32,23 +32,23 @@ func TestNewServer(t *testing.T) {
func newMockServer(opts ...grpc.ServerOption) *grpc.Server {
grpcServer := grpc.NewServer(opts...)
proto.RegisterOverlayServer(grpcServer, &TestMockOverlay{})
pb.RegisterOverlayServer(grpcServer, &TestMockOverlay{})
return grpcServer
}
type TestMockOverlay struct{}
func (o *TestMockOverlay) FindStorageNodes(ctx context.Context, req *proto.FindStorageNodesRequest) (*proto.FindStorageNodesResponse, error) {
return &proto.FindStorageNodesResponse{}, nil
func (o *TestMockOverlay) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesRequest) (*pb.FindStorageNodesResponse, error) {
return &pb.FindStorageNodesResponse{}, nil
}
func (o *TestMockOverlay) Lookup(ctx context.Context, req *proto.LookupRequest) (*proto.LookupResponse, error) {
return &proto.LookupResponse{}, nil
func (o *TestMockOverlay) Lookup(ctx context.Context, req *pb.LookupRequest) (*pb.LookupResponse, error) {
return &pb.LookupResponse{}, nil
}
func (o *TestMockOverlay) BulkLookup(ctx context.Context, reqs *proto.LookupRequests) (*proto.LookupResponses, error) {
return &proto.LookupResponses{}, nil
func (o *TestMockOverlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (*pb.LookupResponses, error) {
return &pb.LookupResponses{}, nil
}
func TestNewServerNilArgs(t *testing.T) {

View File

@ -6,14 +6,14 @@ package overlay
import (
"testing"
protob "github.com/gogo/protobuf/proto"
proto "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"google.golang.org/grpc"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
"storj.io/storj/storage/teststore"
)
@ -39,15 +39,15 @@ func NewMockServer(items []storage.ListItem) *grpc.Server {
logger: zap.NewNop(),
metrics: registry,
}
proto.RegisterOverlayServer(grpcServer, &s)
pb.RegisterOverlayServer(grpcServer, &s)
return grpcServer
}
// NewNodeAddressValue provides a convenient way to create a storage.Value for testing purposes
func NewNodeAddressValue(t *testing.T, address string) storage.Value {
na := &proto.Node{Id: "", Address: &proto.NodeAddress{Transport: proto.NodeTransport_TCP, Address: address}}
d, err := protob.Marshal(na)
na := &pb.Node{Id: "", Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: address}}
d, err := proto.Marshal(na)
assert.NoError(t, err)
return d

pkg/pb/gen.go Normal file
View File

@ -0,0 +1,9 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package pb
//go:generate protoc --go_out=plugins=grpc:. meta.proto
//go:generate protoc --go_out=plugins=grpc:. overlay.proto
//go:generate protoc --go_out=plugins=grpc:. pointerdb.proto
//go:generate protoc --go_out=plugins=grpc:. piecestore.proto
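
With all four definitions generated into one package, a single go generate run in this directory refreshes every protobuf, and callers need exactly one import. A minimal sketch of the consolidated usage, with hypothetical values:

import "storj.io/storj/pkg/pb"

// One package now covers the overlay, pointerdb, piecestore, and streams types.
node := &pb.Node{
	Id:      "node-1",
	Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP, Address: "127.0.0.1:7777"},
}
pointer := &pb.Pointer{Type: pb.Pointer_INLINE, InlineSegment: []byte("hello")}
_, _ = node, pointer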

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: meta.proto
package streams
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package streams;
package pb;
message MetaStreamInfo {
int64 number_of_segments = 1;

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: overlay.proto
package overlay
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@ -5,7 +5,7 @@ syntax = "proto3";
import "google/protobuf/duration.proto";
package overlay;
package pb;
// NodeTransport is an enum of possible transports for the overlay network
enum NodeTransport {

View File

@ -2,9 +2,9 @@
// See LICENSE for copying information.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: piece_store.proto
// source: piecestore.proto
package piecestoreroutes
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@ -2,10 +2,10 @@
// See LICENSE for copying information.
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/protos/piecestore (interfaces: PieceStoreRoutesClient,PieceStoreRoutes_RetrieveClient)
// Source: storj.io/storj/pkg/pb (interfaces: PieceStoreRoutesClient,PieceStoreRoutes_RetrieveClient)
// Package piecestoreroutes is a generated GoMock package.
package piecestoreroutes
// Package pb is a generated GoMock package.
package pb
import (
context "context"

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package piecestoreroutes;
package pb;
service PieceStoreRoutes {
rpc Piece(PieceId) returns (PieceSummary) {}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pointerdb.proto
package pointerdb
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@ -2,7 +2,7 @@
// See LICENSE for copying information.
syntax = "proto3";
package pointerdb;
package pb;
import "google/protobuf/timestamp.proto";

View File

@ -20,8 +20,8 @@ import (
"github.com/gtank/cryptopasta"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/ranger"
pb "storj.io/storj/protos/piecestore"
)
// ClientError is any error returned by the client

View File

@ -11,8 +11,8 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/ranger"
pb "storj.io/storj/protos/piecestore"
)
// Error is the error class for pieceRanger

View File

@ -16,7 +16,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
pb "storj.io/storj/protos/piecestore"
"storj.io/storj/pkg/pb"
)
func TestPieceRanger(t *testing.T) {

View File

@ -10,8 +10,8 @@ import (
"github.com/gogo/protobuf/proto"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/utils"
pb "storj.io/storj/protos/piecestore"
)
// StreamWriter creates a StreamWriter for writing data to the piece store server

View File

@ -18,9 +18,9 @@ import (
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/pb"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/utils"
pb "storj.io/storj/protos/piecestore"
)
var (

View File

@ -13,7 +13,7 @@ import (
"github.com/gogo/protobuf/proto"
_ "github.com/mattn/go-sqlite3"
pb "storj.io/storj/protos/piecestore"
"storj.io/storj/pkg/pb"
"golang.org/x/net/context"
)

View File

@ -5,8 +5,8 @@ package server
import (
"github.com/gogo/protobuf/proto"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/utils"
pb "storj.io/storj/protos/piecestore"
)
// StreamWriter -- struct for writing a piece to the server upload stream

View File

@ -15,9 +15,9 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/pb"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/utils"
pb "storj.io/storj/protos/piecestore"
)
// RetrieveError is a type of error for failures in Server.Retrieve()

View File

@ -16,11 +16,11 @@ import (
"golang.org/x/net/context"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/rpc/server/psdb"
"storj.io/storj/pkg/provider"
pb "storj.io/storj/protos/piecestore"
)
var (

View File

@ -26,10 +26,10 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"storj.io/storj/pkg/pb"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/rpc/server/psdb"
"storj.io/storj/pkg/provider"
pb "storj.io/storj/protos/piecestore"
)
var ctx = context.Background()

View File

@ -9,9 +9,9 @@ import (
"log"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/utils"
pb "storj.io/storj/protos/piecestore"
)
// OK - Success!

View File

@ -8,9 +8,9 @@ import (
"go.uber.org/zap"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/utils"
proto "storj.io/storj/protos/pointerdb"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/storelogger"
)
@ -45,7 +45,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) error {
defer func() { _ = bdb.Close() }()
bdblogged := storelogger.New(zap.L(), bdb)
proto.RegisterPointerDBServer(server.GRPC(), NewServer(bdblogged, zap.L(), c))
pb.RegisterPointerDBServer(server.GRPC(), NewServer(bdblogged, zap.L(), c))
return server.Run(ctx)
}

View File

@ -13,8 +13,8 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
p "storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
pb "storj.io/storj/protos/pointerdb"
"storj.io/storj/storage"
)

View File

@ -17,8 +17,8 @@ import (
"github.com/stretchr/testify/assert"
p "storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storage/meta"
pb "storj.io/storj/protos/pointerdb"
)
const (

View File

@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/pkg/pointerdb/pdbclient (interfaces: Client)
// Source: storj.io/storj/pkg/pb/pdbclient (interfaces: Client)
// Package mock_pointerdb is a generated GoMock package.
package mock_pointerdb
@ -10,8 +10,8 @@ import (
gomock "github.com/golang/mock/gomock"
paths "storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb/pdbclient"
"storj.io/storj/protos/pointerdb"
)
// MockClient is a mock of Client interface
@ -50,9 +50,9 @@ func (mr *MockClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {
}
// Get mocks base method
func (m *MockClient) Get(arg0 context.Context, arg1 paths.Path) (*pointerdb.Pointer, error) {
func (m *MockClient) Get(arg0 context.Context, arg1 paths.Path) (*pb.Pointer, error) {
ret := m.ctrl.Call(m, "Get", arg0, arg1)
ret0, _ := ret[0].(*pointerdb.Pointer)
ret0, _ := ret[0].(*pb.Pointer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -77,7 +77,7 @@ func (mr *MockClientMockRecorder) List(arg0, arg1, arg2, arg3, arg4, arg5, arg6
}
// Put mocks base method
func (m *MockClient) Put(arg0 context.Context, arg1 paths.Path, arg2 *pointerdb.Pointer) error {
func (m *MockClient) Put(arg0 context.Context, arg1 paths.Path, arg2 *pb.Pointer) error {
ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0

View File

@ -2,7 +2,7 @@
// See LICENSE for copying information.
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/protos/pointerdb (interfaces: PointerDBClient)
// Source: storj.io/storj/pkg/pb (interfaces: PointerDBClient)
// Package pdbclient is a generated GoMock package.
package pdbclient
@ -13,7 +13,7 @@ import (
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
pointerdb "storj.io/storj/protos/pointerdb"
"storj.io/storj/pkg/pb"
)
// MockPointerDBClient is a mock of PointerDBClient interface
@ -40,13 +40,13 @@ func (m *MockPointerDBClient) EXPECT() *MockPointerDBClientMockRecorder {
}
// Delete mocks base method
func (m *MockPointerDBClient) Delete(arg0 context.Context, arg1 *pointerdb.DeleteRequest, arg2 ...grpc.CallOption) (*pointerdb.DeleteResponse, error) {
func (m *MockPointerDBClient) Delete(arg0 context.Context, arg1 *pb.DeleteRequest, arg2 ...grpc.CallOption) (*pb.DeleteResponse, error) {
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Delete", varargs...)
ret0, _ := ret[0].(*pointerdb.DeleteResponse)
ret0, _ := ret[0].(*pb.DeleteResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -58,13 +58,13 @@ func (mr *MockPointerDBClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 .
}
// Get mocks base method
func (m *MockPointerDBClient) Get(arg0 context.Context, arg1 *pointerdb.GetRequest, arg2 ...grpc.CallOption) (*pointerdb.GetResponse, error) {
func (m *MockPointerDBClient) Get(arg0 context.Context, arg1 *pb.GetRequest, arg2 ...grpc.CallOption) (*pb.GetResponse, error) {
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Get", varargs...)
ret0, _ := ret[0].(*pointerdb.GetResponse)
ret0, _ := ret[0].(*pb.GetResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -76,13 +76,13 @@ func (mr *MockPointerDBClientMockRecorder) Get(arg0, arg1 interface{}, arg2 ...i
}
// List mocks base method
func (m *MockPointerDBClient) List(arg0 context.Context, arg1 *pointerdb.ListRequest, arg2 ...grpc.CallOption) (*pointerdb.ListResponse, error) {
func (m *MockPointerDBClient) List(arg0 context.Context, arg1 *pb.ListRequest, arg2 ...grpc.CallOption) (*pb.ListResponse, error) {
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "List", varargs...)
ret0, _ := ret[0].(*pointerdb.ListResponse)
ret0, _ := ret[0].(*pb.ListResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -94,7 +94,7 @@ func (mr *MockPointerDBClientMockRecorder) List(arg0, arg1 interface{}, arg2 ...
}
// ReverseList mocks base method
func (m *MockPointerDBClient) ReverseList(arg0 context.Context, arg1 *pointerdb.ListRequest, arg2 ...grpc.CallOption) (*pointerdb.ListResponse, error) {
func (m *MockPointerDBClient) ReverseList(arg0 context.Context, arg1 *pb.ListRequest, arg2 ...grpc.CallOption) (*pb.ListResponse, error) {
return m.List(arg0, arg1, arg2...)
}
@ -104,13 +104,13 @@ func (mr *MockPointerDBClientMockRecorder) ReverseList(arg0, arg1 interface{}, a
}
// Put mocks base method
func (m *MockPointerDBClient) Put(arg0 context.Context, arg1 *pointerdb.PutRequest, arg2 ...grpc.CallOption) (*pointerdb.PutResponse, error) {
func (m *MockPointerDBClient) Put(arg0 context.Context, arg1 *pb.PutRequest, arg2 ...grpc.CallOption) (*pb.PutResponse, error) {
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Put", varargs...)
ret0, _ := ret[0].(*pointerdb.PutResponse)
ret0, _ := ret[0].(*pb.PutResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}

View File

@ -14,9 +14,9 @@ import (
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb/auth"
"storj.io/storj/pkg/storage/meta"
pb "storj.io/storj/protos/pointerdb"
"storj.io/storj/storage"
)

View File

@ -19,8 +19,8 @@ import (
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storage/meta"
pb "storj.io/storj/protos/pointerdb"
"storj.io/storj/storage"
"storj.io/storj/storage/teststore"
)

View File

@ -15,7 +15,7 @@ import (
"google.golang.org/grpc"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
proto "storj.io/storj/pkg/statdb/proto"
pb "storj.io/storj/pkg/statdb/proto"
)
var (
@ -41,7 +41,7 @@ func (s *Service) Process(ctx context.Context, _ *cobra.Command, _ []string) err
if err != nil {
return err
}
proto.RegisterStatDBServer(grpcServer, ns)
pb.RegisterStatDBServer(grpcServer, ns)
s.logger.Debug(fmt.Sprintf("server listening on address %s", *addr))
defer grpcServer.GracefulStop()
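
Worth noting alongside this hunk: unlike the packages above, statdb keeps its own generated code, and the commit only renames the import alias for consistency. A sketch of the resulting import:

// Alias-only change; the generated package still lives under pkg/statdb/proto:
import pb "storj.io/storj/pkg/statdb/proto"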

View File

@ -14,28 +14,27 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/rpc/client"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/ranger"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/utils"
proto "storj.io/storj/protos/overlay"
pb "storj.io/storj/protos/piecestore"
)
var mon = monkit.Package()
// Client defines an interface for storing erasure coded data on piece store nodes
type Client interface {
Put(ctx context.Context, nodes []*proto.Node, rs eestream.RedundancyStrategy,
Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy,
pieceID client.PieceID, data io.Reader, expiration time.Time) error
Get(ctx context.Context, nodes []*proto.Node, es eestream.ErasureScheme,
Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme,
pieceID client.PieceID, size int64) (ranger.Ranger, error)
Delete(ctx context.Context, nodes []*proto.Node, pieceID client.PieceID) error
Delete(ctx context.Context, nodes []*pb.Node, pieceID client.PieceID) error
}
type dialer interface {
dial(ctx context.Context, node *proto.Node) (ps client.PSClient, err error)
dial(ctx context.Context, node *pb.Node) (ps client.PSClient, err error)
}
type defaultDialer struct {
@ -43,7 +42,7 @@ type defaultDialer struct {
identity *provider.FullIdentity
}
func (d *defaultDialer) dial(ctx context.Context, node *proto.Node) (ps client.PSClient, err error) {
func (d *defaultDialer) dial(ctx context.Context, node *pb.Node) (ps client.PSClient, err error) {
defer mon.Task()(&ctx)(&err)
c, err := d.t.DialNode(ctx, node)
if err != nil {
@ -64,7 +63,7 @@ func NewClient(identity *provider.FullIdentity, t transport.Client, mbm int) Cli
return &ecClient{d: &d, mbm: mbm}
}
func (ec *ecClient) Put(ctx context.Context, nodes []*proto.Node, rs eestream.RedundancyStrategy,
func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy,
pieceID client.PieceID, data io.Reader, expiration time.Time) (err error) {
defer mon.Task()(&ctx)(&err)
if len(nodes) != rs.TotalCount() {
@ -81,7 +80,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*proto.Node, rs eestream.Re
}
errs := make(chan error, len(readers))
for i, n := range nodes {
go func(i int, n *proto.Node) {
go func(i int, n *pb.Node) {
derivedPieceID, err := pieceID.Derive([]byte(n.GetId()))
if err != nil {
zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
@ -116,7 +115,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*proto.Node, rs eestream.Re
return nil
}
func (ec *ecClient) Get(ctx context.Context, nodes []*proto.Node, es eestream.ErasureScheme,
func (ec *ecClient) Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme,
pieceID client.PieceID, size int64) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
@ -133,7 +132,7 @@ func (ec *ecClient) Get(ctx context.Context, nodes []*proto.Node, es eestream.Er
}
ch := make(chan rangerInfo, len(nodes))
for i, n := range nodes {
go func(i int, n *proto.Node) {
go func(i int, n *pb.Node) {
derivedPieceID, err := pieceID.Derive([]byte(n.GetId()))
if err != nil {
zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
@ -165,11 +164,11 @@ func (ec *ecClient) Get(ctx context.Context, nodes []*proto.Node, es eestream.Er
return eestream.Unpad(rr, int(paddedSize-size))
}
func (ec *ecClient) Delete(ctx context.Context, nodes []*proto.Node, pieceID client.PieceID) (err error) {
func (ec *ecClient) Delete(ctx context.Context, nodes []*pb.Node, pieceID client.PieceID) (err error) {
defer mon.Task()(&ctx)(&err)
errs := make(chan error, len(nodes))
for _, n := range nodes {
go func(n *proto.Node) {
go func(n *pb.Node) {
derivedPieceID, err := pieceID.Derive([]byte(n.GetId()))
if err != nil {
zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
@ -212,7 +211,7 @@ func collectErrors(errs <-chan error, size int) []error {
return result
}
func unique(nodes []*proto.Node) bool {
func unique(nodes []*pb.Node) bool {
if len(nodes) < 2 {
return true
}
@ -244,7 +243,7 @@ func calcPadded(size int64, blockSize int) int64 {
type lazyPieceRanger struct {
ranger ranger.Ranger
dialer dialer
node *proto.Node
node *pb.Node
id client.PieceID
size int64
pba *pb.PayerBandwidthAllocation

View File

@ -19,10 +19,10 @@ import (
"github.com/vivint/infectious"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/rpc/client"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/ranger"
proto "storj.io/storj/protos/overlay"
)
const (
@ -36,17 +36,17 @@ var (
)
var (
node0 = &proto.Node{Id: "node-0"}
node1 = &proto.Node{Id: "node-1"}
node2 = &proto.Node{Id: "node-2"}
node3 = &proto.Node{Id: "node-3"}
node0 = &pb.Node{Id: "node-0"}
node1 = &pb.Node{Id: "node-1"}
node2 = &pb.Node{Id: "node-2"}
node3 = &pb.Node{Id: "node-3"}
)
type mockDialer struct {
m map[*proto.Node]client.PSClient
m map[*pb.Node]client.PSClient
}
func (d *mockDialer) dial(ctx context.Context, node *proto.Node) (
func (d *mockDialer) dial(ctx context.Context, node *pb.Node) (
ps client.PSClient, err error) {
ps = d.m[node]
if ps == nil {
@ -125,32 +125,32 @@ func TestPut(t *testing.T) {
TestLoop:
for i, tt := range []struct {
nodes []*proto.Node
nodes []*pb.Node
min int
mbm int
badInput bool
errs []error
errString string
}{
{[]*proto.Node{}, 0, 0, true, []error{},
{[]*pb.Node{}, 0, 0, true, []error{},
fmt.Sprintf("ecclient error: number of nodes (0) do not match total count (%v) of erasure scheme", n)},
{[]*proto.Node{node0, node1, node2, node3}, 0, -1, true,
{[]*pb.Node{node0, node1, node2, node3}, 0, -1, true,
[]error{nil, nil, nil, nil},
"eestream error: negative max buffer memory"},
{[]*proto.Node{node0, node1, node0, node3}, 0, 0, true,
{[]*pb.Node{node0, node1, node0, node3}, 0, 0, true,
[]error{nil, nil, nil, nil},
"ecclient error: duplicated nodes are not allowed"},
{[]*proto.Node{node0, node1, node2, node3}, 0, 0, false,
{[]*pb.Node{node0, node1, node2, node3}, 0, 0, false,
[]error{nil, nil, nil, nil}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 0, 0, false,
{[]*pb.Node{node0, node1, node2, node3}, 0, 0, false,
[]error{nil, ErrDialFailed, nil, nil},
"ecclient error: successful puts (3) less than minimum threshold (4)"},
{[]*proto.Node{node0, node1, node2, node3}, 0, 0, false,
{[]*pb.Node{node0, node1, node2, node3}, 0, 0, false,
[]error{nil, ErrOpFailed, nil, nil},
"ecclient error: successful puts (3) less than minimum threshold (4)"},
{[]*proto.Node{node0, node1, node2, node3}, 2, 0, false,
{[]*pb.Node{node0, node1, node2, node3}, 2, 0, false,
[]error{nil, ErrDialFailed, nil, nil}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 2, 0, false,
{[]*pb.Node{node0, node1, node2, node3}, 2, 0, false,
[]error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed},
"ecclient error: successful puts (1) less than minimum threshold (2)"},
} {
@ -159,12 +159,12 @@ TestLoop:
id := client.NewPieceID()
ttl := time.Now()
errs := make(map[*proto.Node]error, len(tt.nodes))
errs := make(map[*pb.Node]error, len(tt.nodes))
for i, n := range tt.nodes {
errs[n] = tt.errs[i]
}
m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
m := make(map[*pb.Node]client.PSClient, len(tt.nodes))
for _, n := range tt.nodes {
if !tt.badInput {
derivedID, err := id.Derive([]byte(n.GetId()))
@ -211,37 +211,37 @@ func TestGet(t *testing.T) {
TestLoop:
for i, tt := range []struct {
nodes []*proto.Node
nodes []*pb.Node
mbm int
errs []error
errString string
}{
{[]*proto.Node{}, 0, []error{}, "ecclient error: " +
{[]*pb.Node{}, 0, []error{}, "ecclient error: " +
fmt.Sprintf("number of nodes (0) do not match total count (%v) of erasure scheme", n)},
{[]*proto.Node{node0, node1, node2, node3}, -1,
{[]*pb.Node{node0, node1, node2, node3}, -1,
[]error{nil, nil, nil, nil},
"eestream error: negative max buffer memory"},
{[]*proto.Node{node0, node1, node2, node3}, 0,
{[]*pb.Node{node0, node1, node2, node3}, 0,
[]error{nil, nil, nil, nil}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 0,
{[]*pb.Node{node0, node1, node2, node3}, 0,
[]error{nil, ErrDialFailed, nil, nil}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 0,
{[]*pb.Node{node0, node1, node2, node3}, 0,
[]error{nil, ErrOpFailed, nil, nil}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 0,
{[]*pb.Node{node0, node1, node2, node3}, 0,
[]error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed}, ""},
{[]*proto.Node{node0, node1, node2, node3}, 0,
{[]*pb.Node{node0, node1, node2, node3}, 0,
[]error{ErrDialFailed, ErrOpFailed, ErrOpFailed, ErrDialFailed}, ""},
} {
errTag := fmt.Sprintf("Test case #%d", i)
id := client.NewPieceID()
errs := make(map[*proto.Node]error, len(tt.nodes))
errs := make(map[*pb.Node]error, len(tt.nodes))
for i, n := range tt.nodes {
errs[n] = tt.errs[i]
}
m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
m := make(map[*pb.Node]client.PSClient, len(tt.nodes))
for _, n := range tt.nodes {
if errs[n] == ErrOpFailed {
derivedID, err := id.Derive([]byte(n.GetId()))
@ -275,30 +275,30 @@ func TestDelete(t *testing.T) {
TestLoop:
for i, tt := range []struct {
nodes []*proto.Node
nodes []*pb.Node
errs []error
errString string
}{
{[]*proto.Node{}, []error{}, ""},
{[]*proto.Node{node0}, []error{nil}, ""},
{[]*proto.Node{node0}, []error{ErrDialFailed}, dialFailed},
{[]*proto.Node{node0}, []error{ErrOpFailed}, opFailed},
{[]*proto.Node{node0, node1}, []error{nil, nil}, ""},
{[]*proto.Node{node0, node1}, []error{ErrDialFailed, nil}, ""},
{[]*proto.Node{node0, node1}, []error{nil, ErrOpFailed}, ""},
{[]*proto.Node{node0, node1}, []error{ErrDialFailed, ErrDialFailed}, dialFailed},
{[]*proto.Node{node0, node1}, []error{ErrOpFailed, ErrOpFailed}, opFailed},
{[]*pb.Node{}, []error{}, ""},
{[]*pb.Node{node0}, []error{nil}, ""},
{[]*pb.Node{node0}, []error{ErrDialFailed}, dialFailed},
{[]*pb.Node{node0}, []error{ErrOpFailed}, opFailed},
{[]*pb.Node{node0, node1}, []error{nil, nil}, ""},
{[]*pb.Node{node0, node1}, []error{ErrDialFailed, nil}, ""},
{[]*pb.Node{node0, node1}, []error{nil, ErrOpFailed}, ""},
{[]*pb.Node{node0, node1}, []error{ErrDialFailed, ErrDialFailed}, dialFailed},
{[]*pb.Node{node0, node1}, []error{ErrOpFailed, ErrOpFailed}, opFailed},
} {
errTag := fmt.Sprintf("Test case #%d", i)
id := client.NewPieceID()
errs := make(map[*proto.Node]error, len(tt.nodes))
errs := make(map[*pb.Node]error, len(tt.nodes))
for i, n := range tt.nodes {
errs[n] = tt.errs[i]
}
m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
m := make(map[*pb.Node]client.PSClient, len(tt.nodes))
for _, n := range tt.nodes {
if errs[n] != ErrDialFailed {
derivedID, err := id.Derive([]byte(n.GetId()))
@ -327,21 +327,21 @@ TestLoop:
func TestUnique(t *testing.T) {
for i, tt := range []struct {
nodes []*proto.Node
nodes []*pb.Node
unique bool
}{
{nil, true},
{[]*proto.Node{}, true},
{[]*proto.Node{node0}, true},
{[]*proto.Node{node0, node1}, true},
{[]*proto.Node{node0, node0}, false},
{[]*proto.Node{node0, node1, node0}, false},
{[]*proto.Node{node1, node0, node0}, false},
{[]*proto.Node{node0, node0, node1}, false},
{[]*proto.Node{node2, node0, node1}, true},
{[]*proto.Node{node2, node0, node3, node1}, true},
{[]*proto.Node{node2, node0, node2, node1}, false},
{[]*proto.Node{node1, node0, node3, node1}, false},
{[]*pb.Node{}, true},
{[]*pb.Node{node0}, true},
{[]*pb.Node{node0, node1}, true},
{[]*pb.Node{node0, node0}, false},
{[]*pb.Node{node0, node1, node0}, false},
{[]*pb.Node{node1, node0, node0}, false},
{[]*pb.Node{node0, node0, node1}, false},
{[]*pb.Node{node2, node0, node1}, true},
{[]*pb.Node{node2, node0, node3, node1}, true},
{[]*pb.Node{node2, node0, node2, node1}, false},
{[]*pb.Node{node1, node0, node3, node1}, false},
} {
errTag := fmt.Sprintf("Test case #%d", i)
assert.Equal(t, tt.unique, unique(tt.nodes), errTag)

View File

@ -12,9 +12,9 @@ import (
gomock "github.com/golang/mock/gomock"
eestream "storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/pb"
client "storj.io/storj/pkg/piecestore/rpc/client"
ranger "storj.io/storj/pkg/ranger"
overlay "storj.io/storj/protos/overlay"
)
// MockClient is a mock of Client interface
@ -41,7 +41,7 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
}
// Delete mocks base method
func (m *MockClient) Delete(arg0 context.Context, arg1 []*overlay.Node, arg2 client.PieceID) error {
func (m *MockClient) Delete(arg0 context.Context, arg1 []*pb.Node, arg2 client.PieceID) error {
ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
@ -53,7 +53,7 @@ func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.C
}
// Get mocks base method
func (m *MockClient) Get(arg0 context.Context, arg1 []*overlay.Node, arg2 eestream.ErasureScheme, arg3 client.PieceID, arg4 int64) (ranger.Ranger, error) {
func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 client.PieceID, arg4 int64) (ranger.Ranger, error) {
ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(ranger.Ranger)
ret1, _ := ret[1].(error)
@ -66,7 +66,7 @@ func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{})
}
// Put mocks base method
func (m *MockClient) Put(arg0 context.Context, arg1 []*overlay.Node, arg2 eestream.RedundancyStrategy, arg3 client.PieceID, arg4 io.Reader, arg5 time.Time) error {
func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 client.PieceID, arg4 io.Reader, arg5 time.Time) error {
ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0

View File

@ -16,9 +16,9 @@ import (
time "time"
gomock "github.com/golang/mock/gomock"
pb "storj.io/storj/pkg/pb"
client "storj.io/storj/pkg/piecestore/rpc/client"
ranger "storj.io/storj/pkg/ranger"
piecestore "storj.io/storj/protos/piecestore"
)
// MockPSClient is a mock of PSClient interface
@ -69,7 +69,7 @@ func (mr *MockPSClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call
}
// Get mocks base method
func (m *MockPSClient) Get(arg0 context.Context, arg1 client.PieceID, arg2 int64, arg3 *piecestore.PayerBandwidthAllocation) (ranger.Ranger, error) {
func (m *MockPSClient) Get(arg0 context.Context, arg1 client.PieceID, arg2 int64, arg3 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(ranger.Ranger)
ret1, _ := ret[1].(error)
@ -82,9 +82,9 @@ func (mr *MockPSClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gom
}
// Meta mocks base method
func (m *MockPSClient) Meta(arg0 context.Context, arg1 client.PieceID) (*piecestore.PieceSummary, error) {
func (m *MockPSClient) Meta(arg0 context.Context, arg1 client.PieceID) (*pb.PieceSummary, error) {
ret := m.ctrl.Call(m, "Meta", arg0, arg1)
ret0, _ := ret[0].(*piecestore.PieceSummary)
ret0, _ := ret[0].(*pb.PieceSummary)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -95,7 +95,7 @@ func (mr *MockPSClientMockRecorder) Meta(arg0, arg1 interface{}) *gomock.Call {
}
// Put mocks base method
func (m *MockPSClient) Put(arg0 context.Context, arg1 client.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *piecestore.PayerBandwidthAllocation) error {
func (m *MockPSClient) Put(arg0 context.Context, arg1 client.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.PayerBandwidthAllocation) error {
ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0

View File

@ -13,7 +13,7 @@ import (
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
overlay "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
// MockClient is a mock of Client interface
@ -40,7 +40,7 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
}
// DialNode mocks base method
func (m *MockClient) DialNode(arg0 context.Context, arg1 *overlay.Node) (*grpc.ClientConn, error) {
func (m *MockClient) DialNode(arg0 context.Context, arg1 *pb.Node) (*grpc.ClientConn, error) {
ret := m.ctrl.Call(m, "DialNode", arg0, arg1)
ret0, _ := ret[0].(*grpc.ClientConn)
ret1, _ := ret[1].(error)
@ -53,7 +53,7 @@ func (mr *MockClientMockRecorder) DialNode(arg0, arg1 interface{}) *gomock.Call
}
// DialUnauthenticated mocks base method
func (m *MockClient) DialUnauthenticated(arg0 context.Context, arg1 overlay.NodeAddress) (*grpc.ClientConn, error) {
func (m *MockClient) DialUnauthenticated(arg0 context.Context, arg1 pb.NodeAddress) (*grpc.ClientConn, error) {
ret := m.ctrl.Call(m, "DialUnauthenticated", arg0, arg1)
ret0, _ := ret[0].(*grpc.ClientConn)
ret1, _ := ret[1].(error)

View File

@ -14,17 +14,16 @@ import (
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/rpc/client"
"storj.io/storj/pkg/pointerdb/pdbclient"
"storj.io/storj/pkg/ranger"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/storage/ec"
opb "storj.io/storj/protos/overlay"
ppb "storj.io/storj/protos/pointerdb"
)
var (
@ -91,7 +90,7 @@ func (s *segmentStore) Put(ctx context.Context, path paths.Path, data io.Reader,
metadata []byte, expiration time.Time) (meta Meta, err error) {
defer mon.Task()(&ctx)(&err)
var p *ppb.Pointer
var p *pb.Pointer
exp, err := ptypes.TimestampProto(expiration)
if err != nil {
@ -104,8 +103,8 @@ func (s *segmentStore) Put(ctx context.Context, path paths.Path, data io.Reader,
return Meta{}, err
}
if !remoteSized {
p = &ppb.Pointer{
Type: ppb.Pointer_INLINE,
p = &pb.Pointer{
Type: pb.Pointer_INLINE,
InlineSegment: peekReader.thresholdBuf,
Size: int64(len(peekReader.thresholdBuf)),
ExpirationDate: exp,
@ -146,21 +145,21 @@ func (s *segmentStore) Put(ctx context.Context, path paths.Path, data io.Reader,
}
// makeRemotePointer creates a pointer of type remote
func (s *segmentStore) makeRemotePointer(nodes []*opb.Node, pieceID client.PieceID, readerSize int64,
exp *timestamp.Timestamp, metadata []byte) (pointer *ppb.Pointer, err error) {
var remotePieces []*ppb.RemotePiece
func (s *segmentStore) makeRemotePointer(nodes []*pb.Node, pieceID client.PieceID, readerSize int64,
exp *timestamp.Timestamp, metadata []byte) (pointer *pb.Pointer, err error) {
var remotePieces []*pb.RemotePiece
for i := range nodes {
remotePieces = append(remotePieces, &ppb.RemotePiece{
remotePieces = append(remotePieces, &pb.RemotePiece{
PieceNum: int32(i),
NodeId: nodes[i].Id,
})
}
pointer = &ppb.Pointer{
Type: ppb.Pointer_REMOTE,
Remote: &ppb.RemoteSegment{
Redundancy: &ppb.RedundancyScheme{
Type: ppb.RedundancyScheme_RS,
pointer = &pb.Pointer{
Type: pb.Pointer_REMOTE,
Remote: &pb.RemoteSegment{
Redundancy: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_RS,
MinReq: int32(s.rs.RequiredCount()),
Total: int32(s.rs.TotalCount()),
RepairThreshold: int32(s.rs.Min),
@ -187,7 +186,7 @@ func (s *segmentStore) Get(ctx context.Context, path paths.Path) (
return nil, Meta{}, Error.Wrap(err)
}
if pr.GetType() == ppb.Pointer_REMOTE {
if pr.GetType() == pb.Pointer_REMOTE {
seg := pr.GetRemote()
pid := client.PieceID(seg.PieceId)
nodes, err := s.lookupNodes(ctx, seg)
@ -211,7 +210,7 @@ func (s *segmentStore) Get(ctx context.Context, path paths.Path) (
return rr, convertMeta(pr), nil
}
func makeErasureScheme(rs *ppb.RedundancyScheme) (eestream.ErasureScheme, error) {
func makeErasureScheme(rs *pb.RedundancyScheme) (eestream.ErasureScheme, error) {
fc, err := infectious.NewFEC(int(rs.GetMinReq()), int(rs.GetTotal()))
if err != nil {
return nil, Error.Wrap(err)
@ -229,7 +228,7 @@ func (s *segmentStore) Delete(ctx context.Context, path paths.Path) (err error)
return Error.Wrap(err)
}
if pr.GetType() == ppb.Pointer_REMOTE {
if pr.GetType() == pb.Pointer_REMOTE {
seg := pr.GetRemote()
pid := client.PieceID(seg.PieceId)
nodes, err := s.lookupNodes(ctx, seg)
@ -249,7 +248,7 @@ func (s *segmentStore) Delete(ctx context.Context, path paths.Path) (err error)
}
// lookupNodes calls Lookup to get node addresses from the overlay
func (s *segmentStore) lookupNodes(ctx context.Context, seg *ppb.RemoteSegment) (nodes []*opb.Node, err error) {
func (s *segmentStore) lookupNodes(ctx context.Context, seg *pb.RemoteSegment) (nodes []*pb.Node, err error) {
pieces := seg.GetRemotePieces()
var nodeIds []dht.NodeID
for _, p := range pieces {
@ -286,7 +285,7 @@ func (s *segmentStore) List(ctx context.Context, prefix, startAfter,
}
// convertMeta converts pointer to segment metadata
func convertMeta(pr *ppb.Pointer) Meta {
func convertMeta(pr *pb.Pointer) Meta {
return Meta{
Modified: convertTime(pr.GetCreationDate()),
Expiration: convertTime(pr.GetExpirationDate()),

View File

@ -17,11 +17,10 @@ import (
mock_eestream "storj.io/storj/pkg/eestream/mocks"
mock_overlay "storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
pdb "storj.io/storj/pkg/pointerdb/pdbclient"
mock_pointerdb "storj.io/storj/pkg/pointerdb/pdbclient/mocks"
mock_ecclient "storj.io/storj/pkg/storage/ec/mocks"
opb "storj.io/storj/protos/overlay"
ppb "storj.io/storj/protos/pointerdb"
)
var (
@ -63,10 +62,10 @@ func TestSegmentStoreMeta(t *testing.T) {
for _, tt := range []struct {
pathInput string
returnPointer *ppb.Pointer
returnPointer *pb.Pointer
returnMeta Meta
}{
{"path/1/2/3", &ppb.Pointer{CreationDate: pExp, ExpirationDate: pExp}, Meta{Modified: mExp, Expiration: mExp}},
{"path/1/2/3", &pb.Pointer{CreationDate: pExp, ExpirationDate: pExp}, Meta{Modified: mExp, Expiration: mExp}},
} {
p := paths.New(tt.pathInput)
@ -115,7 +114,7 @@ func TestSegmentStorePutRemote(t *testing.T) {
mockES.EXPECT().TotalCount().Return(1),
mockOC.EXPECT().Choose(
gomock.Any(), gomock.Any(), gomock.Any(),
).Return([]*opb.Node{
).Return([]*pb.Node{
{Id: "im-a-node"},
}, nil),
mockEC.EXPECT().Put(
@ -192,12 +191,12 @@ func TestSegmentStoreGetInline(t *testing.T) {
for _, tt := range []struct {
pathInput string
thresholdSize int
pointerType ppb.Pointer_DataType
pointerType pb.Pointer_DataType
inlineContent []byte
size int64
metadata []byte
}{
{"path/1/2/3", 10, ppb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
{"path/1/2/3", 10, pb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
} {
mockOC := mock_overlay.NewMockClient(ctrl)
mockEC := mock_ecclient.NewMockClient(ctrl)
@ -215,7 +214,7 @@ func TestSegmentStoreGetInline(t *testing.T) {
calls := []*gomock.Call{
mockPDB.EXPECT().Get(
gomock.Any(), gomock.Any(),
).Return(&ppb.Pointer{
).Return(&pb.Pointer{
Type: tt.pointerType,
InlineSegment: tt.inlineContent,
CreationDate: someTime,
@ -242,11 +241,11 @@ func TestSegmentStoreGetRemote(t *testing.T) {
for _, tt := range []struct {
pathInput string
thresholdSize int
pointerType ppb.Pointer_DataType
pointerType pb.Pointer_DataType
size int64
metadata []byte
}{
{"path/1/2/3", 10, ppb.Pointer_REMOTE, int64(3), []byte("metadata")},
{"path/1/2/3", 10, pb.Pointer_REMOTE, int64(3), []byte("metadata")},
} {
mockOC := mock_overlay.NewMockClient(ctrl)
mockEC := mock_ecclient.NewMockClient(ctrl)
@ -264,18 +263,18 @@ func TestSegmentStoreGetRemote(t *testing.T) {
calls := []*gomock.Call{
mockPDB.EXPECT().Get(
gomock.Any(), gomock.Any(),
).Return(&ppb.Pointer{
).Return(&pb.Pointer{
Type: tt.pointerType,
Remote: &ppb.RemoteSegment{
Redundancy: &ppb.RedundancyScheme{
Type: ppb.RedundancyScheme_RS,
Remote: &pb.RemoteSegment{
Redundancy: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_RS,
MinReq: 1,
Total: 2,
RepairThreshold: 1,
SuccessThreshold: 2,
},
PieceId: "here's my piece id",
RemotePieces: []*ppb.RemotePiece{},
RemotePieces: []*pb.RemotePiece{},
},
CreationDate: someTime,
ExpirationDate: someTime,
@ -305,12 +304,12 @@ func TestSegmentStoreDeleteInline(t *testing.T) {
for _, tt := range []struct {
pathInput string
thresholdSize int
pointerType ppb.Pointer_DataType
pointerType pb.Pointer_DataType
inlineContent []byte
size int64
metadata []byte
}{
{"path/1/2/3", 10, ppb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
{"path/1/2/3", 10, pb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
} {
mockOC := mock_overlay.NewMockClient(ctrl)
mockEC := mock_ecclient.NewMockClient(ctrl)
@ -328,7 +327,7 @@ func TestSegmentStoreDeleteInline(t *testing.T) {
calls := []*gomock.Call{
mockPDB.EXPECT().Get(
gomock.Any(), gomock.Any(),
).Return(&ppb.Pointer{
).Return(&pb.Pointer{
Type: tt.pointerType,
InlineSegment: tt.inlineContent,
CreationDate: someTime,
@ -358,11 +357,11 @@ func TestSegmentStoreDeleteRemote(t *testing.T) {
for _, tt := range []struct {
pathInput string
thresholdSize int
pointerType ppb.Pointer_DataType
pointerType pb.Pointer_DataType
size int64
metadata []byte
}{
{"path/1/2/3", 10, ppb.Pointer_REMOTE, int64(3), []byte("metadata")},
{"path/1/2/3", 10, pb.Pointer_REMOTE, int64(3), []byte("metadata")},
} {
mockOC := mock_overlay.NewMockClient(ctrl)
mockEC := mock_ecclient.NewMockClient(ctrl)
@ -380,18 +379,18 @@ func TestSegmentStoreDeleteRemote(t *testing.T) {
calls := []*gomock.Call{
mockPDB.EXPECT().Get(
gomock.Any(), gomock.Any(),
).Return(&ppb.Pointer{
).Return(&pb.Pointer{
Type: tt.pointerType,
Remote: &ppb.RemoteSegment{
Redundancy: &ppb.RedundancyScheme{
Type: ppb.RedundancyScheme_RS,
Remote: &pb.RemoteSegment{
Redundancy: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_RS,
MinReq: 1,
Total: 2,
RepairThreshold: 1,
SuccessThreshold: 2,
},
PieceId: "here's my piece id",
RemotePieces: []*ppb.RemotePiece{},
RemotePieces: []*pb.RemotePiece{},
},
CreationDate: someTime,
ExpirationDate: someTime,
@ -453,8 +452,8 @@ func TestSegmentStoreList(t *testing.T) {
).Return([]pdb.ListItem{
{
Path: listedPath,
Pointer: &ppb.Pointer{
Type: ppb.Pointer_INLINE,
Pointer: &pb.Pointer{
Type: pb.Pointer_INLINE,
InlineSegment: tt.inlineContent,
CreationDate: someTime,
ExpirationDate: someTime,
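
The segment-store tests now build every pointer from the flattened pb package instead of the per-proto aliases. A minimal sketch of a remote pointer using the renamed types, mirroring the test fixtures above (field values here are illustrative only):

pointer := &pb.Pointer{
	Type: pb.Pointer_REMOTE,
	Remote: &pb.RemoteSegment{
		Redundancy: &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_RS,
			MinReq:           1,
			Total:            2,
			RepairThreshold:  1,
			SuccessThreshold: 2,
		},
		PieceId:      "example-piece-id", // placeholder value
		RemotePieces: []*pb.RemotePiece{},
	},
}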

View File

@ -14,10 +14,10 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/paths"
"storj.io/storj/pkg/pb"
ranger "storj.io/storj/pkg/ranger"
"storj.io/storj/pkg/storage/meta"
"storj.io/storj/pkg/storage/segments"
streamspb "storj.io/storj/protos/streams"
)
var mon = monkit.Package()
@ -32,7 +32,7 @@ type Meta struct {
// convertMeta converts segment metadata to stream metadata
func convertMeta(segmentMeta segments.Meta) (Meta, error) {
msi := streamspb.MetaStreamInfo{}
msi := pb.MetaStreamInfo{}
err := proto.Unmarshal(segmentMeta.Data, &msi)
if err != nil {
return Meta{}, err
@ -104,7 +104,7 @@ func (s *streamStore) Put(ctx context.Context, path paths.Path, data io.Reader,
lastSegmentPath := path.Prepend("l")
md := streamspb.MetaStreamInfo{
md := pb.MetaStreamInfo{
NumberOfSegments: totalSegments,
SegmentsSize: s.segmentSize,
LastSegmentSize: lastSegmentSize,
@ -144,7 +144,7 @@ func (s *streamStore) Get(ctx context.Context, path paths.Path) (
return nil, Meta{}, err
}
msi := streamspb.MetaStreamInfo{}
msi := pb.MetaStreamInfo{}
err = proto.Unmarshal(lastSegmentMeta.Data, &msi)
if err != nil {
return nil, Meta{}, err
@ -202,7 +202,7 @@ func (s *streamStore) Delete(ctx context.Context, path paths.Path) (err error) {
return err
}
msi := streamspb.MetaStreamInfo{}
msi := pb.MetaStreamInfo{}
err = proto.Unmarshal(lastSegmentMeta.Data, &msi)
if err != nil {
return err
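
Put, Get, and Delete all funnel stream metadata through the same pb.MetaStreamInfo message: Put marshals it into the last segment's metadata, while Get and Delete unmarshal it on the way back. A minimal round-trip sketch using the proto package this file already imports (field values are illustrative):

// marshal on Put
md := pb.MetaStreamInfo{
	NumberOfSegments: 4,
	SegmentsSize:     64 * 1024,
}
data, err := proto.Marshal(&md) // stored alongside the last segment
if err != nil {
	return err
}

// unmarshal on Get/Delete
msi := pb.MetaStreamInfo{}
if err := proto.Unmarshal(data, &msi); err != nil {
	return err
}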

View File

@ -10,7 +10,7 @@ import (
"google.golang.org/grpc"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/pkg/pb"
)
var (
@ -21,6 +21,6 @@ var (
// Client defines the interface to a transport client.
type Client interface {
DialUnauthenticated(ctx context.Context, addr proto.NodeAddress) (*grpc.ClientConn, error)
DialNode(ctx context.Context, node *proto.Node) (*grpc.ClientConn, error)
DialUnauthenticated(ctx context.Context, addr pb.NodeAddress) (*grpc.ClientConn, error)
DialNode(ctx context.Context, node *pb.Node) (*grpc.ClientConn, error)
}
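
Any implementation must now satisfy the pb-typed signatures. Assuming the Client interface and the Transport implementation from the next file share the same transport package, a compile-time assertion along these lines would keep the two in sync (this check is a suggestion, not part of the commit):

// fails to compile if *Transport stops satisfying Client
var _ Client = (*Transport)(nil)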

View File

@ -8,8 +8,8 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
// Transport interface structure
@ -23,7 +23,7 @@ func NewClient(identity *provider.FullIdentity) *Transport {
}
// DialNode using the authenticated mode
func (o *Transport) DialNode(ctx context.Context, node *proto.Node) (conn *grpc.ClientConn, err error) {
func (o *Transport) DialNode(ctx context.Context, node *pb.Node) (conn *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err)
if node.Address == nil || node.Address.Address == "" {
@ -38,7 +38,7 @@ func (o *Transport) DialNode(ctx context.Context, node *proto.Node) (conn *grpc.
}
// DialUnauthenticated using unauthenticated mode
func (o *Transport) DialUnauthenticated(ctx context.Context, addr proto.NodeAddress) (conn *grpc.ClientConn, err error) {
func (o *Transport) DialUnauthenticated(ctx context.Context, addr pb.NodeAddress) (conn *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err)
if addr.Address == "" {

View File

@ -8,8 +8,8 @@ import (
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
proto "storj.io/storj/protos/overlay"
)
var ctx = context.Background()
@ -25,10 +25,10 @@ func TestDialNode(t *testing.T) {
}
// node.Address.Address == "" condition test
node := proto.Node{
node := pb.Node{
Id: "DUMMYID1",
Address: &proto.NodeAddress{
Transport: proto.NodeTransport_TCP,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP,
Address: "",
},
}
@ -37,7 +37,7 @@ func TestDialNode(t *testing.T) {
assert.Nil(t, conn)
// node.Address == nil condition test
node = proto.Node{
node = pb.Node{
Id: "DUMMYID2",
Address: nil,
}
@ -46,10 +46,10 @@ func TestDialNode(t *testing.T) {
assert.Nil(t, conn)
// node is valid argument condition test
node = proto.Node{
node = pb.Node{
Id: "DUMMYID3",
Address: &proto.NodeAddress{
Transport: proto.NodeTransport_TCP,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP,
Address: "127.0.0.0:9000",
},
}

View File

@ -1,6 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package overlay
//go:generate protoc --go_out=plugins=grpc:. overlay.proto

View File

@ -1,6 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestoreroutes
//go:generate protoc -I ./ piece_store.proto --go_out=plugins=grpc:.

View File

@ -1,6 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package pointerdb
//go:generate protoc --go_out=plugins=grpc:. pointerdb.proto

View File

@ -1,6 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
//go:generate protoc --go_out=plugins=grpc:. meta.proto
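
With all four per-package generators deleted, the go:generate directives presumably move into the consolidated pkg/pb package; regeneration is now done by running go generate inside pkg/pb. A hypothetical sketch of what a single pkg/pb/gen.go could look like (the file name and exact protoc flags are assumptions, not taken from this commit):

// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package pb

// NOTE: hypothetical consolidated generator; flags mirror the deleted files.
//go:generate protoc --go_out=plugins=grpc:. overlay.proto
//go:generate protoc --go_out=plugins=grpc:. piece_store.proto
//go:generate protoc --go_out=plugins=grpc:. pointerdb.proto
//go:generate protoc --go_out=plugins=grpc:. meta.proto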