pkg/dht: remove (#2599)
parent 665a1e386f
commit 63c1a050fc
.github/CODEOWNERS (vendored)
@@ -14,5 +14,4 @@
 /pkg/datarepair/	@jenlij
 
 # kademlia
-/pkg/dht/	@jenlij
 /pkg/kademlia/	@jenlij
@@ -1,49 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package dht
-
-import (
-	"context"
-	"time"
-
-	"storj.io/storj/pkg/overlay"
-	"storj.io/storj/pkg/pb"
-	"storj.io/storj/pkg/storj"
-	"storj.io/storj/storage"
-)
-
-// DHT is the interface for the DHT in the Storj network
-type DHT interface {
-	FindNear(ctx context.Context, start storj.NodeID, limit int) ([]*pb.Node, error)
-	Bootstrap(ctx context.Context) error
-	Ping(ctx context.Context, node pb.Node) (pb.Node, error)
-	FindNode(ctx context.Context, ID storj.NodeID) (pb.Node, error)
-	Seen() []*pb.Node
-}
-
-// RoutingTable contains information on nodes we have locally
-type RoutingTable interface {
-	// local params
-	Local() overlay.NodeDossier
-	K() int
-	CacheSize() int
-	GetBucketIds(context.Context) (storage.Keys, error)
-	FindNear(ctx context.Context, id storj.NodeID, limit int) ([]*pb.Node, error)
-	ConnectionSuccess(ctx context.Context, node *pb.Node) error
-	ConnectionFailed(ctx context.Context, node *pb.Node) error
-	// these are for refreshing
-	SetBucketTimestamp(ctx context.Context, id []byte, now time.Time) error
-	GetBucketTimestamp(ctx context.Context, id []byte) (time.Time, error)
-
-	Close() error
-}
-
-// Bucket is a set of methods to act on kademlia k buckets
-type Bucket interface {
-	Routing() []pb.Node
-	Cache() []pb.Node
-	// TODO: should this be a NodeID?
-	Midpoint() string
-	Nodes() []*pb.Node
-}
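The file deleted above held the shared dht.DHT, dht.RoutingTable, and dht.Bucket interfaces; the remaining hunks switch callers to the concrete *Kademlia type or to interfaces declared next to the code that uses them (see the RoutingTableInterface added in the routing test below). That works because Go interfaces are satisfied implicitly. A minimal sketch of the consumer-side-interface pattern, using hypothetical names rather than code from this commit:

package main

import (
	"context"
	"fmt"
)

// nearFinder is a hypothetical consumer-side interface: the caller
// declares only the method it needs instead of importing a shared
// interface package such as pkg/dht.
type nearFinder interface {
	FindNear(ctx context.Context, id string, limit int) ([]string, error)
}

// memTable is a toy routing table; it satisfies nearFinder implicitly
// because it has a FindNear method with a matching signature.
type memTable struct{ nodes []string }

func (m *memTable) FindNear(ctx context.Context, id string, limit int) ([]string, error) {
	if limit > len(m.nodes) {
		limit = len(m.nodes)
	}
	return m.nodes[:limit], nil
}

func main() {
	var table nearFinder = &memTable{nodes: []string{"a", "b", "c"}}
	near, err := table.FindNear(context.Background(), "a", 2)
	fmt.Println(near, err) // [a b] <nil>
}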
@@ -13,14 +13,14 @@ import (
 
 // Inspector is a gRPC service for inspecting kademlia internals
 type Inspector struct {
-	dht      *Kademlia
+	kademlia *Kademlia
 	identity *identity.FullIdentity
 }
 
 // NewInspector creates an Inspector
-func NewInspector(kad *Kademlia, identity *identity.FullIdentity) *Inspector {
+func NewInspector(kademlia *Kademlia, identity *identity.FullIdentity) *Inspector {
 	return &Inspector{
-		dht:      kad,
+		kademlia: kademlia,
 		identity: identity,
 	}
 }
@@ -30,7 +30,7 @@ func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest)
 	defer mon.Task()(&ctx)(&err)
 
 	// TODO: this is definitely the wrong way to get this
-	kadNodes, err := srv.dht.FindNear(ctx, srv.identity.ID, 100000)
+	kadNodes, err := srv.kademlia.FindNear(ctx, srv.identity.ID, 100000)
 	if err != nil {
 		return nil, err
 	}
@@ -43,7 +43,7 @@ func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest)
 // GetBuckets returns all kademlia buckets for current kademlia instance
 func (srv *Inspector) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest) (_ *pb.GetBucketsResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
-	b, err := srv.dht.GetBucketIds(ctx)
+	b, err := srv.kademlia.GetBucketIds(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -64,7 +64,7 @@ func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (_
 	defer mon.Task()(&ctx)(&err)
 	start := req.Start
 	limit := req.Limit
-	nodes, err := srv.dht.FindNear(ctx, start, int(limit))
+	nodes, err := srv.kademlia.FindNear(ctx, start, int(limit))
 	if err != nil {
 		return &pb.FindNearResponse{}, err
 	}
@@ -76,7 +76,7 @@ func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (_
 // PingNode sends a PING RPC to the provided node ID in the Kad network.
 func (srv *Inspector) PingNode(ctx context.Context, req *pb.PingNodeRequest) (_ *pb.PingNodeResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
-	_, err = srv.dht.Ping(ctx, pb.Node{
+	_, err = srv.kademlia.Ping(ctx, pb.Node{
 		Id: req.Id,
 		Address: &pb.NodeAddress{
 			Address: req.Address,
@@ -98,7 +98,7 @@ func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest)
 	if err != nil {
 		return &pb.LookupNodeResponse{}, err
 	}
-	node, err := srv.dht.FindNode(ctx, id)
+	node, err := srv.kademlia.FindNode(ctx, id)
 	if err != nil {
 		return &pb.LookupNodeResponse{}, err
 	}
@@ -111,7 +111,7 @@ func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest)
 // DumpNodes returns all of the nodes in the routing table database.
 func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (_ *pb.DumpNodesResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
-	nodes, err := srv.dht.DumpNodes(ctx)
+	nodes, err := srv.kademlia.DumpNodes(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -124,7 +124,7 @@ func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (
 // NodeInfo sends a PING RPC to a node and returns its local info.
 func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (_ *pb.NodeInfoResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
-	info, err := srv.dht.FetchInfo(ctx, pb.Node{
+	info, err := srv.kademlia.FetchInfo(ctx, pb.Node{
 		Id:      req.Id,
 		Address: req.Address,
 	})
@@ -142,7 +142,7 @@ func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (_
 // GetBucketList returns the list of buckets with their routing nodes and their cached nodes
 func (srv *Inspector) GetBucketList(ctx context.Context, req *pb.GetBucketListRequest) (_ *pb.GetBucketListResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
-	bucketIds, err := srv.dht.GetBucketIds(ctx)
+	bucketIds, err := srv.kademlia.GetBucketIds(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -151,11 +151,11 @@ func (srv *Inspector) GetBucketList(ctx context.Context, req *pb.GetBucketListRe
 
 	for i, b := range bucketIds {
 		bucketID := keyToBucketID(b)
-		routingNodes, err := srv.dht.GetNodesWithinKBucket(ctx, bucketID)
+		routingNodes, err := srv.kademlia.GetNodesWithinKBucket(ctx, bucketID)
 		if err != nil {
 			return nil, err
 		}
-		cachedNodes := srv.dht.GetCachedNodesWithinKBucket(bucketID)
+		cachedNodes := srv.kademlia.GetCachedNodesWithinKBucket(bucketID)
 		buckets[i] = &pb.GetBucketListResponse_Bucket{
 			BucketId:     keyToBucketID(b),
 			RoutingNodes: routingNodes,
@@ -36,7 +36,7 @@ var (
 	mon = monkit.Package()
 )
 
-// Kademlia is an implementation of kademlia adhering to the DHT interface.
+// Kademlia is an implementation of kademlia network.
 type Kademlia struct {
 	log   *zap.Logger
 	alpha int // alpha is a system wide concurrency parameter
@@ -14,7 +14,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"storj.io/storj/pkg/dht"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
 )
@@ -54,7 +53,7 @@ type Grapher interface {
 	Graph(io.Writer) error
 }
 
-func SaveGraph(table dht.RoutingTable) {
+func SaveGraph(table interface{}) {
 	if table, ok := table.(Grapher); ok {
 		fh, err := os.Create(fmt.Sprintf("routing-graph-%003d.dot", atomic.AddInt64(graphCounter, 1)))
 		if err != nil {
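With the dht.RoutingTable parameter gone, SaveGraph above accepts any value and uses a comma-ok type assertion to check for the optional Grapher capability. A small self-contained sketch of that assertion pattern, with hypothetical names (grapher, fakeTable, saveGraph) that are not part of the Storj codebase:

package main

import (
	"fmt"
	"io"
	"os"
)

// grapher mirrors the Grapher interface above: an optional capability a
// routing table may or may not provide.
type grapher interface {
	Graph(io.Writer) error
}

// fakeTable is a hypothetical table that knows how to render itself.
type fakeTable struct{}

func (fakeTable) Graph(w io.Writer) error {
	_, err := fmt.Fprintln(w, "digraph routing {}")
	return err
}

// saveGraph accepts anything; only values implementing grapher are drawn,
// everything else is silently ignored, the same shape as SaveGraph above.
func saveGraph(table interface{}) {
	if g, ok := table.(grapher); ok {
		_ = g.Graph(os.Stdout)
	}
}

func main() {
	saveGraph(fakeTable{}) // prints the graph
	saveGraph(42)          // not a grapher: no-op
}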
@@ -10,15 +10,24 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"storj.io/storj/internal/testcontext"
-	"storj.io/storj/pkg/dht"
 	"storj.io/storj/pkg/kademlia/testrouting"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
 )
 
-type routingCtor func(context.Context, storj.NodeID, int, int, int) dht.RoutingTable
+// RoutingTableInterface contains information on nodes we have locally
+type RoutingTableInterface interface {
+	K() int
+	CacheSize() int
+	FindNear(ctx context.Context, id storj.NodeID, limit int) ([]*pb.Node, error)
+	ConnectionSuccess(ctx context.Context, node *pb.Node) error
+	ConnectionFailed(ctx context.Context, node *pb.Node) error
+	Close() error
+}
 
-func newRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) dht.RoutingTable {
+type routingCtor func(context.Context, storj.NodeID, int, int, int) RoutingTableInterface
+
+func newRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) RoutingTableInterface {
 	if allowedFailures != 0 {
 		panic("failure counting currently unsupported")
 	}
@@ -28,7 +37,7 @@ func newRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, a
 	})
 }
 
-func newTestRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) dht.RoutingTable {
+func newTestRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) RoutingTableInterface {
 	return testrouting.New(self, bucketSize, cacheSize, allowedFailures)
 }
 
@@ -11,7 +11,6 @@ import (
 
 	monkit "gopkg.in/spacemonkeygo/monkit.v2"
 
-	"storj.io/storj/pkg/dht"
 	"storj.io/storj/pkg/overlay"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
@@ -59,9 +58,6 @@ func New(self storj.NodeID, bucketSize, cacheSize, allowedFailures int) *Table {
 	}
 }
 
-// make sure the Table implements the right interface
-var _ dht.RoutingTable = (*Table)(nil)
-
 // K returns the Table's routing depth, or Kademlia k value
 func (t *Table) K() int { return t.bucketSize }
 
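The removed var _ dht.RoutingTable = (*Table)(nil) line was a compile-time assertion: it never runs, it only forces the compiler to confirm that *Table implements the interface, and it has to go once pkg/dht no longer exists. A minimal sketch of the idiom with hypothetical names (closer, table), not taken from this commit:

package main

import "fmt"

// closer is a stand-in for the interface being checked (dht.RoutingTable
// in the removed line); table stands in for the concrete routing table.
type closer interface {
	Close() error
}

type table struct{}

func (t *table) Close() error { return nil }

// The blank assignment is free at runtime; it only asks the compiler to
// verify that *table satisfies closer. Delete the interface and this
// line stops compiling, which is why the commit removes the assertion.
var _ closer = (*table)(nil)

func main() {
	fmt.Println("compile-time check passed")
}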