// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package kademlia

import (
	"context"
"encoding/binary"
"fmt"
"sync"
"time"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)

const (
	// KademliaBucket is the string representing the bucket used for the kademlia routing table k-bucket ids
	KademliaBucket = "kbuckets"
	// NodeBucket is the string representing the bucket used for the kademlia routing table node ids
	NodeBucket = "nodes"
	// AntechamberBucket is the string representing the bucket used for the kademlia antechamber nodes
	AntechamberBucket = "antechamber"
)

// RoutingErr is the class for all errors pertaining to routing table operations
var RoutingErr = errs.Class("routing table error")

// Bucket IDs exist in the same address space as node IDs
type bucketID = storj.NodeID
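
// firstBucketID is the all-ones node ID, the maximum value in the ID address
// space; the routing table's first k-bucket therefore spans the entire space
// before any splits occur.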
var firstBucketID = bucketID{
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
}

var emptyBucketID = bucketID{}

// RoutingTableConfig configures the routing table
type RoutingTableConfig struct {
	BucketSize           int `help:"size of each Kademlia bucket" default:"20"`
	ReplacementCacheSize int `help:"size of Kademlia replacement cache" default:"5"`
}

// RoutingTable implements the RoutingTable interface
type RoutingTable struct {
	log              *zap.Logger
	self             *overlay.NodeDossier
	kadBucketDB      storage.KeyValueStore
	nodeBucketDB     storage.KeyValueStore
	transport        *pb.NodeTransport
	mutex            *sync.Mutex
	rcMutex          *sync.Mutex
	acMutex          *sync.Mutex
	replacementCache map[bucketID][]*pb.Node
	bucketSize       int // max number of nodes stored in a kbucket = 20 (k)
	rcBucketSize     int // replacementCache bucket max length
	antechamber      storage.KeyValueStore
}

// NewRoutingTable returns a newly configured instance of a RoutingTable
func NewRoutingTable(logger *zap.Logger, localNode *overlay.NodeDossier, kdb, ndb, adb storage.KeyValueStore, config *RoutingTableConfig) (_ *RoutingTable, err error) {
	if config == nil || config.BucketSize == 0 || config.ReplacementCacheSize == 0 {
		// TODO: handle this more nicely
		config = &RoutingTableConfig{
			BucketSize:           20,
			ReplacementCacheSize: 5,
		}
	}

	rt := &RoutingTable{
		log:              logger,
		self:             localNode,
		kadBucketDB:      kdb,
		nodeBucketDB:     ndb,
		transport:        &defaultTransport,
		mutex:            &sync.Mutex{},
		rcMutex:          &sync.Mutex{},
		acMutex:          &sync.Mutex{},
		replacementCache: make(map[bucketID][]*pb.Node),
		bucketSize:       config.BucketSize,
		rcBucketSize:     config.ReplacementCacheSize,
		antechamber:      adb,
	}
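	// seed the table with the local node: in Kademlia, bucket splitting is
	// decided relative to the local node's ID, so it is stored before any peers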
	ok, err := rt.addNode(context.TODO(), &localNode.Node)
	if !ok || err != nil {
		return nil, RoutingErr.New("could not add localNode to routing table: %s", err)
	}
	return rt, nil
}

// Close closes without closing dependencies
func (rt *RoutingTable) Close() error {
	return nil
}

// Local returns the local node
func (rt *RoutingTable) Local() overlay.NodeDossier {
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	return *rt.self
}

// UpdateSelf updates the local node with the provided info
func (rt *RoutingTable) UpdateSelf(capacity *pb.NodeCapacity) {
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	if capacity != nil {
		rt.self.Capacity = *capacity
	}
}

// K returns the currently configured maximum of nodes to store in a bucket
func (rt *RoutingTable) K() int {
	return rt.bucketSize
}

// CacheSize returns the configured maximum number of nodes kept in each replacement cache bucket
func (rt *RoutingTable) CacheSize() int {
	return rt.rcBucketSize
}

// GetNodes retrieves nodes within the same kbucket as the given node id
// Note: id doesn't need to be stored at time of search
func (rt *RoutingTable) GetNodes(ctx context.Context, id storj.NodeID) ([]*pb.Node, bool) {
	defer mon.Task()(&ctx)(nil)
	bID, err := rt.getKBucketID(ctx, id)
	if err != nil {
		return nil, false
	}
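	// a zero-value bucket ID is treated as "no k-bucket found" for the given id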
	if bID == (bucketID{}) {
		return nil, false
	}
	unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(ctx, bID)
	if err != nil {
		return nil, false
	}
	return unmarshaledNodes, true
}

// GetBucketIds returns a storage.Keys type of bucket IDs in the Kademlia instance
func (rt *RoutingTable) GetBucketIds(ctx context.Context) (_ storage.Keys, err error) {
	defer mon.Task()(&ctx)(&err)
	kbuckets, err := rt.kadBucketDB.List(ctx, nil, 0)
	if err != nil {
		return nil, err
	}
	return kbuckets, nil
}

// DumpNodes iterates through all nodes in the nodeBucketDB, unmarshals them into *pb.Node values, and returns them
func (rt *RoutingTable) DumpNodes(ctx context.Context) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	var nodes []*pb.Node
	var nodeErrors errs.Group

	err = rt.iterateNodes(ctx, storj.NodeID{}, func(ctx context.Context, newID storj.NodeID, protoNode []byte) error {
		newNode := pb.Node{}
		err := proto.Unmarshal(protoNode, &newNode)
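		// collect unmarshal errors rather than returning them, so a single bad
		// record doesn't abort the dump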
		if err != nil {
			nodeErrors.Add(err)
		}
		nodes = append(nodes, &newNode)
		return nil
	}, false)
	if err != nil {
		nodeErrors.Add(err)
	}
	return nodes, nodeErrors.Err()
}

// FindNear returns all nodes (excluding self) closest via XOR to the provided
// target node ID, up to the provided limit
func (rt *RoutingTable) FindNear(ctx context.Context, target storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	// initialize a slice of limit+1 to allow for expansion while reordering
	closestNodes := make([]*pb.Node, 0, limit+1)
	// Insertion sort the nodes by xor
	err = rt.iterateNodes(ctx, storj.NodeID{}, func(ctx context.Context, newID storj.NodeID, protoNode []byte) error {
		newPos := len(closestNodes)
		// compare values starting with the greatest xor to newID in the iteration
		for newPos > 0 && compareByXor(closestNodes[newPos-1].Id, newID, target) > 0 {
			// decrement newPos until newID has a greater xor (farther away) than closestNode[newPos-1]
			// this final newPos is the index at which the newID belongs
			newPos--
		}
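		// a final position of limit means the node sorts outside the window of
		// the limit closest nodes seen so far, so it can be skipped entirely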
		if newPos != limit {
			newNode := pb.Node{}
			err := proto.Unmarshal(protoNode, &newNode)
			if err != nil {
				return err
			}
			closestNodes = append(closestNodes, &newNode)
			// if the new node is not the furthest away, insert the node at its correct index
			if newPos != len(closestNodes) {
				copy(closestNodes[newPos+1:], closestNodes[newPos:])
				closestNodes[newPos] = &newNode
				if len(closestNodes) > limit {
					closestNodes = closestNodes[:limit]
				}
			}
		}
		return nil
	}, true)
	return closestNodes, Error.Wrap(err)
}

// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	// valid to connect to node without ID but don't store connection
	if node.Id == (storj.NodeID{}) {
		return nil
	}
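	// if the node is already stored, refresh its record in place; otherwise
	// attempt to add it to the appropriate k-bucket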
	v, err := rt.nodeBucketDB.Get(ctx, storage.Key(node.Id.Bytes()))
	if err != nil && !storage.ErrKeyNotFound.Has(err) {
		return RoutingErr.New("could not get node %s", err)
	}
	if v != nil {
		err = rt.updateNode(ctx, node)
		if err != nil {
			return RoutingErr.New("could not update node %s", err)
		}
		return nil
	}
	_, err = rt.addNode(ctx, node)
	if err != nil {
		return RoutingErr.New("could not add node %s", err)
	}
	return nil
}

// ConnectionFailed removes a node from the routing table when
// a connection fails for the node on the network
func (rt *RoutingTable) ConnectionFailed(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	err = rt.removeNode(ctx, node)
	if err != nil {
		return RoutingErr.New("could not remove node %s", err)
	}
	return nil
}

// SetBucketTimestamp records the time of the last node lookup for a bucket
func (rt *RoutingTable) SetBucketTimestamp(ctx context.Context, bIDBytes []byte, now time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	err = rt.createOrUpdateKBucket(ctx, keyToBucketID(bIDBytes), now)
	if err != nil {
		return NodeErr.New("could not update bucket timestamp %s", err)
	}
	return nil
}

// GetBucketTimestamp retrieves the time of the last node lookup for a bucket
func (rt *RoutingTable) GetBucketTimestamp(ctx context.Context, bIDBytes []byte) (_ time.Time, err error) {
	defer mon.Task()(&ctx)(&err)
	t, err := rt.kadBucketDB.Get(ctx, bIDBytes)
	if err != nil {
		return time.Now(), RoutingErr.New("could not get bucket timestamp %s", err)
	}
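	// the stored value decodes as a varint of nanoseconds since the Unix
	// epoch; the byte count returned by binary.Varint is not needed here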
	timestamp, _ := binary.Varint(t)
	return time.Unix(0, timestamp).UTC(), nil
}
func (rt *RoutingTable) iterateNodes(ctx context.Context, start storj.NodeID, f func(context.Context, storj.NodeID, []byte) error, skipSelf bool) (err error) {
	defer mon.Task()(&ctx)(&err)
	return rt.nodeBucketDB.Iterate(ctx, storage.IterateOptions{First: storage.Key(start.Bytes()), Recurse: true},
		func(ctx context.Context, it storage.Iterator) error {
			var item storage.ListItem
			for it.Next(ctx, &item) {
				nodeID, err := storj.NodeIDFromBytes(item.Key)
				if err != nil {
					return err
				}
				if skipSelf && nodeID == rt.self.Id {
					continue
				}
				err = f(ctx, nodeID, item.Value)
				if err != nil {
					return err
				}
			}
			return nil
		},
	)
}

// ConnFailure implements the Transport failure function
func (rt *RoutingTable) ConnFailure(ctx context.Context, node *pb.Node, err error) {
	err2 := rt.ConnectionFailed(ctx, node)
	if err2 != nil {
		rt.log.Debug(fmt.Sprintf("error with ConnFailure hook %+v : %+v", err, err2))
	}
}

// ConnSuccess implements the Transport success function
func (rt *RoutingTable) ConnSuccess(ctx context.Context, node *pb.Node) {
	err := rt.ConnectionSuccess(ctx, node)
	if err != nil {
		rt.log.Debug("connection success error:", zap.Error(err))
	}
}