storj/pkg/kademlia/routing.go

// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package kademlia

import (
"encoding/binary"
"encoding/hex"
"sync"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/dht"
proto "storj.io/storj/protos/overlay"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
)
// RoutingErr is the class for all errors pertaining to routing table operations
var RoutingErr = errs.Class("routing table error")

// RoutingTable implements the RoutingTable interface
type RoutingTable struct {
	self             *proto.Node
	kadBucketDB      storage.KeyValueStore
	nodeBucketDB     storage.KeyValueStore
	transport        *proto.NodeTransport
	mutex            *sync.Mutex
	replacementCache map[string][]*proto.Node
	idLength         int // kbucket and node id bit length (SHA256) = 256
	bucketSize       int // max number of nodes stored in a kbucket = 20 (k)
	rcBucketSize     int // replacementCache bucket max length
}

// RoutingOptions for configuring RoutingTable
type RoutingOptions struct {
	kpath        string
	npath        string
	idLength     int // TODO (JJ): add checks for > 0
	bucketSize   int
	rcBucketSize int
}

// NewRoutingTable returns a newly configured instance of a RoutingTable
func NewRoutingTable(localNode *proto.Node, options *RoutingOptions) (*RoutingTable, error) {
	logger := zap.L()
	kdb, err := boltdb.NewClient(logger, options.kpath, boltdb.KBucket)
	if err != nil {
		return nil, RoutingErr.New("could not create kadBucketDB: %s", err)
	}
	ndb, err := boltdb.NewClient(logger, options.npath, boltdb.NodeBucket)
	if err != nil {
		return nil, RoutingErr.New("could not create nodeBucketDB: %s", err)
	}
	rp := make(map[string][]*proto.Node)
	rt := &RoutingTable{
		self:             localNode,
		kadBucketDB:      kdb,
		nodeBucketDB:     ndb,
		transport:        &defaultTransport,
		mutex:            &sync.Mutex{},
		replacementCache: rp,
		idLength:         options.idLength,
		bucketSize:       options.bucketSize,
		rcBucketSize:     options.rcBucketSize,
	}
	ok, err := rt.addNode(localNode)
	if !ok || err != nil {
		return nil, RoutingErr.New("could not add localNode to routing table: %s", err)
	}
	return rt, nil
}
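
// The sketch below is not part of the original file: it is a minimal usage
// example, with hypothetical bolt database paths and illustrative parameter
// values, of how a caller might build a RoutingTable from RoutingOptions.
func exampleNewRoutingTable(localNode *proto.Node) (*RoutingTable, error) {
	opts := &RoutingOptions{
		kpath:        "/tmp/kbucket.db", // hypothetical path for the kbucket bolt db
		npath:        "/tmp/nbucket.db", // hypothetical path for the node bolt db
		idLength:     256,               // node ID bit length (SHA256)
		bucketSize:   20,                // k: max nodes per kbucket
		rcBucketSize: 5,                 // illustrative replacement cache bucket size
	}
	return NewRoutingTable(localNode, opts)
}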

// Local returns a copy of the local node
func (rt *RoutingTable) Local() proto.Node {
	return *rt.self
}

// K returns the currently configured maximum number of nodes stored in a bucket
func (rt *RoutingTable) K() int {
	return rt.bucketSize
}

// CacheSize returns the configured maximum size of a replacement cache bucket
func (rt *RoutingTable) CacheSize() int {
	return rt.rcBucketSize
}

// GetBucket retrieves the kbucket corresponding to the provided node ID
// Note: the ID doesn't need to be stored in the table at the time of the search
func (rt *RoutingTable) GetBucket(id string) (bucket dht.Bucket, ok bool) {
	i, err := hex.DecodeString(id)
	if err != nil {
		return &KBucket{}, false
	}
	bucketID, err := rt.getKBucketID(i)
	if err != nil {
		return &KBucket{}, false
	}
	if bucketID == nil {
		return &KBucket{}, false
	}
	unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(bucketID)
	if err != nil {
		return &KBucket{}, false
	}
	return &KBucket{nodes: unmarshaledNodes}, true
}
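
// The sketch below is not part of the original file: it illustrates, under the
// assumption that a caller holds a raw node ID as bytes, that GetBucket expects
// the ID hex-encoded before lookup.
func exampleGetBucketByRawID(rt *RoutingTable, rawID []byte) (dht.Bucket, bool) {
	return rt.GetBucket(hex.EncodeToString(rawID))
}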

// GetBuckets retrieves all buckets from the local node's routing table
func (rt *RoutingTable) GetBuckets() (k []dht.Bucket, err error) {
	bs := []dht.Bucket{}
	kbuckets, err := rt.kadBucketDB.List(nil, 0)
	if err != nil {
		return bs, RoutingErr.New("could not get bucket ids %s", err)
	}
	for _, v := range kbuckets {
		unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(v)
		if err != nil {
			return bs, err
		}
		bs = append(bs, &KBucket{nodes: unmarshaledNodes})
	}
	return bs, nil
}
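
// The sketch below is not part of the original file: it shows how a caller
// might check how many k-buckets the routing table currently holds via
// GetBuckets.
func exampleBucketCount(rt *RoutingTable) (int, error) {
	buckets, err := rt.GetBuckets()
	if err != nil {
		return 0, err
	}
	return len(buckets), nil
}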

// FindNear returns the node corresponding to the provided nodeID if present in the routing table,
// otherwise returns all nodes closest via XOR to the provided nodeID up to the provided limit
func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*proto.Node, error) {
	// if id is in the routing table
	n, err := rt.nodeBucketDB.Get(id.Bytes())
	if n != nil {
		ns, err := unmarshalNodes(storage.Keys{id.Bytes()}, []storage.Value{n})
		if err != nil {
			return []*proto.Node{}, RoutingErr.New("could not unmarshal node %s", err)
		}
		return ns, nil
	}
	if err != nil && !storage.ErrKeyNotFound.Has(err) {
		return []*proto.Node{}, RoutingErr.New("could not get key from rt %s", err)
	}
	// if id is not in the routing table
	nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
	if err != nil {
		return []*proto.Node{}, RoutingErr.New("could not get node ids %s", err)
	}
	sortedIDs := sortByXOR(nodeIDs, id.Bytes())
	if len(sortedIDs) >= limit {
		sortedIDs = sortedIDs[:limit]
	}
	ids, serializedNodes, err := rt.getNodesFromIDs(sortedIDs)
	if err != nil {
		return []*proto.Node{}, RoutingErr.New("could not get nodes %s", err)
	}
	unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
	if err != nil {
		return []*proto.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
	}
	return unmarshaledNodes, nil
}
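
// The sketch below is not part of the original file: it shows a typical
// nearest-node lookup, using the table's configured bucket size (K) as the
// result limit.
func exampleFindClosest(rt *RoutingTable, target dht.NodeID) ([]*proto.Node, error) {
	return rt.FindNear(target, rt.K())
}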

// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(node *proto.Node) error {
	v, err := rt.nodeBucketDB.Get(storage.Key(node.Id))
	if err != nil && !storage.ErrKeyNotFound.Has(err) {
		return RoutingErr.New("could not get node %s", err)
	}
	if v != nil {
		err = rt.updateNode(node)
		if err != nil {
			return RoutingErr.New("could not update node %s", err)
		}
		return nil
	}
	_, err = rt.addNode(node)
	if err != nil {
		return RoutingErr.New("could not add node %s", err)
	}
	return nil
}

// ConnectionFailed removes a node from the routing table when
// a connection to that node fails on the network
func (rt *RoutingTable) ConnectionFailed(node *proto.Node) error {
	nodeID := storage.Key(node.Id)
	bucketID, err := rt.getKBucketID(nodeID)
	if err != nil {
		return RoutingErr.New("could not get k bucket %s", err)
	}
	err = rt.removeNode(bucketID, nodeID)
	if err != nil {
		return RoutingErr.New("could not remove node %s", err)
	}
	return nil
}
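
// The sketch below is not part of the original file: it illustrates the
// intended caller flow of reporting connection outcomes, where a successful
// contact updates or adds the node and a failed contact removes it.
func exampleReportConnection(rt *RoutingTable, node *proto.Node, reachable bool) error {
	if reachable {
		return rt.ConnectionSuccess(node)
	}
	return rt.ConnectionFailed(node)
}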

// SetBucketTimestamp updates the last updated time for a bucket
func (rt *RoutingTable) SetBucketTimestamp(id string, now time.Time) error {
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	err := rt.createOrUpdateKBucket([]byte(id), now)
	if err != nil {
		return NodeErr.New("could not update bucket timestamp %s", err)
	}
	return nil
}

// GetBucketTimestamp retrieves the last updated time for a bucket
func (rt *RoutingTable) GetBucketTimestamp(id string, bucket dht.Bucket) (time.Time, error) {
	t, err := rt.kadBucketDB.Get([]byte(id))
	if err != nil {
		return time.Now(), RoutingErr.New("could not get bucket timestamp %s", err)
	}
	timestamp, _ := binary.Varint(t)
	return time.Unix(0, timestamp).UTC(), nil
}
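
// The sketch below is not part of the original file: it shows how a refresh
// loop might use GetBucketTimestamp and SetBucketTimestamp together, with a
// hypothetical one-hour staleness threshold.
func exampleRefreshBucket(rt *RoutingTable, bucketID string, bucket dht.Bucket) error {
	last, err := rt.GetBucketTimestamp(bucketID, bucket)
	if err != nil {
		return err
	}
	if time.Since(last) > time.Hour { // hypothetical staleness threshold
		return rt.SetBucketTimestamp(bucketID, time.Now())
	}
	return nil
}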