Delete Bootstrap and Kademlia (#2974)

This commit is contained in:
Jennifer Li Johnson 2019-10-04 16:48:41 -04:00 committed by GitHub
parent 4fab22d691
commit 7ceaabb18e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
86 changed files with 276 additions and 8301 deletions

View File

@ -53,7 +53,7 @@ pipeline {
stage('Tests') {
environment {
STORJ_POSTGRES_TEST = 'postgres://postgres@localhost/teststorj?sslmode=disable'
COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=-coverpkg=storj.io/storj/bootstrap/...,storj.io/storj/internal/...,storj.io/storj/lib/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/uplink/...,storj.io/storj/versioncontrol/...'}"
COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=-coverpkg=storj.io/storj/internal/...,storj.io/storj/lib/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/uplink/...,storj.io/storj/versioncontrol/...'}"
}
steps {
sh 'psql -U postgres -c \'create database teststorj;\''

View File

@ -85,7 +85,7 @@ build-npm:
.PHONY: install-sim
install-sim: ## install storj-sim
@echo "Running ${@}"
@go install -race -v storj.io/storj/cmd/storj-sim storj.io/storj/cmd/versioncontrol storj.io/storj/cmd/bootstrap storj.io/storj/cmd/satellite storj.io/storj/cmd/storagenode storj.io/storj/cmd/uplink storj.io/storj/cmd/gateway storj.io/storj/cmd/identity storj.io/storj/cmd/certificates
@go install -race -v storj.io/storj/cmd/storj-sim storj.io/storj/cmd/versioncontrol storj.io/storj/cmd/satellite storj.io/storj/cmd/storagenode storj.io/storj/cmd/uplink storj.io/storj/cmd/gateway storj.io/storj/cmd/identity storj.io/storj/cmd/certificates
##@ Test
@ -134,19 +134,9 @@ test-sim-backwards-compatible: ## Test uploading a file with lastest release (je
##@ Build
.PHONY: images
images: bootstrap-image gateway-image satellite-image storagenode-image uplink-image versioncontrol-image ## Build bootstrap, gateway, satellite, storagenode, uplink, and versioncontrol Docker images
images: gateway-image satellite-image storagenode-image uplink-image versioncontrol-image ## Build gateway, satellite, storagenode, uplink, and versioncontrol Docker images
echo Built version: ${TAG}
.PHONY: bootstrap-image
bootstrap-image: bootstrap_linux_arm bootstrap_linux_arm64 bootstrap_linux_amd64 ## Build bootstrap Docker image
${DOCKER_BUILD} --pull=true -t storjlabs/bootstrap:${TAG}${CUSTOMTAG}-amd64 \
-f cmd/bootstrap/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/bootstrap:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/bootstrap/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/bootstrap:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
-f cmd/bootstrap/Dockerfile .
.PHONY: gateway-image
gateway-image: gateway_linux_arm gateway_linux_arm64 gateway_linux_amd64 ## Build gateway Docker image
${DOCKER_BUILD} --pull=true -t storjlabs/gateway:${TAG}${CUSTOMTAG}-amd64 \
@ -223,10 +213,6 @@ binary:
[ "${FILEEXT}" = ".exe" ] && storj-sign release/${TAG}/$(COMPONENT)_${GOOS}_${GOARCH}${FILEEXT} || echo "Skipping signing"
rm -f release/${TAG}/${COMPONENT}_${GOOS}_${GOARCH}.zip
.PHONY: bootstrap_%
bootstrap_%:
GOOS=$(word 2, $(subst _, ,$@)) GOARCH=$(word 3, $(subst _, ,$@)) COMPONENT=bootstrap $(MAKE) binary
$(MAKE) binary-check COMPONENT=bootstrap GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
.PHONY: gateway_%
gateway_%:
GOOS=$(word 2, $(subst _, ,$@)) GOARCH=$(word 3, $(subst _, ,$@)) COMPONENT=gateway $(MAKE) binary
@ -264,11 +250,12 @@ linksharing_%:
storagenode-updater_%:
GOOS=$(word 2, $(subst _, ,$@)) GOARCH=$(word 3, $(subst _, ,$@)) COMPONENT=storagenode-updater $(MAKE) binary
COMPONENTLIST := bootstrap certificates gateway identity inspector linksharing satellite storagenode storagenode-updater uplink versioncontrol
COMPONENTLIST := certificates gateway identity inspector linksharing satellite storagenode storagenode-updater uplink versioncontrol
OSARCHLIST := darwin_amd64 linux_amd64 linux_arm linux_arm64 windows_amd64 freebsd_amd64
BINARIES := $(foreach C,$(COMPONENTLIST),$(foreach O,$(OSARCHLIST),$C_$O))
.PHONY: binaries
binaries: ${BINARIES} ## Build bootstrap, certificates, gateway, identity, inspector, linksharing, satellite, storagenode, uplink, and versioncontrol binaries (jenkins)
binaries: ${BINARIES} ## Build certificates, gateway, identity, inspector, linksharing, satellite, storagenode, uplink, and versioncontrol binaries (jenkins)
.PHONY: libuplink
libuplink:
@ -292,7 +279,7 @@ deploy: ## Update Kubernetes deployments in staging (jenkins)
push-images: ## Push Docker images to Docker Hub (jenkins)
# images have to be pushed before a manifest can be created
# satellite
for c in bootstrap gateway satellite storagenode uplink versioncontrol ; do \
for c in gateway satellite storagenode uplink versioncontrol ; do \
docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-amd64 \
&& docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-arm32v6 \
&& docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-aarch64 \
@ -324,7 +311,6 @@ binaries-clean: ## Remove all local release binaries (jenkins)
.PHONY: clean-images
clean-images:
-docker rmi storjlabs/bootstrap:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/gateway:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/satellite:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/storagenode:${TAG}${CUSTOMTAG}

View File

@ -1,67 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapdb
import (
"github.com/zeebo/errs"
"storj.io/storj/bootstrap"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/teststore"
)
// Interface-satisfaction check: *DB must implement bootstrap.DB.
var _ bootstrap.DB = (*DB)(nil)

// Config configures the bootstrap node database.
type Config struct {
	Kademlia string // path to the shared bolt database file holding the kademlia buckets
}

// DB contains access to the different key/value stores backing the bootstrap node.
type DB struct {
	// kademlia bucket, node bucket, and antechamber bucket, in that order
	// (see New, which opens them from the same bolt file).
	kdb, ndb, adb storage.KeyValueStore
}
// New opens the shared bolt database at config.Kademlia and returns a DB
// backed by its kademlia, node, and antechamber buckets.
func New(config Config) (*DB, error) {
	stores, err := boltdb.NewShared(config.Kademlia, kademlia.KademliaBucket, kademlia.NodeBucket, kademlia.AntechamberBucket)
	if err != nil {
		return nil, err
	}
	db := &DB{
		kdb: stores[0],
		ndb: stores[1],
		adb: stores[2],
	}
	return db, nil
}
// NewInMemory creates a new in-memory master database for the bootstrap node.
// TODO: still stores data on disk
func NewInMemory() (*DB, error) {
	db := &DB{}
	db.kdb = teststore.New()
	db.ndb = teststore.New()
	db.adb = teststore.New()
	return db, nil
}
// CreateTables initializes the database. The bolt-backed stores need no
// schema setup, so this is a no-op kept to satisfy bootstrap.DB.
func (db *DB) CreateTables() error { return nil }
// Close closes all three underlying key/value stores, collecting any errors.
func (db *DB) Close() error {
	var group errs.Group
	group.Add(db.kdb.Close())
	group.Add(db.ndb.Close())
	group.Add(db.adb.Close())
	return group.Err()
}
// RoutingTable returns the three key/value stores backing the kademlia
// routing table: the kademlia bucket, node bucket, and antechamber bucket.
func (db *DB) RoutingTable() (kdb, ndb, adb storage.KeyValueStore) {
	return db.kdb, db.ndb, db.adb
}

View File

@ -1,48 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapql
import (
"github.com/graphql-go/graphql"
"storj.io/storj/bootstrap/bootstrapweb"
"storj.io/storj/pkg/storj"
)
const (
	// Query is the name of the root graphql query object.
	Query = "query"

	// IsNodeUpQuery is the query name for checking if a node is up.
	IsNodeUpQuery = "isNodeUp"

	// NodeID is the argument name for the node id.
	NodeID = "nodeID"
)

// rootQuery builds the root graphql query object, exposing a single
// isNodeUp(nodeID) boolean field backed by the bootstrap web service.
func rootQuery(service *bootstrapweb.Service) *graphql.Object {
	return graphql.NewObject(graphql.ObjectConfig{
		Name: Query,
		Fields: graphql.Fields{
			IsNodeUpQuery: &graphql.Field{
				Type: graphql.Boolean,
				Args: graphql.FieldConfigArgument{
					NodeID: &graphql.ArgumentConfig{
						Type: graphql.String,
					},
				},
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					// A missing or non-string argument yields "", which
					// NodeIDFromString rejects below.
					inputNodeID, _ := p.Args[NodeID].(string)

					nodeID, err := storj.NodeIDFromString(inputNodeID)
					if err != nil {
						return false, err
					}

					return service.IsNodeAvailable(p.Context, nodeID)
				},
			},
		},
	})
}

View File

@ -1,24 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapql
import (
"github.com/graphql-go/graphql"
"storj.io/storj/bootstrap/bootstrapweb"
)
// CreateSchema creates the graphql schema for the bootstrap api,
// with rootQuery as its only entry point.
func CreateSchema(service *bootstrapweb.Service) (graphql.Schema, error) {
	creator := TypeCreator{}
	if err := creator.Create(service); err != nil {
		return graphql.Schema{}, err
	}
	return graphql.NewSchema(graphql.SchemaConfig{
		Query: creator.RootQuery(),
	})
}

View File

@ -1,38 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapql
import (
"github.com/graphql-go/graphql"
"storj.io/storj/bootstrap/bootstrapweb"
)
// Types returns the graphql type objects built by a creator.
type Types interface {
	RootQuery() *graphql.Object
}

// TypeCreator handles graphql type creation and error checking.
type TypeCreator struct {
	query *graphql.Object // root query object; set by Create
}
// Create builds the root query object and reports any error the
// graphql library recorded while constructing it.
func (c *TypeCreator) Create(service *bootstrapweb.Service) error {
	c.query = rootQuery(service)
	return c.query.Error()
}
// RootQuery returns the root query *graphql.Object built by Create.
func (c *TypeCreator) RootQuery() *graphql.Object {
	return c.query
}

View File

@ -1,136 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapserver
import (
"context"
"encoding/json"
"net"
"net/http"
"path/filepath"
"github.com/graphql-go/graphql"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"storj.io/storj/bootstrap/bootstrapweb"
"storj.io/storj/bootstrap/bootstrapweb/bootstrapserver/bootstrapql"
)
const (
	contentType = "Content-Type"

	applicationJSON    = "application/json"
	applicationGraphql = "application/graphql"
)

// Error is the bootstrap web error class.
var Error = errs.Class("bootstrap web error")

// Config contains configuration for the bootstrap web server.
type Config struct {
	Address   string `help:"server address of the graphql api gateway and frontend app" default:"127.0.0.1:8082"`
	StaticDir string `help:"path to static resources" default:""`
}

// Server represents the bootstrap web server, serving both the
// graphql api and (optionally) the static web UI.
type Server struct {
	log *zap.Logger

	config   Config
	service  *bootstrapweb.Service
	listener net.Listener

	schema graphql.Schema // built lazily in Run, not in NewServer
	server http.Server
}
// NewServer creates a new bootstrap web server serving the graphql api on
// /api/graphql/v0 and, when a static dir is configured, the web app and its
// static assets. The graphql schema is built later, in Run.
func NewServer(logger *zap.Logger, config Config, service *bootstrapweb.Service, listener net.Listener) *Server {
	s := Server{
		log:      logger,
		service:  service,
		config:   config,
		listener: listener,
	}

	router := http.NewServeMux()
	router.HandleFunc("/api/graphql/v0", s.grapqlHandler)
	if s.config.StaticDir != "" {
		router.HandleFunc("/", s.appHandler)
		router.Handle("/static/", http.StripPrefix("/static", http.FileServer(http.Dir(s.config.StaticDir))))
	}

	s.server = http.Server{Handler: router}
	return &s
}
// appHandler serves the single-page web app: every non-static route gets
// the built index.html from the configured static directory.
func (s *Server) appHandler(w http.ResponseWriter, req *http.Request) {
	http.ServeFile(w, req, filepath.Join(s.config.StaticDir, "dist", "public", "index.html"))
}
// grapqlHandler is the graphql endpoint http handler function. It extracts
// the query from the request (GET query string or POST body), executes it
// against the server's schema, and writes the JSON-encoded result.
func (s *Server) grapqlHandler(w http.ResponseWriter, req *http.Request) {
	w.Header().Set(contentType, applicationJSON)

	query, err := getQuery(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	result := graphql.Do(graphql.Params{
		Schema: s.schema,
		// Use the request's context (not context.Background()) so that
		// resolution is canceled when the client disconnects.
		Context:        req.Context(),
		RequestString:  query.Query,
		VariableValues: query.Variables,
		OperationName:  query.OperationName,
		RootObject:     make(map[string]interface{}),
	})

	err = json.NewEncoder(w).Encode(result)
	if err != nil {
		s.log.Error(err.Error())
		return
	}

	sugar := s.log.Sugar()
	sugar.Debug(result)
}
// Run starts the server that hosts the web app and api endpoint.
// It builds the graphql schema first, then runs two goroutines: one serving
// the listener, and one that waits for ctx cancellation and performs a
// graceful Shutdown; the serve goroutine cancels the derived ctx on exit so
// the other goroutine cannot leak. Returns the combined error from both.
func (s *Server) Run(ctx context.Context) error {
	var err error

	s.schema, err = bootstrapql.CreateSchema(s.service)
	if err != nil {
		return Error.Wrap(err)
	}

	ctx, cancel := context.WithCancel(ctx)
	var group errgroup.Group
	group.Go(func() error {
		<-ctx.Done()
		return s.server.Shutdown(context.Background())
	})
	group.Go(func() error {
		defer cancel()
		return s.server.Serve(s.listener)
	})

	return group.Wait()
}
// Close immediately closes the http server and its underlying listener
// (unlike Run's graceful Shutdown path).
func (s *Server) Close() error {
	return s.server.Close()
}

View File

@ -1,49 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapserver
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/zeebo/errs"
"storj.io/storj/bootstrap/bootstrapweb/bootstrapserver/bootstrapql"
)
// graphqlJSON is the JSON request body shape sent by graphql clients.
type graphqlJSON struct {
	Query         string                 // the graphql query text
	OperationName string                 // optional operation to execute when the query defines several
	Variables     map[string]interface{} // optional variable values referenced by the query
}
// getQuery retrieves the graphql query from the request: from the URL query
// string for GET, from the body for POST; other methods are rejected.
func getQuery(req *http.Request) (graphqlJSON, error) {
	var query graphqlJSON
	switch req.Method {
	case http.MethodGet:
		query.Query = req.URL.Query().Get(bootstrapql.Query)
		return query, nil
	case http.MethodPost:
		return queryPOST(req)
	}
	return query, errs.New("wrong http request type")
}
// queryPOST retrieves the graphql query from a POST request body, dispatching
// on Content-Type: raw query text for application/graphql, a graphqlJSON
// document for application/json. The body is always closed; read/decode and
// close errors are combined.
func queryPOST(req *http.Request) (query graphqlJSON, err error) {
	switch typ := req.Header.Get(contentType); typ {
	case applicationGraphql:
		body, err := ioutil.ReadAll(req.Body)
		query.Query = string(body)
		return query, errs.Combine(err, req.Body.Close())
	case applicationJSON:
		err := json.NewDecoder(req.Body).Decode(&query)
		return query, errs.Combine(err, req.Body.Close())
	default:
		return query, errs.New("can't parse request body of type %s", typ)
	}
}

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrapweb
import (
"context"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
)
// Service handles bootstrap related logic on top of kademlia.
type Service struct {
	log      *zap.Logger
	kademlia *kademlia.Kademlia
}
// NewService returns a new Service instance; both dependencies are required.
func NewService(log *zap.Logger, kademlia *kademlia.Kademlia) (*Service, error) {
	switch {
	case log == nil:
		return nil, errs.New("log can't be nil")
	case kademlia == nil:
		return nil, errs.New("kademlia can't be nil")
	}
	return &Service{log: log, kademlia: kademlia}, nil
}
// IsNodeAvailable reports whether the node is reachable, by attempting to
// fetch its peer identity over kademlia. On failure it returns false along
// with the fetch error.
func (s *Service) IsNodeAvailable(ctx context.Context, nodeID pb.NodeID) (bool, error) {
	if _, err := s.kademlia.FetchPeerIdentity(ctx, nodeID); err != nil {
		return false, err
	}
	return true, nil
}

View File

@ -1,266 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package bootstrap
import (
"context"
"net"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"storj.io/storj/bootstrap/bootstrapweb"
"storj.io/storj/bootstrap/bootstrapweb/bootstrapserver"
"storj.io/storj/internal/errs2"
"storj.io/storj/internal/version"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/extensions"
"storj.io/storj/pkg/peertls/tlsopts"
"storj.io/storj/pkg/rpc"
"storj.io/storj/pkg/server"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
// DB is the master database for a Bootstrap Node.
type DB interface {
	// CreateTables initializes the database.
	CreateTables() error
	// Close closes the database.
	Close() error

	// RoutingTable returns the key/value stores backing the kademlia
	// routing table.
	// TODO: use better interfaces
	RoutingTable() (kdb, ndb, adb storage.KeyValueStore)
}

// Config is all the configuration parameters for a Bootstrap Node.
type Config struct {
	Identity identity.Config

	Server   server.Config
	Kademlia kademlia.Config
	Web      bootstrapserver.Config

	Version version.Config
}

// Verify verifies whether configuration is consistent and acceptable.
// Only the kademlia section currently has anything to verify.
func (config *Config) Verify(log *zap.Logger) error {
	return config.Kademlia.Verify(log)
}

// Peer is the representation of a Bootstrap Node.
// Fields are grouped by lifecycle: core dependencies are set first in New,
// then the kademlia services/endpoints, then the web UI server.
type Peer struct {
	// core dependencies
	Log      *zap.Logger
	Identity *identity.FullIdentity
	DB       DB

	Dialer rpc.Dialer

	Server *server.Server

	Version *version.Service

	// services and endpoints
	Kademlia struct {
		RoutingTable *kademlia.RoutingTable
		Service      *kademlia.Kademlia
		Endpoint     *kademlia.Endpoint
		Inspector    *kademlia.Inspector
	}

	// Web server with web UI
	Web struct {
		Listener net.Listener
		Service  *bootstrapweb.Service
		Endpoint *bootstrapserver.Server
	}
}
// New creates a new Bootstrap Node, wiring its subsystems in dependency
// order: version service, tls/server, kademlia (routing table, service,
// public and private inspector endpoints), then the web UI. Every failure
// path closes the partially-constructed peer and combines that close error
// with the original.
func New(log *zap.Logger, full *identity.FullIdentity, db DB, revDB extensions.RevocationDB, config Config, versionInfo version.Info) (*Peer, error) {
	peer := &Peer{
		Log:      log,
		Identity: full,
		DB:       db,
	}

	var err error

	{
		// Only log build info when it is non-zero (i.e. a real build,
		// not a test binary without version info).
		test := version.Info{}
		if test != versionInfo {
			peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
				versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
		}
		peer.Version = version.NewService(log.Named("version"), config.Version, versionInfo, "Bootstrap")
	}

	{ // setup listener and server
		sc := config.Server

		tlsOptions, err := tlsopts.NewOptions(peer.Identity, sc.Config, revDB)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		peer.Dialer = rpc.NewDefaultDialer(tlsOptions)

		peer.Server, err = server.New(log.Named("server"), tlsOptions, sc.Address, sc.PrivateAddress, nil)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}
	}

	{ // setup kademlia
		config := config.Kademlia
		// TODO: move this setup logic into kademlia package
		if config.ExternalAddress == "" {
			// default the advertised address to the server's bound address
			config.ExternalAddress = peer.Addr()
		}

		pbVersion, err := versionInfo.Proto()
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		// self describes this node as it will appear in the routing table.
		self := &overlay.NodeDossier{
			Node: pb.Node{
				Id: peer.ID(),
				Address: &pb.NodeAddress{
					Transport: pb.NodeTransport_TCP_TLS_GRPC,
					Address:   config.ExternalAddress,
				},
			},
			Type: pb.NodeType_BOOTSTRAP,
			Operator: pb.NodeOperator{
				Email:  config.Operator.Email,
				Wallet: config.Operator.Wallet,
			},
			Version: *pbVersion,
		}

		kdb, ndb, adb := peer.DB.RoutingTable()
		peer.Kademlia.RoutingTable, err = kademlia.NewRoutingTable(peer.Log.Named("routing"), self, kdb, ndb, adb, &config.RoutingTableConfig)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		peer.Kademlia.Service, err = kademlia.NewService(peer.Log.Named("kademlia"), peer.Dialer, peer.Kademlia.RoutingTable, config)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		// Public endpoint, registered on both gRPC and DRPC servers.
		peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, nil, peer.Kademlia.RoutingTable, nil)
		pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
		pb.DRPCRegisterNodes(peer.Server.DRPC(), peer.Kademlia.Endpoint)

		// Inspector is private-only: registered on the private servers.
		peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)
		pb.RegisterKadInspectorServer(peer.Server.PrivateGRPC(), peer.Kademlia.Inspector)
		pb.DRPCRegisterKadInspector(peer.Server.PrivateDRPC(), peer.Kademlia.Inspector)
	}

	{ // setup bootstrap web ui
		config := config.Web

		peer.Web.Listener, err = net.Listen("tcp", config.Address)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		peer.Web.Service, err = bootstrapweb.NewService(
			peer.Log.Named("bootstrapWeb:service"),
			peer.Kademlia.Service,
		)
		if err != nil {
			return nil, errs.Combine(err, peer.Close())
		}

		peer.Web.Endpoint = bootstrapserver.NewServer(
			peer.Log.Named("bootstrapWeb:endpoint"),
			config,
			peer.Web.Service,
			peer.Web.Listener,
		)
	}

	return peer, nil
}
// Run runs the bootstrap node until it's either closed or it errors.
// All subsystems (version checker, kademlia bootstrap + service, public
// server, web endpoint) run concurrently in one errgroup sharing ctx;
// context-cancellation errors are ignored so a clean shutdown returns nil.
func (peer *Peer) Run(ctx context.Context) error {
	group, ctx := errgroup.WithContext(ctx)

	group.Go(func() error {
		return errs2.IgnoreCanceled(peer.Version.Run(ctx))
	})

	group.Go(func() error {
		return errs2.IgnoreCanceled(peer.Kademlia.Service.Bootstrap(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(peer.Kademlia.Service.Run(ctx))
	})
	group.Go(func() error {
		// TODO: move the message into Server instead
		// Don't change the format of this comment, it is used to figure out the node id.
		peer.Log.Sugar().Infof("Node %s started", peer.Identity.ID)
		peer.Log.Sugar().Infof("Public server started on %s", peer.Addr())
		peer.Log.Sugar().Infof("Private server started on %s", peer.PrivateAddr())
		return errs2.IgnoreCanceled(peer.Server.Run(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(peer.Web.Endpoint.Run(ctx))
	})

	return group.Wait()
}
// Close closes all the resources. Safe to call on a partially-constructed
// peer (New calls it on failure paths), hence the nil checks. Servers are
// closed first to stop new connections, then services in reverse
// initialization order; all errors are collected and combined.
func (peer *Peer) Close() error {
	var errlist errs.Group

	// TODO: ensure that Close can be called on nil-s that way this code won't need the checks.

	// close servers, to avoid new connections to closing subsystems
	if peer.Server != nil {
		errlist.Add(peer.Server.Close())
	}

	if peer.Web.Endpoint != nil {
		errlist.Add(peer.Web.Endpoint.Close())
	} else if peer.Web.Listener != nil {
		// Endpoint was never built, so the raw listener must be closed directly.
		errlist.Add(peer.Web.Listener.Close())
	}

	// close services in reverse initialization order
	if peer.Kademlia.Service != nil {
		errlist.Add(peer.Kademlia.Service.Close())
	}
	if peer.Kademlia.RoutingTable != nil {
		errlist.Add(peer.Kademlia.RoutingTable.Close())
	}

	return errlist.Err()
}
// ID returns the peer ID.
func (peer *Peer) ID() storj.NodeID { return peer.Identity.ID }

// Local returns the peer local node info, as stored in the routing table.
func (peer *Peer) Local() overlay.NodeDossier { return peer.Kademlia.RoutingTable.Local() }

// Addr returns the public address.
func (peer *Peer) Addr() string { return peer.Server.Addr().String() }

// URL returns the storj.NodeURL combining this peer's ID and public address.
func (peer *Peer) URL() storj.NodeURL { return storj.NodeURL{ID: peer.ID(), Address: peer.Addr()} }

// PrivateAddr returns the private address.
func (peer *Peer) PrivateAddr() string { return peer.Server.PrivateAddr().String() }

View File

@ -1,10 +0,0 @@
# Multi-arch image for the bootstrap node; DOCKER_ARCH selects the alpine base
# (defaults to amd64), GOARCH selects which prebuilt release binary to copy in.
ARG DOCKER_ARCH
FROM ${DOCKER_ARCH:-amd64}/alpine
ARG TAG
ARG GOARCH
ENV GOARCH ${GOARCH}
# public grpc port served by the bootstrap node
EXPOSE 28967
WORKDIR /app
# release binary is built outside the image and copied in from release/<TAG>/
COPY release/${TAG}/bootstrap_linux_${GOARCH:-amd64} /app/bootstrap
COPY cmd/bootstrap/entrypoint /entrypoint
ENTRYPOINT ["/entrypoint"]

View File

@ -1,8 +0,0 @@
#!/bin/sh
# NOTE(review): pipefail is not POSIX sh; BusyBox ash (alpine) accepts it — confirm target shell.
set -euo pipefail

# Generate a default config on first run, then hand off to the daemon,
# forwarding any container arguments. Quote the path so an unusual $HOME
# (spaces, globbing characters) can't break the test.
if [ ! -f "$HOME/.local/share/storj/bootstrap/config.yaml" ]; then
	/app/bootstrap setup
fi

exec ./bootstrap run "$@"

View File

@ -1,160 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/bootstrap"
"storj.io/storj/bootstrap/bootstrapdb"
"storj.io/storj/internal/fpath"
"storj.io/storj/internal/version"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/revocation"
)
var (
	rootCmd = &cobra.Command{
		Use:   "bootstrap",
		Short: "bootstrap",
	}
	runCmd = &cobra.Command{
		Use:   "run",
		Short: "Run the bootstrap server",
		RunE:  cmdRun,
	}
	setupCmd = &cobra.Command{
		Use:         "setup",
		Short:       "Create config files",
		RunE:        cmdSetup,
		Annotations: map[string]string{"type": "setup"},
	}

	// run and setup bind to separate config copies so setup-mode
	// defaults don't leak into run.
	runCfg   bootstrap.Config
	setupCfg bootstrap.Config

	confDir     string
	identityDir string
)

const (
	// defaultServerAddr is the fallback public listen address written by setup.
	defaultServerAddr = ":28967"
)

// init wires the config/identity directory flags and binds the run and setup
// commands to their config structs.
func init() {
	defaultConfDir := fpath.ApplicationDir("storj", "bootstrap")
	defaultIdentityDir := fpath.ApplicationDir("storj", "identity", "bootstrap")
	cfgstruct.SetupFlag(zap.L(), rootCmd, &confDir, "config-dir", defaultConfDir, "main directory for bootstrap configuration")
	cfgstruct.SetupFlag(zap.L(), rootCmd, &identityDir, "identity-dir", defaultIdentityDir, "main directory for bootstrap identity credentials")
	defaults := cfgstruct.DefaultsFlag(rootCmd)
	rootCmd.AddCommand(runCmd)
	rootCmd.AddCommand(setupCmd)
	process.Bind(runCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(setupCmd, &setupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir), cfgstruct.SetupMode())
}
// cmdRun is the RunE for the "run" command: it loads the identity and
// databases, constructs the bootstrap peer, and runs it until it exits.
// Constructors come first (inert), then side-effecting startup.
func cmdRun(cmd *cobra.Command, args []string) (err error) {
	// inert constructors only ====

	ctx, _ := process.Ctx(cmd)
	log := zap.L()

	identity, err := runCfg.Identity.Load()
	if err != nil {
		zap.S().Fatal(err)
	}

	if err := runCfg.Verify(log); err != nil {
		log.Sugar().Error("Invalid configuration: ", err)
		return err
	}

	db, err := bootstrapdb.New(bootstrapdb.Config{
		Kademlia: runCfg.Kademlia.DBPath,
	})
	if err != nil {
		return errs.New("Error starting master database on bootstrap: %+v", err)
	}

	defer func() {
		// combine the close error with whatever is being returned
		err = errs.Combine(err, db.Close())
	}()

	revocationDB, err := revocation.NewDBFromCfg(runCfg.Server.Config)
	if err != nil {
		return errs.New("Error creating revocation database: %+v", err)
	}

	defer func() {
		err = errs.Combine(err, revocationDB.Close())
	}()

	peer, err := bootstrap.New(log, identity, db, revocationDB, runCfg, version.Build)
	if err != nil {
		return err
	}

	// okay, start doing stuff ====

	err = peer.Version.CheckVersion(ctx)
	if err != nil {
		return err
	}

	if err := process.InitMetricsWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
		// telemetry failure is non-fatal: warn and continue
		zap.S().Warn("Failed to initialize telemetry batcher: ", err)
	}

	err = db.CreateTables()
	if err != nil {
		return errs.New("Error creating tables for master database on bootstrap: %+v", err)
	}

	// Close is always attempted so Run's error doesn't mask cleanup.
	runError := peer.Run(ctx)
	closeError := peer.Close()

	return errs.Combine(runError, closeError)
}
// cmdSetup is the RunE for the "setup" command: it creates the config
// directory (refusing to overwrite an existing setup) and writes a
// config.yaml, defaulting the server address and kademlia bootstrap
// address when the user did not set them explicitly.
func cmdSetup(cmd *cobra.Command, args []string) (err error) {
	setupDir, err := filepath.Abs(confDir)
	if err != nil {
		return err
	}

	valid, _ := fpath.IsValidSetupDir(setupDir)
	if !valid {
		return fmt.Errorf("bootstrap configuration already exists (%v)", setupDir)
	}

	err = os.MkdirAll(setupDir, 0700)
	if err != nil {
		return err
	}

	overrides := map[string]interface{}{}

	serverAddress := cmd.Flag("server.address")
	if !serverAddress.Changed {
		overrides[serverAddress.Name] = defaultServerAddr
	}

	kademliaBootstrapAddr := cmd.Flag("kademlia.bootstrap-addr")
	if !kademliaBootstrapAddr.Changed {
		// a bootstrap node bootstraps against itself by default
		overrides[kademliaBootstrapAddr.Name] = "127.0.0.1" + defaultServerAddr
	}

	return process.SaveConfig(cmd, filepath.Join(setupDir, "config.yaml"),
		process.SaveConfigWithOverrides(overrides))
}
// main hands control to the process package, which executes the cobra
// root command with standard storj process setup.
func main() {
	process.Exec(rootCmd)
}

View File

@ -93,7 +93,7 @@ func main() {
defaultConfDir := fpath.ApplicationDir("storj", "cert-signing")
defaultIdentityDir := fpath.ApplicationDir("storj", "identity", "certificates")
cfgstruct.SetupFlag(zap.L(), rootCmd, &confDir, "config-dir", defaultConfDir, "main directory for certificates configuration")
cfgstruct.SetupFlag(zap.L(), rootCmd, &identityDir, "identity-dir", defaultIdentityDir, "main directory for bootstrap identity credentials")
cfgstruct.SetupFlag(zap.L(), rootCmd, &identityDir, "identity-dir", defaultIdentityDir, "main directory for identity credentials")
defaults := cfgstruct.DefaultsFlag(rootCmd)
rootCmd.AddCommand(authCmd)

View File

@ -19,18 +19,10 @@ if [[ -n "${IDENTITY_ADDR:-}" ]]; then
export STORJ_SERVER_ADDRESS="${IDENTITY_ADDR}"
fi
if [[ -n "${BOOTSTRAP_ADDR:-}" ]]; then
export STORJ_KADEMLIA_BOOTSTRAP_ADDR="${BOOTSTRAP_ADDR}"
fi
if [[ ! -f "${CONF_PATH}/config.yaml" ]]; then
./satellite setup $SETUP_PARAMS
fi
RUN_PARAMS="${RUN_PARAMS:-} --config-dir ${CONF_PATH}"
if [[ -n "${BOOTSTRAP_ADDR:-}" ]]; then
RUN_PARAMS="${RUN_PARAMS} --kademlia.bootstrap-addr ${BOOTSTRAP_ADDR}"
fi
exec ./satellite run $RUN_PARAMS "$@"

View File

@ -43,7 +43,6 @@ func TestAutoUpdater(t *testing.T) {
config := &versioncontrol.Config{
Address: "127.0.0.1:0",
Versions: versioncontrol.ServiceVersions{
Bootstrap: "v0.0.1",
Satellite: "v0.0.1",
Storagenode: "v0.0.1",
Uplink: "v0.0.1",

View File

@ -138,7 +138,6 @@ func printDashboard(data *pb.DashboardResponse) error {
w = tabwriter.NewWriter(color.Output, 0, 0, 1, ' ', 0)
// TODO: Get addresses from server data
fmt.Fprintf(w, "\nBootstrap\t%s\n", color.WhiteString(data.GetBootstrapAddress()))
fmt.Fprintf(w, "Internal\t%s\n", color.WhiteString(dashboardCfg.Address))
fmt.Fprintf(w, "External\t%s\n", color.WhiteString(data.GetExternalAddress()))
// Disabling the Link to the Dashboard as its not working yet

View File

@ -43,12 +43,6 @@ spec:
value: "127.0.0.1"
- name: RPC_PORT
value: "7777"
- name: KAD_PORT
value: "8080"
- name: KAD_HOST
value: "bootstrap.storj.io"
- name: KAD_LISTEN_PORT
value: "7776"
- name: PS_DIR
value: "/home/"
ports:

View File

@ -49,8 +49,7 @@ const (
satellitePeer = 0
gatewayPeer = 1
versioncontrolPeer = 2
bootstrapPeer = 3
storagenodePeer = 4
storagenodePeer = 3
// Endpoint
publicGRPC = 0
@ -213,43 +212,9 @@ func newNetwork(flags *Flags) (*Processes, error) {
return readConfigString(&versioncontrol.Address, versioncontrol.Directory, "address")
}
bootstrap := processes.New(Info{
Name: "bootstrap/0",
Executable: "bootstrap",
Directory: filepath.Join(processes.Directory, "bootstrap", "0"),
Address: net.JoinHostPort(host, port(bootstrapPeer, 0, publicGRPC)),
})
// gateway must wait for the versioncontrol to start up
bootstrap.WaitForStart(versioncontrol)
bootstrap.Arguments = withCommon(bootstrap.Directory, Arguments{
"setup": {
"--identity-dir", bootstrap.Directory,
"--web.address", net.JoinHostPort(host, port(bootstrapPeer, 0, publicHTTP)),
"--server.address", bootstrap.Address,
"--server.private-address", net.JoinHostPort(host, port(bootstrapPeer, 0, privateGRPC)),
"--kademlia.bootstrap-addr", bootstrap.Address,
"--kademlia.operator.email", "bootstrap@mail.test",
"--kademlia.operator.wallet", "0x0123456789012345678901234567890123456789",
"--server.extensions.revocation=false",
"--server.use-peer-ca-whitelist=false",
"--version.server-address", fmt.Sprintf("http://%s/", versioncontrol.Address),
"--debug.addr", net.JoinHostPort(host, port(bootstrapPeer, 0, debugHTTP)),
},
"run": {},
})
bootstrap.ExecBefore["run"] = func(process *Process) error {
return readConfigString(&bootstrap.Address, bootstrap.Directory, "server.address")
}
// Create satellites making all satellites wait for bootstrap to start
// Create satellites
if flags.SatelliteCount > maxInstanceCount {
return nil, fmt.Errorf("exceeded the max instance count of %d with Satellite count of %d", maxInstanceCount, flags.SatelliteCount)
}
@ -264,9 +229,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
})
satellites = append(satellites, process)
// satellite must wait for bootstrap to start
process.WaitForStart(bootstrap)
consoleAuthToken := "secure_token"
process.Arguments = withCommon(process.Directory, Arguments{
@ -434,8 +396,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
Address: net.JoinHostPort(host, port(storagenodePeer, i, publicGRPC)),
})
// storage node must wait for bootstrap and satellites to start
process.WaitForStart(bootstrap)
for _, satellite := range satellites {
process.WaitForStart(satellite)
}
@ -448,8 +408,8 @@ func newNetwork(flags *Flags) (*Processes, error) {
"--server.address", process.Address,
"--server.private-address", net.JoinHostPort(host, port(storagenodePeer, i, privateGRPC)),
"--kademlia.operator.email", fmt.Sprintf("storage%d@mail.test", i),
"--kademlia.operator.wallet", "0x0123456789012345678901234567890123456789",
"--operator.email", fmt.Sprintf("storage%d@mail.test", i),
"--operator.wallet", "0x0123456789012345678901234567890123456789",
"--storage2.monitor.minimum-disk-space", "0",
"--storage2.monitor.minimum-bandwidth", "0",

View File

@ -15,7 +15,6 @@ services:
image: storjlabs/satellite:${VERSION:-latest}
environment:
- API_KEY=abc123
- BOOTSTRAP_ADDR=localhost:8080
- STORJ_CHECKER_QUEUE_ADDRESS=redis://redis:6379/?db=0
- STORJ_DATABASE=postgres://postgres:postgres@postgres/satellite?sslmode=disable
- STORJ_LOG_LEVEL=debug
@ -34,9 +33,9 @@ services:
image: storjlabs/storagenode:${VERSION:-latest}
environment:
- SATELLITE_ADDR=satellite:7777
- STORJ_KADEMLIA_EXTERNAL_ADDRESS=storagenode:7777
- STORJ_KADEMLIA_OPERATOR_EMAIL=hello@storj.io
- STORJ_KADEMLIA_OPERATOR_WALLET=0x0000000000000000000000000000000000000000
- STORJ_CONTACT_EXTERNAL_ADDRESS=storagenode:7777
- STORJ_OPERATOR_EMAIL=hello@storj.io
- STORJ_OPERATOR_WALLET=0x0000000000000000000000000000000000000000
- STORJ_LOG_LEVEL=debug
restart: always
links:

View File

@ -1,173 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package testplanet
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/zeebo/errs"
"storj.io/storj/bootstrap"
"storj.io/storj/bootstrap/bootstrapdb"
"storj.io/storj/bootstrap/bootstrapweb/bootstrapserver"
"storj.io/storj/internal/version"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/peertls/extensions"
"storj.io/storj/pkg/peertls/tlsopts"
"storj.io/storj/pkg/revocation"
"storj.io/storj/pkg/server"
"storj.io/storj/versioncontrol"
)
// newBootstrap initializes the bootstrap node
//
// It provisions a working directory and identity for the node, opens the
// bootstrap database (in-memory unless overridden via Reconfigure), assembles
// a bootstrap.Config with short kademlia backoff intervals suited to tests,
// and constructs the peer with bootstrap.New.
func (planet *Planet) newBootstrap() (peer *bootstrap.Peer, err error) {
	// Always register the peer with the planet so Shutdown can close it.
	// NOTE(review): on the error paths peer is still nil here — confirm
	// closablePeer tolerates a nil peer.
	defer func() {
		planet.peers = append(planet.peers, closablePeer{peer: peer})
	}()
	prefix := "bootstrap"
	log := planet.log.Named(prefix)
	dbDir := filepath.Join(planet.directory, prefix)
	if err := os.MkdirAll(dbDir, 0700); err != nil {
		return nil, err
	}
	identity, err := planet.NewIdentity()
	if err != nil {
		return nil, err
	}
	// Use the reconfigured DB factory when a test provides one; otherwise
	// fall back to the in-memory bootstrap database.
	var db bootstrap.DB
	if planet.config.Reconfigure.NewBootstrapDB != nil {
		db, err = planet.config.Reconfigure.NewBootstrapDB(0)
	} else {
		db, err = bootstrapdb.NewInMemory()
	}
	if err != nil {
		return nil, err
	}
	err = db.CreateTables()
	if err != nil {
		return nil, err
	}
	// Track the database so planet.Shutdown closes it.
	planet.databases = append(planet.databases, db)
	config := bootstrap.Config{
		Server: server.Config{
			// Port 0 lets the OS pick free ports for the test network.
			Address: "127.0.0.1:0",
			PrivateAddress: "127.0.0.1:0",
			Config: tlsopts.Config{
				RevocationDBURL: "bolt://" + filepath.Join(dbDir, "revocation.db"),
				UsePeerCAWhitelist: true,
				PeerCAWhitelistPath: planet.whitelistPath,
				PeerIDVersions: "latest",
				Extensions: extensions.Config{
					Revocation: false,
					WhitelistSignedLeaf: false,
				},
			},
		},
		Kademlia: kademlia.Config{
			// Short backoff so test bootstrapping retries quickly.
			BootstrapBackoffBase: 500 * time.Millisecond,
			BootstrapBackoffMax: 2 * time.Second,
			Alpha: 5,
			DBPath: dbDir, // TODO: replace with master db
			Operator: kademlia.OperatorConfig{
				Email: prefix + "@mail.test",
				Wallet: "0x" + strings.Repeat("00", 20),
			},
		},
		Web: bootstrapserver.Config{
			Address: "127.0.0.1:0",
			StaticDir: "./web/bootstrap", // TODO: for development only
		},
		Version: planet.NewVersionConfig(),
	}
	// Allow tests to adjust the config before the peer is constructed.
	if planet.config.Reconfigure.Bootstrap != nil {
		planet.config.Reconfigure.Bootstrap(0, &config)
	}
	versionInfo := planet.NewVersionInfo()
	revocationDB, err := revocation.NewDBFromCfg(config.Server.Config)
	if err != nil {
		return nil, errs.New("Error creating revocation database: %+v", err)
	}
	// NOTE(review): this closes revocationDB as soon as newBootstrap returns,
	// including on success, while the returned peer was constructed with it —
	// confirm bootstrap.New does not retain the handle past construction.
	defer func() {
		err = errs.Combine(err, revocationDB.Close())
	}()
	peer, err = bootstrap.New(log, identity, db, revocationDB, config, versionInfo)
	if err != nil {
		return nil, err
	}
	log.Debug("id=" + peer.ID().String() + " addr=" + peer.Addr())
	return peer, nil
}
// newVersionControlServer initializes the Versioning Server
//
// It creates a per-peer directory, pins every service's minimum version to
// v0.0.1, and constructs a version control peer listening on an OS-chosen
// port.
func (planet *Planet) newVersionControlServer() (peer *versioncontrol.Peer, err error) {
	prefix := "versioncontrol"
	log := planet.log.Named(prefix)
	// NOTE(review): dbDir is created but never referenced afterwards —
	// possibly a leftover; confirm whether versioncontrol needs it.
	dbDir := filepath.Join(planet.directory, prefix)
	if err := os.MkdirAll(dbDir, 0700); err != nil {
		return nil, err
	}
	config := &versioncontrol.Config{
		// Port 0 lets the OS pick a free port for the test network.
		Address: "127.0.0.1:0",
		Versions: versioncontrol.ServiceVersions{
			Bootstrap: "v0.0.1",
			Satellite: "v0.0.1",
			Storagenode: "v0.0.1",
			Uplink: "v0.0.1",
			Gateway: "v0.0.1",
			Identity: "v0.0.1",
		},
	}
	peer, err = versioncontrol.New(log, config)
	if err != nil {
		return nil, err
	}
	log.Debug(" addr= " + peer.Addr())
	return peer, nil
}
// NewVersionInfo returns the Version Info for this planet with tuned metrics.
func (planet *Planet) NewVersionInfo() version.Info {
	return version.Info{
		Timestamp:  time.Now(),
		CommitHash: "testplanet",
		Version: version.SemVer{
			Major: 0,
			Minor: 0,
			Patch: 1,
		},
		Release: false,
	}
}

// NewVersionConfig returns the Version Config for this planet with tuned metrics.
func (planet *Planet) NewVersionConfig() version.Config {
	address := fmt.Sprintf("http://%s/", planet.VersionControl.Addr())
	return version.Config{
		ServerAddress:  address,
		RequestTimeout: 15 * time.Second,
		CheckInterval:  5 * time.Minute,
	}
}

View File

@ -20,7 +20,6 @@ import (
"go.uber.org/zap/zaptest"
"golang.org/x/sync/errgroup"
"storj.io/storj/bootstrap"
"storj.io/storj/internal/testidentity"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/storj"
@ -67,7 +66,6 @@ type Planet struct {
databases []io.Closer
uplinks []*Uplink
Bootstrap *bootstrap.Peer
VersionControl *versioncontrol.Peer
Satellites []*SatelliteSystem
StorageNodes []*storagenode.Peer
@ -175,11 +173,6 @@ func NewCustom(log *zap.Logger, config Config) (*Planet, error) {
return nil, errs.Combine(err, planet.Shutdown())
}
planet.Bootstrap, err = planet.newBootstrap()
if err != nil {
return nil, errs.Combine(err, planet.Shutdown())
}
planet.Satellites, err = planet.newSatellites(config.SatelliteCount)
if err != nil {
return nil, errs.Combine(err, planet.Shutdown())

View File

@ -8,16 +8,12 @@ import (
"go.uber.org/zap"
"storj.io/storj/bootstrap"
"storj.io/storj/satellite"
"storj.io/storj/storagenode"
)
// Reconfigure allows to change node configurations
type Reconfigure struct {
NewBootstrapDB func(index int) (bootstrap.DB, error)
Bootstrap func(index int, config *bootstrap.Config)
NewSatelliteDB func(log *zap.Logger, index int) (satellite.DB, error)
Satellite func(log *zap.Logger, index int, config *satellite.Config)
@ -29,9 +25,6 @@ type Reconfigure struct {
// DisablePeerCAWhitelist returns a `Reconfigure` that sets `UsePeerCAWhitelist` for
// all node types that use kademlia.
var DisablePeerCAWhitelist = Reconfigure{
Bootstrap: func(index int, config *bootstrap.Config) {
config.Server.UsePeerCAWhitelist = false
},
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Server.UsePeerCAWhitelist = false
},

View File

@ -37,7 +37,6 @@ func Run(t *testing.T, config Config, test func(t *testing.T, ctx *testcontext.C
}
planetConfig := config
planetConfig.Reconfigure.NewBootstrapDB = nil
planetConfig.Reconfigure.NewSatelliteDB = func(log *zap.Logger, index int) (satellite.DB, error) {
schema := strings.ToLower(t.Name() + "-satellite/" + strconv.Itoa(index) + "-" + schemaSuffix)
db, err := satellitedb.New(log, pgutil.ConnstrWithSchema(satelliteDB.MasterDB.URL, schema))

View File

@ -0,0 +1,68 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package testplanet
import (
"fmt"
"os"
"path/filepath"
"time"
"storj.io/storj/internal/version"
"storj.io/storj/versioncontrol"
)
// newVersionControlServer initializes the Versioning Server
//
// It creates a per-peer directory, pins every service's minimum version to
// v0.0.1, and constructs a version control peer listening on an OS-chosen
// port.
func (planet *Planet) newVersionControlServer() (peer *versioncontrol.Peer, err error) {
	prefix := "versioncontrol"
	log := planet.log.Named(prefix)
	// NOTE(review): dbDir is created but never referenced afterwards —
	// possibly a leftover; confirm whether versioncontrol needs it.
	dbDir := filepath.Join(planet.directory, prefix)
	if err := os.MkdirAll(dbDir, 0700); err != nil {
		return nil, err
	}
	config := &versioncontrol.Config{
		// Port 0 lets the OS pick a free port for the test network.
		Address: "127.0.0.1:0",
		Versions: versioncontrol.ServiceVersions{
			Satellite: "v0.0.1",
			Storagenode: "v0.0.1",
			Uplink: "v0.0.1",
			Gateway: "v0.0.1",
			Identity: "v0.0.1",
		},
	}
	peer, err = versioncontrol.New(log, config)
	if err != nil {
		return nil, err
	}
	log.Debug(" addr= " + peer.Addr())
	return peer, nil
}
// NewVersionInfo returns the Version Info for this planet with tuned metrics.
func (planet *Planet) NewVersionInfo() version.Info {
	return version.Info{
		Timestamp:  time.Now(),
		CommitHash: "testplanet",
		Version: version.SemVer{
			Major: 0,
			Minor: 0,
			Patch: 1,
		},
		Release: false,
	}
}

// NewVersionConfig returns the Version Config for this planet with tuned metrics.
func (planet *Planet) NewVersionConfig() version.Config {
	address := fmt.Sprintf("http://%s/", planet.VersionControl.Addr())
	return version.Config{
		ServerAddress:  address,
		RequestTimeout: 15 * time.Second,
		CheckInterval:  5 * time.Minute,
	}
}

View File

@ -52,7 +52,6 @@ type SemVer struct {
// AllowedVersions provides the Minimum SemVer per Service
type AllowedVersions struct {
Bootstrap SemVer
Satellite SemVer
Storagenode SemVer
Uplink SemVer
@ -64,7 +63,6 @@ type AllowedVersions struct {
// Processes describes versions for each binary.
type Processes struct {
Bootstrap Process `json:"bootstrap"`
Satellite Process `json:"satellite"`
Storagenode Process `json:"storagenode"`
Uplink Process `json:"uplink"`

View File

@ -1,106 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
// AntechamberErr is the class for all errors pertaining to antechamber operations
var AntechamberErr = errs.Class("antechamber error")
// antechamberAddNode attempts to add a node the antechamber. Only allowed in if within rt neighborhood
//
// Both the routing-table mutex and the antechamber mutex are taken, in that
// order, so the neighborhood check and the insert happen atomically with
// respect to routing-table changes.
func (rt *RoutingTable) antechamberAddNode(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	rt.mutex.Lock()
	rt.acMutex.Lock()
	defer rt.mutex.Unlock()
	defer rt.acMutex.Unlock()
	// Only nodes that would land within the nearest-k neighborhood are
	// admitted; anything farther away is silently ignored (not an error).
	inNeighborhood, err := rt.wouldBeInNearestK(ctx, node.Id)
	if err != nil {
		return AntechamberErr.New("could not check node neighborhood: %s", err)
	}
	if inNeighborhood {
		v, err := proto.Marshal(node)
		if err != nil {
			return AntechamberErr.New("could not marshall node: %s", err)
		}
		// Keyed by the node ID bytes; value is the marshaled protobuf.
		err = rt.antechamber.Put(ctx, node.Id.Bytes(), v)
		if err != nil {
			return AntechamberErr.New("could not add key value pair to antechamber: %s", err)
		}
	}
	return nil
}
// antechamberRemoveNode removes a node from the antechamber
// Called when node moves into RT, node is outside neighborhood (check when any node is added to RT), or node failed contact
func (rt *RoutingTable) antechamberRemoveNode(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	rt.acMutex.Lock()
	defer rt.acMutex.Unlock()
	if deleteErr := rt.antechamber.Delete(ctx, node.Id.Bytes()); deleteErr != nil {
		// A missing key is fine: the node simply was not in the antechamber.
		if storage.ErrKeyNotFound.Has(deleteErr) {
			return nil
		}
		return AntechamberErr.New("could not delete node %s", deleteErr)
	}
	return nil
}
// antechamberFindNear returns the closest nodes to self from the antechamber up to the limit
// it is called in conjunction with RT FindNear in some circumstances
func (rt *RoutingTable) antechamberFindNear(ctx context.Context, target storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	rt.acMutex.Lock()
	defer rt.acMutex.Unlock()
	// closestNodes is kept sorted by XOR distance to target; capacity is
	// limit+1 so an insert may briefly overflow before being trimmed.
	closestNodes := make([]*pb.Node, 0, limit+1)
	err = rt.iterateAntechamber(ctx, storj.NodeID{}, func(ctx context.Context, newID storj.NodeID, protoNode []byte) error {
		// Walk back from the end to find the sorted insert position for newID.
		newPos := len(closestNodes)
		for ; newPos > 0 && compareByXor(closestNodes[newPos-1].Id, newID, target) > 0; newPos-- {
		}
		// Skip only when the node would be inserted at index `limit`, i.e.
		// it is farther than every entry of an already-full result set.
		if newPos != limit {
			newNode := pb.Node{}
			err := proto.Unmarshal(protoNode, &newNode)
			if err != nil {
				return err
			}
			closestNodes = append(closestNodes, &newNode)
			if newPos != len(closestNodes) { //reorder
				// Shift the tail right, place the new node at its slot, then
				// trim back down to at most `limit` entries.
				copy(closestNodes[newPos+1:], closestNodes[newPos:])
				closestNodes[newPos] = &newNode
				if len(closestNodes) > limit {
					closestNodes = closestNodes[:limit]
				}
			}
		}
		return nil
	})
	return closestNodes, Error.Wrap(err)
}
// iterateAntechamber walks antechamber entries in key order starting at
// start, invoking f with each decoded node ID and its stored protobuf bytes.
// Iteration stops at the first error returned by f or by key decoding.
func (rt *RoutingTable) iterateAntechamber(ctx context.Context, start storj.NodeID, f func(context.Context, storj.NodeID, []byte) error) (err error) {
	defer mon.Task()(&ctx)(&err)
	opts := storage.IterateOptions{First: storage.Key(start.Bytes()), Recurse: true}
	walk := func(ctx context.Context, it storage.Iterator) error {
		var entry storage.ListItem
		for it.Next(ctx, &entry) {
			id, parseErr := storj.NodeIDFromBytes(entry.Key)
			if parseErr != nil {
				return parseErr
			}
			if visitErr := f(ctx, id, entry.Value); visitErr != nil {
				return visitErr
			}
		}
		return nil
	}
	return rt.antechamber.Iterate(ctx, opts, walk)
}

View File

@ -1,119 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/testcontext"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// TestAntechamberAddNode exercises neighborhood gating: a node is admitted
// to the antechamber only while it falls within the nearest-k neighborhood
// of the routing table's self ID.
func TestAntechamberAddNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTableWith(ctx, storj.NodeID{127, 255}, routingTableOpts{bucketSize: 2})
	defer ctx.Check(rt.Close)
	// Add node to antechamber even if there are no neighborhood nodes
	node := &pb.Node{Id: storj.NodeID{63, 255}}
	err := rt.antechamberAddNode(ctx, node)
	assert.NoError(t, err)
	val, err := rt.antechamber.Get(ctx, node.Id.Bytes())
	assert.NoError(t, err)
	unmarshaled := &pb.Node{}
	err = proto.Unmarshal(val, unmarshaled)
	assert.NoError(t, err)
	assert.Equal(t, node.Id, unmarshaled.Id)
	// Add two nodes to routing table
	node1 := &pb.Node{Id: storj.NodeID{191, 255}} // [191, 255] XOR [127, 255] = 192
	ok, err := rt.addNode(ctx, node1)
	assert.True(t, ok)
	assert.NoError(t, err)
	node2 := &pb.Node{Id: storj.NodeID{143, 255}} // [143, 255] XOR [127, 255] = 240
	ok, err = rt.addNode(ctx, node2)
	assert.True(t, ok)
	assert.NoError(t, err)
	// node not in neighborhood, should not be added to antechamber
	node3 := &pb.Node{Id: storj.NodeID{133, 255}} // [133, 255] XOR [127, 255] = 250 > 240 neighborhood XOR boundary
	err = rt.antechamberAddNode(ctx, node3)
	assert.NoError(t, err)
	_, err = rt.antechamber.Get(ctx, node3.Id.Bytes())
	assert.Error(t, err)
	// node in neighborhood, should be added to antechamber
	node4 := &pb.Node{Id: storj.NodeID{255, 255}} // [255, 255] XOR [127, 255] = 128 < 240
	err = rt.antechamberAddNode(ctx, node4)
	assert.NoError(t, err)
	val, err = rt.antechamber.Get(ctx, node4.Id.Bytes())
	assert.NoError(t, err)
	unmarshaled = &pb.Node{}
	err = proto.Unmarshal(val, unmarshaled)
	assert.NoError(t, err)
	assert.Equal(t, node4.Id, unmarshaled.Id)
}

// TestAntechamberRemoveNode checks removal is idempotent: deleting a node
// that is absent is not an error, and a stored node is gone after removal.
func TestAntechamberRemoveNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, storj.NodeID{127, 255})
	defer ctx.Check(rt.Close)
	// remove non existent node
	node := &pb.Node{Id: storj.NodeID{191, 255}}
	err := rt.antechamberRemoveNode(ctx, node)
	assert.NoError(t, err)
	// add node to antechamber
	err = rt.antechamberAddNode(ctx, node)
	assert.NoError(t, err)
	// remove node
	err = rt.antechamberRemoveNode(ctx, node)
	assert.NoError(t, err)
	// check if gone
	_, err = rt.antechamber.Get(ctx, node.Id.Bytes())
	assert.Error(t, err)
}

// TestAntechamberFindNear verifies results are ordered by XOR distance to
// the target and truncated to the requested limit.
func TestAntechamberFindNear(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	nodeID := storj.NodeID{127, 255}
	rt := createRoutingTable(ctx, nodeID)
	defer ctx.Check(rt.Close)
	// Check empty antechamber, expect empty findNear
	nodes, err := rt.antechamberFindNear(ctx, nodeID, 2)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(nodes))
	// add 4 nodes
	node1 := &pb.Node{Id: storj.NodeID{191, 255}} // [191, 255] XOR [127, 255] = 192 -> second closest
	err = rt.antechamberAddNode(ctx, node1)
	assert.NoError(t, err)
	node2 := &pb.Node{Id: storj.NodeID{143, 255}}
	err = rt.antechamberAddNode(ctx, node2)
	assert.NoError(t, err)
	node3 := &pb.Node{Id: storj.NodeID{133, 255}}
	err = rt.antechamberAddNode(ctx, node3)
	assert.NoError(t, err)
	node4 := &pb.Node{Id: storj.NodeID{255, 255}} // [255, 255] XOR [127, 255] = 128 -> closest node
	err = rt.antechamberAddNode(ctx, node4)
	assert.NoError(t, err)
	// select 2 closest
	nodes, err = rt.antechamberFindNear(ctx, nodeID, 2)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(nodes))
	assert.Equal(t, node4.Id, nodes[0].Id)
	assert.Equal(t, node1.Id, nodes[1].Id)
}

View File

@ -1,94 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"fmt"
"regexp"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/pb"
)
var (
// Error defines a Kademlia error
Error = errs.Class("kademlia error")
// mon = monkit.Package() // TODO: figure out whether this is needed
)
// Config defines all of the things that are needed to start up Kademlia
// server endpoints (and not necessarily client code).
type Config struct {
	// BootstrapAddr seeds the routing table (see BootstrapNodes below).
	BootstrapAddr        string        `help:"the Kademlia node to bootstrap against" releaseDefault:"bootstrap.storj.io:8888" devDefault:""`
	BootstrapBackoffMax  time.Duration `help:"the maximum amount of time to wait when retrying bootstrap" default:"30s"`
	BootstrapBackoffBase time.Duration `help:"the base interval to wait when retrying bootstrap" default:"1s"`
	DBPath               string        `help:"the path for storage node db services to be created on" default:"$CONFDIR/kademlia"`
	ExternalAddress      string        `user:"true" help:"the public address of the Kademlia node, useful for nodes behind NAT" default:""`
	Operator             OperatorConfig
	// TODO: reduce the number of flags here
	Alpha int `help:"alpha is a system wide concurrency parameter" default:"5"`
	RoutingTableConfig
}
// BootstrapNodes returns bootstrap nodes defined in the config
func (c Config) BootstrapNodes() []pb.Node {
	// No configured address means no bootstrap nodes (nil slice).
	if c.BootstrapAddr == "" {
		return nil
	}
	return []pb.Node{{
		Address: &pb.NodeAddress{
			Transport: pb.NodeTransport_TCP_TLS_GRPC,
			Address:   c.BootstrapAddr,
		},
	}}
}
// Verify verifies whether kademlia config is valid.
//
// Currently only the operator sub-config is validated.
func (c Config) Verify(log *zap.Logger) error {
	return c.Operator.Verify(log)
}

// OperatorConfig defines properties related to storage node operator metadata
type OperatorConfig struct {
	Email  string `user:"true" help:"operator email address" default:""`
	Wallet string `user:"true" help:"operator wallet address" default:""`
}
// Verify verifies whether operator config is valid.
//
// Checks run in order: email first (never fatal), then wallet.
func (c OperatorConfig) Verify(log *zap.Logger) error {
	if err := isOperatorEmailValid(log, c.Email); err != nil {
		return err
	}
	return isOperatorWalletValid(log, c.Wallet)
}
// isOperatorEmailValid logs the configured operator email, warning when it
// is empty; an empty email is allowed and never an error.
func isOperatorEmailValid(log *zap.Logger, email string) error {
	sugar := log.Sugar()
	if email == "" {
		sugar.Warn("Operator email address isn't specified.")
		return nil
	}
	sugar.Info("Operator email: ", email)
	return nil
}

// isOperatorWalletValid requires a well-formed Ethereum-style wallet address
// (0x followed by 40 hex digits) and logs the accepted value.
func isOperatorWalletValid(log *zap.Logger, wallet string) error {
	if wallet == "" {
		return fmt.Errorf("operator wallet address isn't specified")
	}
	if !regexp.MustCompile("^0x[a-fA-F0-9]{40}$").MatchString(wallet) {
		return fmt.Errorf("operator wallet address isn't valid")
	}
	log.Sugar().Info("operator wallet: ", wallet)
	return nil
}

View File

@ -1,145 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"sync/atomic"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/rpc/rpcstatus"
"storj.io/storj/pkg/storj"
)
// EndpointError defines errors class for Endpoint
var EndpointError = errs.Class("kademlia endpoint error")

// SatelliteIDVerifier checks if the connection is from a trusted satellite
type SatelliteIDVerifier interface {
	VerifySatelliteID(ctx context.Context, id storj.NodeID) error
}

// pingStatsSource is the minimal sink the endpoint needs to record that the
// node was pinged; it may be nil (see Ping).
type pingStatsSource interface {
	WasPinged(when time.Time)
}

// Endpoint implements the kademlia Endpoints
type Endpoint struct {
	log          *zap.Logger
	service      *Kademlia
	pingStats    pingStatsSource // optional; nil disables ping accounting
	routingTable *RoutingTable
	trust        SatelliteIDVerifier // used by RequestInfo for storage nodes
	connected    int32               // atomic counter of successful pingbacks
}

// NewEndpoint returns a new kademlia endpoint
func NewEndpoint(log *zap.Logger, service *Kademlia, pingStats pingStatsSource, routingTable *RoutingTable, trust SatelliteIDVerifier) *Endpoint {
	return &Endpoint{
		log:          log,
		service:      service,
		pingStats:    pingStats,
		routingTable: routingTable,
		trust:        trust,
	}
}
// Query is a node to node communication query
//
// When the request asks for a pingback, the sender is pinged first; the
// requested result count is clamped to the routing table's bucket size.
func (endpoint *Endpoint) Query(ctx context.Context, req *pb.QueryRequest) (_ *pb.QueryResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	if req.GetPingback() {
		endpoint.pingback(ctx, req.Sender)
	}
	maxResults := endpoint.routingTable.bucketSize
	limit := int(req.Limit)
	if limit <= 0 || limit > maxResults {
		limit = maxResults
	}
	nodes, findErr := endpoint.routingTable.FindNear(ctx, req.Target.Id, limit)
	if findErr != nil {
		return &pb.QueryResponse{}, EndpointError.New("could not find near endpoint: %v", findErr)
	}
	return &pb.QueryResponse{Sender: req.Sender, Response: nodes}, nil
}
// pingback implements pingback for queries
//
// It pings the target and records the outcome in the routing table as a
// connection success or failure; errors are logged, never returned.
func (endpoint *Endpoint) pingback(ctx context.Context, target *pb.Node) {
	var err error
	defer mon.Task()(&ctx)(&err)
	_, err = endpoint.service.Ping(ctx, *target)
	if err != nil {
		endpoint.log.Debug("connection to node failed", zap.Error(err), zap.Stringer("nodeID", target.Id))
		err = endpoint.routingTable.ConnectionFailed(ctx, target)
		if err != nil {
			endpoint.log.Error("could not respond to connection failed", zap.Error(err))
		}
	} else {
		err = endpoint.routingTable.ConnectionSuccess(ctx, target)
		if err != nil {
			endpoint.log.Error("could not respond to connection success", zap.Error(err))
		} else {
			// Log the first success, then only every 100th, to limit spam.
			count := atomic.AddInt32(&endpoint.connected, 1)
			if count == 1 {
				endpoint.log.Sugar().Debugf("Successfully connected with %s", target.Address.Address)
			} else if count%100 == 0 {
				endpoint.log.Sugar().Debugf("Successfully connected with %s %dx times", target.Address.Address, count)
			}
		}
	}
}
// Ping provides an easy way to verify a node is online and accepting requests
//
// NOTE: this code is very similar to that in storagenode/contact.(*Endpoint).PingNode().
// That other will be used going forward, and this will soon be gutted and deprecated. The
// code similarity will only exist until the transition away from Kademlia is complete.
func (endpoint *Endpoint) Ping(ctx context.Context, req *pb.PingRequest) (_ *pb.PingResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	// The two `if err != nil` guards that previously sat here were dead
	// code: err is the named return and nothing assigned it before the
	// checks, so the Internal/Unauthenticated branches could never fire.
	// They were leftovers of the gutted identity verification; removed.
	if endpoint.pingStats != nil {
		endpoint.pingStats.WasPinged(time.Now())
	}
	return &pb.PingResponse{}, nil
}
// RequestInfo returns the node info
//
// For storage nodes the caller must be a trusted satellite: the peer
// identity is extracted from the connection and verified against trust.
// Other node types return their info without verification.
func (endpoint *Endpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest) (_ *pb.InfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	self := endpoint.service.Local()
	if self.Type == pb.NodeType_STORAGE {
		if endpoint.trust == nil {
			return nil, rpcstatus.Error(rpcstatus.Internal, "missing trust")
		}
		peer, err := identity.PeerIdentityFromContext(ctx)
		if err != nil {
			return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
		}
		err = endpoint.trust.VerifySatelliteID(ctx, peer.ID)
		if err != nil {
			return nil, rpcstatus.Errorf(rpcstatus.PermissionDenied, "untrusted peer %v", peer.ID)
		}
	}
	return &pb.InfoResponse{
		Type:     self.Type,
		Operator: &self.Operator,
		Capacity: &self.Capacity,
		Version:  &self.Version,
	}, nil
}

View File

@ -1,169 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// Inspector is a gRPC service for inspecting kademlia internals
type Inspector struct {
	kademlia *Kademlia
	identity *identity.FullIdentity // used as the center point for CountNodes
}

// NewInspector creates an Inspector
func NewInspector(kademlia *Kademlia, identity *identity.FullIdentity) *Inspector {
	return &Inspector{
		kademlia: kademlia,
		identity: identity,
	}
}
// CountNodes returns the number of nodes in the routing table
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (_ *pb.CountNodesResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	// TODO: this is definitely the wrong way to get this
	nearby, err := srv.kademlia.FindNear(ctx, srv.identity.ID, 100000)
	if err != nil {
		return nil, err
	}
	response := &pb.CountNodesResponse{Count: int64(len(nearby))}
	return response, nil
}

// GetBuckets returns all kademlia buckets for current kademlia instance
func (srv *Inspector) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest) (_ *pb.GetBucketsResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	keys, err := srv.kademlia.GetBucketIds(ctx)
	if err != nil {
		return nil, err
	}
	// TODO(bryanchriswhite): should use bucketID type
	ids, err := storj.NodeIDsFromBytes(keys.ByteSlices())
	if err != nil {
		return nil, err
	}
	return &pb.GetBucketsResponse{
		Total: int64(len(keys)),
		// TODO(bryanchriswhite): should use bucketID type
		Ids: ids,
	}, nil
}

// FindNear sends back limit of near nodes
func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (_ *pb.FindNearResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	nodes, err := srv.kademlia.FindNear(ctx, req.Start, int(req.Limit))
	if err != nil {
		return &pb.FindNearResponse{}, err
	}
	return &pb.FindNearResponse{Nodes: nodes}, nil
}

// PingNode sends a PING RPC to the provided node ID in the Kad network.
func (srv *Inspector) PingNode(ctx context.Context, req *pb.PingNodeRequest) (_ *pb.PingNodeResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	target := pb.Node{
		Id: req.Id,
		Address: &pb.NodeAddress{
			Address: req.Address,
		},
	}
	if _, err = srv.kademlia.Ping(ctx, target); err != nil {
		return &pb.PingNodeResponse{Ok: false}, Error.Wrap(err)
	}
	return &pb.PingNodeResponse{Ok: true}, nil
}
// LookupNode triggers a Kademlia lookup and returns the node the network found.
func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest) (_ *pb.LookupNodeResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	id, err := storj.NodeIDFromString(req.Id)
	if err != nil {
		return &pb.LookupNodeResponse{}, err
	}
	found, err := srv.kademlia.FindNode(ctx, id)
	if err != nil {
		return &pb.LookupNodeResponse{}, err
	}
	return &pb.LookupNodeResponse{Node: &found}, nil
}

// DumpNodes returns all of the nodes in the routing table database.
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (_ *pb.DumpNodesResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	all, err := srv.kademlia.DumpNodes(ctx)
	if err != nil {
		return nil, err
	}
	return &pb.DumpNodesResponse{Nodes: all}, nil
}

// NodeInfo sends a PING RPC to a node and returns its local info.
func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (_ *pb.NodeInfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	target := pb.Node{Id: req.Id, Address: req.Address}
	info, err := srv.kademlia.FetchInfo(ctx, target)
	if err != nil {
		return &pb.NodeInfoResponse{}, err
	}
	return &pb.NodeInfoResponse{
		Type:     info.GetType(),
		Operator: info.GetOperator(),
		Capacity: info.GetCapacity(),
		Version:  info.GetVersion(),
	}, nil
}
// GetBucketList returns the list of buckets with their routing nodes and their cached nodes
//
// For every bucket ID it collects the routing-table nodes and the cached
// nodes within that k-bucket.
func (srv *Inspector) GetBucketList(ctx context.Context, req *pb.GetBucketListRequest) (_ *pb.GetBucketListResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	bucketIds, err := srv.kademlia.GetBucketIds(ctx)
	if err != nil {
		return nil, err
	}
	buckets := make([]*pb.GetBucketListResponse_Bucket, len(bucketIds))
	for i, b := range bucketIds {
		bucketID := keyToBucketID(b)
		routingNodes, err := srv.kademlia.GetNodesWithinKBucket(ctx, bucketID)
		if err != nil {
			return nil, err
		}
		cachedNodes := srv.kademlia.GetCachedNodesWithinKBucket(bucketID)
		buckets[i] = &pb.GetBucketListResponse_Bucket{
			// Reuse the already-converted bucketID instead of calling
			// keyToBucketID(b) a second time, as the original did.
			BucketId:     bucketID,
			RoutingNodes: routingNodes,
			CachedNodes:  cachedNodes,
		}
	}
	return &pb.GetBucketListResponse{
		Buckets: buckets,
	}, nil
}

View File

@ -1,390 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"math/rand"
"sync/atomic"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/kademlia/kademliaclient"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/rpc"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
var (
// NodeErr is the class for all errors pertaining to node operations
NodeErr = errs.Class("node error")
// BootstrapErr is the class for all errors pertaining to bootstrapping a node
BootstrapErr = errs.Class("bootstrap node error")
// NodeNotFound is returned when a lookup can not produce the requested node
NodeNotFound = errs.Class("node not found")
// TODO: shouldn't default to TCP but not sure what to do yet
defaultTransport = pb.NodeTransport_TCP_TLS_GRPC
mon = monkit.Package()
)
// Kademlia is an implementation of kademlia network.
type Kademlia struct {
	log            *zap.Logger
	alpha          int // alpha is a system wide concurrency parameter
	routingTable   *RoutingTable
	bootstrapNodes []pb.Node
	dialer         *kademliaclient.Dialer
	lookups        sync2.WorkGroup // gates in-flight lookups so Close can drain them

	bootstrapFinished sync2.Fence // released when Bootstrap returns (see WaitForBootstrap)

	bootstrapBackoffMax  time.Duration
	bootstrapBackoffBase time.Duration

	refreshThreshold int64
	RefreshBuckets   sync2.Cycle
}
// NewService returns a newly configured Kademlia instance
func NewService(log *zap.Logger, dialer rpc.Dialer, rt *RoutingTable, config Config) (*Kademlia, error) {
	return &Kademlia{
		log:                  log,
		alpha:                config.Alpha,
		routingTable:         rt,
		bootstrapNodes:       config.BootstrapNodes(),
		bootstrapBackoffMax:  config.BootstrapBackoffMax,
		bootstrapBackoffBase: config.BootstrapBackoffBase,
		dialer:               kademliaclient.NewDialer(log.Named("dialer"), dialer, rt),
		refreshThreshold:     int64(time.Minute),
	}, nil
}

// Close closes all kademlia connections and prevents new ones from being created.
func (k *Kademlia) Close() error {
	closeErr := k.dialer.Close()
	k.lookups.Close()
	k.lookups.Wait()
	return closeErr
}
// FindNear returns all nodes from a starting node up to a maximum limit
// stored in the local routing table.
func (k *Kademlia) FindNear(ctx context.Context, start storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	return k.routingTable.FindNear(ctx, start, limit)
}

// GetBucketIds returns a storage.Keys type of bucket ID's in the Kademlia instance
func (k *Kademlia) GetBucketIds(ctx context.Context) (_ storage.Keys, err error) {
	defer mon.Task()(&ctx)(&err)
	return k.routingTable.GetBucketIds(ctx)
}

// Local returns the local node
func (k *Kademlia) Local() overlay.NodeDossier {
	return k.routingTable.Local()
}

// SetBootstrapNodes sets the bootstrap nodes.
// Must be called before anything starting to use kademlia.
func (k *Kademlia) SetBootstrapNodes(nodes []pb.Node) { k.bootstrapNodes = nodes }

// GetBootstrapNodes gets the bootstrap nodes.
func (k *Kademlia) GetBootstrapNodes() []pb.Node { return k.bootstrapNodes }

// DumpNodes returns all the nodes in the node database
func (k *Kademlia) DumpNodes(ctx context.Context) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	return k.routingTable.DumpNodes(ctx)
}
// Bootstrap contacts one of a set of pre defined trusted nodes on the network and
// begins populating the local Kademlia node
//
// It retries with exponential backoff (base interval doubling up to the
// configured max). Each round first resolves the identity of every
// reachable bootstrap node, then runs a lookup for our own ID to populate
// the routing table. The bootstrapFinished fence is released on exit
// regardless of outcome.
//
// Fixes over the previous revision: log/error message typos corrected
// ("bootsrap", "Boostraping", doubled "found"), and the inner loop variable
// renamed so it no longer shadows the outer backoff round counter.
func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	defer k.bootstrapFinished.Release()
	defer k.timeTrack(time.Now(), "Bootstrap")
	if !k.lookups.Start() {
		return context.Canceled
	}
	defer k.lookups.Done()
	if len(k.bootstrapNodes) == 0 {
		k.log.Warn("No bootstrap address specified", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id))
		return nil
	}
	waitInterval := k.bootstrapBackoffBase
	var errGroup errs.Group
	for i := 0; waitInterval < k.bootstrapBackoffMax; i++ {
		if i > 0 {
			time.Sleep(waitInterval)
			waitInterval *= 2
		}
		var foundOnlineBootstrap bool
		// idx (not i) so the backoff round counter above is not shadowed.
		for idx, node := range k.bootstrapNodes {
			if ctx.Err() != nil {
				k.log.Debug("Context Error received while Bootstrapping ", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id), zap.Stringer("Bootstrap Node", node.Id))
				errGroup.Add(ctx.Err())
				return errGroup.Err()
			}
			ident, err := k.dialer.FetchPeerIdentityUnverified(ctx, node.Address.Address)
			if err != nil {
				errGroup.Add(BootstrapErr.New("%s : %s unable to fetch unverified peer identity node address %s: %s", k.routingTable.self.Type.String(), k.routingTable.self.Id.String(), node.Address.Address, err))
				continue
			}
			// Record the now-resolved node ID under the routing table lock.
			k.routingTable.mutex.Lock()
			node.Id = ident.ID
			k.bootstrapNodes[idx] = node
			k.routingTable.mutex.Unlock()
			foundOnlineBootstrap = true
		}
		if !foundOnlineBootstrap {
			errGroup.Add(BootstrapErr.New("%s : %s found no bootstrap node online", k.routingTable.self.Type.String(), k.routingTable.self.Id.String()))
			continue
		}
		//find nodes most similar to self
		k.routingTable.mutex.Lock()
		id := k.routingTable.self.Id
		k.routingTable.mutex.Unlock()
		_, err := k.lookup(ctx, id)
		if err != nil {
			errGroup.Add(BootstrapErr.Wrap(err))
			continue
		}
		return nil
		// TODO(dylan): We do not currently handle this last bit of behavior.
		// ```
		// Finally, u refreshes all k-buckets further away than its closest neighbor.
		// During the refreshes, u both populates its own k-buckets and inserts
		// itself into other nodes' k-buckets as necessary.
		// ```
	}
	errGroup.Add(BootstrapErr.New("%s : %s unable to start bootstrap after final wait time of %s", k.routingTable.self.Type.String(), k.routingTable.self.Id.String(), waitInterval))
	return errGroup.Err()
}
// WaitForBootstrap blocks until the initial bootstrap pinging has completed.
func (k *Kademlia) WaitForBootstrap() {
	k.bootstrapFinished.Wait()
}
// FetchPeerIdentity locates the node on the network and returns its peer identity.
func (k *Kademlia) FetchPeerIdentity(ctx context.Context, nodeID storj.NodeID) (_ *identity.PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)
	if !k.lookups.Start() {
		return nil, context.Canceled
	}
	defer k.lookups.Done()
	// resolve the ID to a reachable node first, then dial it for its identity
	target, findErr := k.FindNode(ctx, nodeID)
	if findErr != nil {
		return nil, findErr
	}
	return k.dialer.FetchPeerIdentity(ctx, target)
}
// Ping checks that the provided node is still accessible on the network.
func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (_ pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if !k.lookups.Start() {
		return pb.Node{}, context.Canceled
	}
	defer k.lookups.Done()
	alive, pingErr := k.dialer.PingNode(ctx, node)
	switch {
	case pingErr != nil:
		return pb.Node{}, NodeErr.Wrap(pingErr)
	case !alive:
		return pb.Node{}, NodeErr.New("%s : %s failed to ping node ID %s", k.routingTable.self.Type.String(), k.routingTable.self.Id.String(), node.Id.String())
	default:
		return node, nil
	}
}
// FetchInfo connects to a node address and returns the node info.
func (k *Kademlia) FetchInfo(ctx context.Context, node pb.Node) (_ *pb.InfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	if !k.lookups.Start() {
		return nil, context.Canceled
	}
	defer k.lookups.Done()
	resp, dialErr := k.dialer.FetchInfo(ctx, node)
	if dialErr != nil {
		return nil, NodeErr.Wrap(dialErr)
	}
	return resp, nil
}
// FindNode looks up the provided NodeID first in the local routing table, and
// if it is not found begins searching the network for the NodeID.
//
// Returns a NodeNotFound error when the lookup completes without locating the
// node.
func (k *Kademlia) FindNode(ctx context.Context, nodeID storj.NodeID) (_ pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if !k.lookups.Start() {
		return pb.Node{}, context.Canceled
	}
	defer k.lookups.Done()
	results, err := k.lookup(ctx, nodeID)
	if err != nil {
		return pb.Node{}, err
	}
	if len(results) < 1 {
		// fix: previously this double-wrapped the NodeNotFound class
		// (NodeNotFound.Wrap(NodeNotFound.New(...))) and formatted the
		// always-nil err into the message as "%s".
		return pb.Node{}, NodeNotFound.New("%s : %s couldn't find node ID %s", k.routingTable.self.Type.String(), k.routingTable.self.Id.String(), nodeID.String())
	}
	return *results[0], nil
}
// lookup initiates a kademlia node lookup for nodeID.
//
// It seeds peer discovery with the K nearest nodes from the local routing
// table, runs the iterative lookup, and then stamps the target's k-bucket so
// the periodic refresher treats it as recently contacted.
func (k *Kademlia) lookup(ctx context.Context, nodeID storj.NodeID) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if !k.lookups.Start() {
		return nil, context.Canceled
	}
	defer k.lookups.Done()
	nodes, err := k.routingTable.FindNear(ctx, nodeID, k.routingTable.K())
	if err != nil {
		return nil, err
	}
	self := k.routingTable.Local().Node
	lookup := newPeerDiscovery(k.log, k.dialer, nodeID, nodes, k.routingTable.K(), k.alpha, &self)
	results, err := lookup.Run(ctx)
	if err != nil {
		return nil, err
	}
	// bucket-timestamp bookkeeping is best-effort: failures are logged but the
	// lookup results are still returned
	bucket, err := k.routingTable.getKBucketID(ctx, nodeID)
	if err != nil {
		k.log.Warn("Error getting getKBucketID in kad lookup", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id))
	} else {
		err = k.routingTable.SetBucketTimestamp(ctx, bucket[:], time.Now())
		if err != nil {
			k.log.Warn("Error updating bucket timestamp in kad lookup", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id))
		}
	}
	return results, nil
}
// GetNodesWithinKBucket returns all the routing nodes in the specified k-bucket.
func (k *Kademlia) GetNodesWithinKBucket(ctx context.Context, bID bucketID) (_ []*pb.Node, err error) {
	return k.routingTable.getUnmarshaledNodesFromBucket(ctx, bID)
}
// GetCachedNodesWithinKBucket returns all the cached nodes in the specified
// k-bucket (the routing table's replacement cache for that bucket).
func (k *Kademlia) GetCachedNodesWithinKBucket(bID bucketID) []*pb.Node {
	return k.routingTable.replacementCache[bID]
}
// SetBucketRefreshThreshold changes the threshold when buckets are considered
// stale and need refreshing. Stored atomically; read by the refresh cycle in Run.
func (k *Kademlia) SetBucketRefreshThreshold(threshold time.Duration) {
	atomic.StoreInt64(&k.refreshThreshold, int64(threshold))
}
// Run occasionally refreshes stale kad buckets until the context is canceled.
func (k *Kademlia) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	defer k.timeTrack(time.Now(), "Kad Refresh")
	if !k.lookups.Start() {
		return context.Canceled
	}
	defer k.lookups.Done()
	k.RefreshBuckets.SetInterval(5 * time.Minute)
	return k.RefreshBuckets.Run(ctx, func(ctx context.Context) error {
		// refresh failures are logged rather than returned so the cycle keeps running
		threshold := time.Duration(atomic.LoadInt64(&k.refreshThreshold))
		err := k.refresh(ctx, threshold)
		if err != nil {
			k.log.Warn("bucket refresh failed", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id), zap.Error(err))
		}
		return nil
	})
}
// refresh walks every k-bucket and, for each bucket not contacted within
// threshold, performs a lookup for a random ID inside that bucket's range to
// repopulate it.
func (k *Kademlia) refresh(ctx context.Context, threshold time.Duration) (err error) {
	defer mon.Task()(&ctx)(&err)
	bIDs, err := k.routingTable.GetBucketIds(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	now := time.Now()
	startID := bucketID{}
	// renamed from `errors`: that name shadowed the standard library package
	// and was inconsistent with the errGroup naming used elsewhere in this file.
	var errGroup errs.Group
	for _, bID := range bIDs {
		endID := keyToBucketID(bID)
		ts, tErr := k.routingTable.GetBucketTimestamp(ctx, bID)
		if tErr != nil {
			errGroup.Add(tErr)
		} else if now.After(ts.Add(threshold)) {
			rID, _ := randomIDInRange(startID, endID)
			_, _ = k.FindNode(ctx, rID) // ignore node not found
		}
		startID = endID
	}
	return Error.Wrap(errGroup.Err())
}
// randomIDInRange finds a random node ID within the range (start..end].
//
// It walks the ID byte-by-byte: while the generated prefix still equals
// start's (resp. end's) prefix, the low (resp. high) bound constrains the
// byte choice; once a byte diverges from a bound, that bound no longer
// applies to later bytes.
func randomIDInRange(start, end bucketID) (storj.NodeID, error) {
	randID := storj.NodeID{}
	divergedHigh := false
	divergedLow := false
	for x := 0; x < len(randID); x++ {
		s := byte(0)
		if !divergedLow {
			s = start[x]
		}
		e := byte(255)
		if !divergedHigh {
			e = end[x]
		}
		if s > e {
			return storj.NodeID{}, errs.New("Random id range was invalid")
		}
		if s == e {
			randID[x] = s
		} else {
			// pick r uniformly in (s..e]
			r := s + byte(rand.Intn(int(e-s))) + 1
			if r < e {
				divergedHigh = true
			}
			if r > s {
				divergedLow = true
			}
			randID[x] = r
		}
	}
	if !divergedLow {
		if !divergedHigh { // start == end: the range (start..end] is empty
			return storj.NodeID{}, errs.New("Random id range was invalid")
		} else if randID[len(randID)-1] == start[len(randID)-1] { // start == randID
			// nudge the last byte so the result is strictly greater than start
			randID[len(randID)-1] = start[len(randID)-1] + 1
		}
	}
	return randID, nil
}
// timeTrack logs at debug level how long a function ran for; intended to be
// called as `defer k.timeTrack(time.Now(), "name")`.
func (k *Kademlia) timeTrack(start time.Time, name string) {
	elapsed := time.Since(start)
	k.log.Debug("", zap.Stringer(k.routingTable.self.Type.String(), k.routingTable.self.Id), zap.Duration(name, elapsed))
}

View File

@ -1,107 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// +build drpc
package kademlia
import (
"context"
"crypto/tls"
"net"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/drpc/drpcserver"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testidentity"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/listenmux"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts"
)
// newListener creates a TCP listener wrapped in a listenmux and returns the
// DRPC route plus a cancel func that stops the mux.
func newListener(t *testing.T, ctx *testcontext.Context, addr string) (net.Listener, func()) {
	lis, err := net.Listen("tcp", addr)
	require.NoError(t, err)
	listenCtx, cancel := context.WithCancel(ctx)
	mux := listenmux.New(lis, 8)
	ctx.Go(func() error { return mux.Run(listenCtx) })
	return mux.Route("DRPC!!!1"), cancel
}
// testNode starts a Kademlia instance backed by a DRPC Nodes server over TLS
// and returns it with a cleanup func that stops the server and closes it.
func testNode(t *testing.T, ctx *testcontext.Context, name string, bn []pb.Node) (*Kademlia, func()) {
	lis, lisCancel := newListener(t, ctx, "127.0.0.1:0")
	fid, err := testidentity.NewTestIdentity(ctx)
	require.NoError(t, err)
	logger := zaptest.NewLogger(t)
	k, err := newKademlia(logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
	require.NoError(t, err)
	s := NewEndpoint(logger, k, nil, k.routingTable, nil)
	tlsOptions, err := tlsopts.NewOptions(fid, tlsopts.Config{PeerIDVersions: "*"}, nil)
	require.NoError(t, err)
	tlsLis := tls.NewListener(lis, tlsOptions.ServerTLSConfig())
	drpcServer := drpcserver.New()
	pb.DRPCRegisterNodes(drpcServer, s)
	serveCtx, cancel := context.WithCancel(ctx)
	ctx.Go(func() error { return drpcServer.Serve(serveCtx, tlsLis) })
	return k, func() {
		cancel()
		lisCancel()
		assert.NoError(t, k.Close())
	}
}
// startTestNodeServer starts a mock DRPC Nodes server on an ephemeral port and
// returns the mock, its identity, its address, and a cleanup func.
func startTestNodeServer(t *testing.T, ctx *testcontext.Context) (*mockNodesServer, *identity.FullIdentity, string, func()) {
	lis, lisCancel := newListener(t, ctx, "127.0.0.1:0")
	ca, err := testidentity.NewTestCA(ctx)
	require.NoError(t, err)
	fullIdentity, err := ca.NewIdentity()
	require.NoError(t, err)
	tlsOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{}, nil)
	require.NoError(t, err)
	tlsLis := tls.NewListener(lis, tlsOptions.ServerTLSConfig())
	drpcServer := drpcserver.New()
	mn := &mockNodesServer{queryCalled: 0}
	pb.DRPCRegisterNodes(drpcServer, mn)
	serveCtx, cancel := context.WithCancel(context.Background())
	ctx.Go(func() error { return drpcServer.Serve(serveCtx, tlsLis) })
	return mn, fullIdentity, lis.Addr().String(), func() {
		cancel()
		lisCancel()
	}
}
// newTestServer starts a mock DRPC Nodes server on the given listener and
// returns the mock plus the serve-context cancel func.
func newTestServer(t *testing.T, ctx *testcontext.Context, lis net.Listener) (*mockNodesServer, func()) {
	ca, err := testidentity.NewTestCA(ctx)
	require.NoError(t, err)
	fullIdentity, err := ca.NewIdentity()
	require.NoError(t, err)
	tlsOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{}, nil)
	require.NoError(t, err)
	tlsLis := tls.NewListener(lis, tlsOptions.ServerTLSConfig())
	drpcServer := drpcserver.New()
	mn := &mockNodesServer{queryCalled: 0}
	pb.DRPCRegisterNodes(drpcServer, mn)
	serveCtx, cancel := context.WithCancel(context.Background())
	ctx.Go(func() error { return drpcServer.Serve(serveCtx, tlsLis) })
	return mn, cancel
}

View File

@ -1,112 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// +build !drpc
package kademlia
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testidentity"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts"
)
// newListener creates a TCP listener on addr and returns it together with a
// cleanup func that closes it.
func newListener(t *testing.T, ctx *testcontext.Context, addr string) (net.Listener, func()) {
	lis, err := net.Listen("tcp", addr)
	require.NoError(t, err)
	closer := func() { _ = lis.Close() }
	return lis, closer
}
// testNode starts a Kademlia instance backed by a gRPC Nodes server and
// returns it with a cleanup func that gracefully stops the server.
func testNode(t *testing.T, ctx *testcontext.Context, name string, bn []pb.Node) (*Kademlia, func()) {
	lis, lisCancel := newListener(t, ctx, "127.0.0.1:0")
	fid, err := testidentity.NewTestIdentity(ctx)
	require.NoError(t, err)
	tlsOptions, err := tlsopts.NewOptions(fid, tlsopts.Config{PeerIDVersions: "*"}, nil)
	require.NoError(t, err)
	logger := zaptest.NewLogger(t)
	k, err := newKademlia(logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
	require.NoError(t, err)
	s := NewEndpoint(logger, k, nil, k.routingTable, nil)
	grpcServer := grpc.NewServer(tlsOptions.ServerOption())
	pb.RegisterNodesServer(grpcServer, s)
	ctx.Go(func() error {
		// ErrServerStopped is the expected shutdown signal, not a failure
		err := grpcServer.Serve(lis)
		if err == grpc.ErrServerStopped {
			err = nil
		}
		return err
	})
	return k, func() {
		grpcServer.GracefulStop()
		lisCancel()
		assert.NoError(t, k.Close())
	}
}
// startTestNodeServer starts a mock gRPC Nodes server on an ephemeral port and
// returns the mock, its identity, its address, and a cleanup func.
func startTestNodeServer(t *testing.T, ctx *testcontext.Context) (*mockNodesServer, *identity.FullIdentity, string, func()) {
	lis, lisCancel := newListener(t, ctx, "127.0.0.1:0")
	ca, err := testidentity.NewTestCA(ctx)
	require.NoError(t, err)
	fullIdentity, err := ca.NewIdentity()
	require.NoError(t, err)
	tlsOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{}, nil)
	require.NoError(t, err)
	grpcServer := grpc.NewServer(tlsOptions.ServerOption())
	mn := &mockNodesServer{queryCalled: 0}
	pb.RegisterNodesServer(grpcServer, mn)
	ctx.Go(func() error {
		// ErrServerStopped is the expected shutdown signal, not a failure
		err := grpcServer.Serve(lis)
		if err == grpc.ErrServerStopped {
			err = nil
		}
		return err
	})
	return mn, fullIdentity, lis.Addr().String(), func() {
		grpcServer.GracefulStop()
		lisCancel()
	}
}
// newTestServer starts a mock gRPC Nodes server on the given listener and
// returns the mock plus the server's Stop func as cleanup.
func newTestServer(t *testing.T, ctx *testcontext.Context, lis net.Listener) (*mockNodesServer, func()) {
	ca, err := testidentity.NewTestCA(ctx)
	require.NoError(t, err)
	fullIdentity, err := ca.NewIdentity()
	require.NoError(t, err)
	tlsOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{}, nil)
	require.NoError(t, err)
	grpcServer := grpc.NewServer(tlsOptions.ServerOption())
	mn := &mockNodesServer{queryCalled: 0}
	pb.RegisterNodesServer(grpcServer, mn)
	ctx.Go(func() error {
		// ErrServerStopped is the expected shutdown signal, not a failure
		err := grpcServer.Serve(lis)
		if err == grpc.ErrServerStopped {
			err = nil
		}
		return err
	})
	return mn, grpcServer.Stop
}

View File

@ -1,330 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"bytes"
"context"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testidentity"
"storj.io/storj/internal/testrand"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts"
"storj.io/storj/pkg/rpc"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage/teststore"
)
const (
	// defaultAlpha is the kademlia concurrency parameter used by these tests.
	defaultAlpha = 5
)
// TestNewKademlia verifies that newKademlia constructs a usable instance: the
// configured bootstrap nodes are retained and the dialer and routing table
// are initialized.
//
// Cleanup: the original had two byte-identical test cases and a dead
// expectedErr field (asserted after require.NoError had already guaranteed
// err == nil); both redundancies are removed.
func TestNewKademlia(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	cases := []struct {
		id   *identity.FullIdentity
		bn   []pb.Node
		addr string
	}{
		{
			id: func() *identity.FullIdentity {
				id, err := testidentity.NewTestIdentity(ctx)
				require.NoError(t, err)
				return id
			}(),
			bn:   []pb.Node{{Id: teststorj.NodeIDFromString("foo")}},
			addr: "127.0.0.1:8080",
		},
	}
	for _, v := range cases {
		kad, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, v.bn, v.addr, pb.NodeOperator{}, v.id, defaultAlpha)
		require.NoError(t, err)
		assert.Equal(t, kad.bootstrapNodes, v.bn)
		assert.NotNil(t, kad.dialer)
		assert.NotNil(t, kad.routingTable)
		assert.NoError(t, kad.Close())
	}
}
// TestPeerDiscovery runs lookups against a mocked bootstrap server and checks
// that each lookup completes with the expected (nil) error.
func TestPeerDiscovery(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	// make new identity
	mockBootServer, bootID, bootAddress, cancel := startTestNodeServer(t, ctx)
	defer cancel()
	_, testID, testAddress, cancel := startTestNodeServer(t, ctx)
	defer cancel()
	_, targetID, targetAddress, cancel := startTestNodeServer(t, ctx)
	defer cancel()
	bootstrapNodes := []pb.Node{{Id: bootID.ID, Address: &pb.NodeAddress{Address: bootAddress}}}
	operator := pb.NodeOperator{
		Wallet: "OperatorWallet",
	}
	k, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrapNodes, testAddress, operator, testID, defaultAlpha)
	require.NoError(t, err)
	rt := k.routingTable
	assert.Equal(t, rt.Local().Operator.Wallet, "OperatorWallet")
	defer ctx.Check(k.Close)
	cases := []struct {
		target      storj.NodeID
		expected    *pb.Node
		expectedErr error
	}{
		{target: func() storj.NodeID {
			// prime the mock bootstrap server to return the target node
			mockBootServer.returnValue = []*pb.Node{{Id: targetID.ID, Address: &pb.NodeAddress{Address: targetAddress}}}
			return targetID.ID
		}(),
			expected:    &pb.Node{},
			expectedErr: nil,
		},
		{target: bootID.ID,
			expected:    nil,
			expectedErr: nil,
		},
	}
	for _, v := range cases {
		_, err := k.lookup(ctx, v.target)
		assert.Equal(t, v.expectedErr, err)
	}
}
// TestBootstrap brings up a chain of three nodes — a bootstrap node and two
// peers that bootstrap against it — and checks that the last node's routing
// table ends up knowing all three.
func TestBootstrap(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	bn, clean := testNode(t, ctx, "1", []pb.Node{})
	defer clean()
	n1, clean := testNode(t, ctx, "2", []pb.Node{bn.routingTable.self.Node})
	defer clean()
	err := n1.Bootstrap(ctx)
	require.NoError(t, err)
	n2, clean := testNode(t, ctx, "3", []pb.Node{bn.routingTable.self.Node})
	defer clean()
	err = n2.Bootstrap(ctx)
	require.NoError(t, err)
	nodeIDs, err := n2.routingTable.nodeBucketDB.List(ctx, nil, 0)
	require.NoError(t, err)
	assert.Len(t, nodeIDs, 3)
}
// TestRefresh checks that refresh updates the timestamp of a stale bucket and
// leaves a fresh bucket's timestamp untouched.
func TestRefresh(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	k, clean := testNode(t, ctx, "refresh", []pb.Node{})
	defer clean()
	//turn back time for only bucket
	rt := k.routingTable
	now := time.Now().UTC()
	bID := firstBucketID //always exists
	err := rt.SetBucketTimestamp(ctx, bID[:], now.Add(-2*time.Hour))
	require.NoError(t, err)
	//refresh should call FindNode, updating the time
	err = k.refresh(ctx, time.Minute)
	require.NoError(t, err)
	ts1, err := rt.GetBucketTimestamp(ctx, bID[:])
	require.NoError(t, err)
	assert.True(t, now.Add(-5*time.Minute).Before(ts1))
	//refresh should not call FindNode, leaving the previous time
	err = k.refresh(ctx, time.Minute)
	require.NoError(t, err)
	ts2, err := rt.GetBucketTimestamp(ctx, bID[:])
	require.NoError(t, err)
	assert.True(t, ts1.Equal(ts2))
}
// TestFindNear exercises Kademlia.FindNear against a routing table seeded
// with a handful of known nodes, asserting membership (not order) of the
// returned set.
//
// Cleanup: the newNode closure previously took unused bandwidth/disk
// parameters and the case struct carried an unused restrictions field; both
// are removed.
func TestFindNear(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	// make two distinct identities: local node and bootstrap node
	fid, err := testidentity.NewTestIdentity(ctx)
	require.NoError(t, err)
	fid2, err := testidentity.NewTestIdentity(ctx)
	require.NoError(t, err)
	assert.NotEqual(t, fid.ID, fid2.ID)
	//start kademlia
	lis, lisCancel := newListener(t, ctx, "127.0.0.1:0")
	defer lisCancel()
	_, cancel := newTestServer(t, ctx, lis)
	defer cancel()
	bootstrap := []pb.Node{{Id: fid2.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}}}
	k, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrap,
		lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
	require.NoError(t, err)
	defer ctx.Check(k.Close)
	// add nodes to the routing table
	var nodes []*pb.Node
	newNode := func(id string) pb.Node {
		nodeID := teststorj.NodeIDFromString(id)
		n := &pb.Node{Id: nodeID}
		nodes = append(nodes, n)
		err = k.routingTable.ConnectionSuccess(ctx, n)
		require.NoError(t, err)
		return *n
	}
	nodeIDA := newNode("AAAAA")
	newNode("BBBBB")
	newNode("CCCCC")
	newNode("DDDDD")
	require.Len(t, nodes, 4)
	cases := []struct {
		testID   string
		target   storj.NodeID
		limit    int
		expected []*pb.Node
	}{
		{testID: "three", target: nodeIDA.Id, limit: 4, expected: nodes},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			ns, err := k.FindNear(ctx, testCase.target, testCase.limit)
			require.NoError(t, err)
			assert.Equal(t, len(testCase.expected), len(ns))
			// membership check only; FindNear's ordering is not asserted
			for _, e := range testCase.expected {
				found := false
				for _, n := range ns {
					if e.Id == n.Id {
						found = true
					}
				}
				assert.True(t, found, e.String())
			}
		})
	}
}
// TestRandomIds makes sure randomIDInRange produces an ID within (start..end]
// across randomized ranges, and errors on invalid or empty ranges.
func TestRandomIds(t *testing.T) {
	for x := 0; x < 1000; x++ {
		var start, end bucketID
		// many valid options
		start = testrand.NodeID()
		end = testrand.NodeID()
		if bytes.Compare(start[:], end[:]) > 0 {
			start, end = end, start
		}
		id, err := randomIDInRange(start, end)
		require.NoError(t, err, "Unexpected err in randomIDInRange")
		assert.True(t, bytes.Compare(id[:], start[:]) > 0, "Random id was less than starting id")
		assert.True(t, bytes.Compare(id[:], end[:]) <= 0, "Random id was greater than end id")
		//invalid range
		_, err = randomIDInRange(end, start)
		assert.Error(t, err, "Missing expected err in invalid randomIDInRange")
		//no valid options
		end = start
		_, err = randomIDInRange(start, end)
		assert.Error(t, err, "Missing expected err in empty randomIDInRange")
		// one valid option
		if start[31] == 255 {
			start[31] = 254
		} else {
			end[31] = start[31] + 1
		}
		id, err = randomIDInRange(start, end)
		require.NoError(t, err, "Unexpected err in randomIDInRange")
		assert.True(t, bytes.Equal(id[:], end[:]), "Not-so-random id was incorrect")
	}
}
// mockNodesServer is a test double for the Nodes service; it counts calls
// atomically and returns a configurable Query response.
type mockNodesServer struct {
	queryCalled int32 // incremented atomically on each Query
	pingCalled  int32 // incremented atomically on each Ping
	infoCalled  int32 // incremented atomically on each RequestInfo
	returnValue []*pb.Node // nodes returned by Query
}
// Query counts the call and returns the preconfigured node list.
func (mn *mockNodesServer) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
	atomic.AddInt32(&mn.queryCalled, 1)
	return &pb.QueryResponse{Response: mn.returnValue}, nil
}
// Ping counts the call and returns an empty response.
func (mn *mockNodesServer) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
	atomic.AddInt32(&mn.pingCalled, 1)
	return &pb.PingResponse{}, nil
}
// RequestInfo counts the call and returns an empty response.
func (mn *mockNodesServer) RequestInfo(ctx context.Context, req *pb.InfoRequest) (*pb.InfoResponse, error) {
	atomic.AddInt32(&mn.infoCalled, 1)
	return &pb.InfoResponse{}, nil
}
// newKademlia returns a newly configured Kademlia instance for tests, backed
// by in-memory teststore databases and with the given bootstrap nodes.
func newKademlia(log *zap.Logger, nodeType pb.NodeType, bootstrapNodes []pb.Node, address string, operator pb.NodeOperator, identity *identity.FullIdentity, alpha int) (*Kademlia, error) {
	self := &overlay.NodeDossier{
		Node: pb.Node{
			Id:      identity.ID,
			Address: &pb.NodeAddress{Address: address},
		},
		Type:     nodeType,
		Operator: operator,
	}
	rt, err := NewRoutingTable(log, self, teststore.New(), teststore.New(), teststore.New(), nil)
	if err != nil {
		return nil, err
	}
	tlsOptions, err := tlsopts.NewOptions(identity, tlsopts.Config{PeerIDVersions: "*"}, nil)
	if err != nil {
		return nil, err
	}
	kadConfig := Config{
		BootstrapBackoffMax:  10 * time.Second,
		BootstrapBackoffBase: 1 * time.Second,
		Alpha:                alpha,
	}
	kad, err := NewService(log, rpc.NewDefaultDialer(tlsOptions), rt, kadConfig)
	if err != nil {
		return nil, err
	}
	// inject the bootstrap nodes directly rather than through config
	kad.bootstrapNodes = bootstrapNodes
	return kad, nil
}

View File

@ -1,215 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademliaclient
import (
"context"
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/rpc"
"storj.io/storj/pkg/storj"
)
// mon collects monkit telemetry for this package.
var mon = monkit.Package()
// Conn represents a connection to a node's Nodes service.
type Conn struct {
	conn   *rpc.Conn       // underlying rpc connection
	client rpc.NodesClient // Nodes service client bound to conn
}
// Close closes this connection.
func (conn *Conn) Close() error {
	return conn.conn.Close()
}
// Dialer sends requests to kademlia endpoints on storage nodes.
type Dialer struct {
	log    *zap.Logger
	dialer rpc.Dialer
	obs    Observer        // optional; notified of dial successes/failures
	limit  sync2.Semaphore // bounds the number of concurrent outgoing dials
}
// Observer implements the ConnSuccess and ConnFailure methods
// for Discovery and other services to use.
type Observer interface {
	ConnSuccess(ctx context.Context, node *pb.Node)
	ConnFailure(ctx context.Context, node *pb.Node, err error)
}
// NewDialer creates a new kademlia dialer.
func NewDialer(log *zap.Logger, dialer rpc.Dialer, obs Observer) *Dialer {
	result := &Dialer{log: log, dialer: dialer, obs: obs}
	result.limit.Init(32) // TODO: limit should not be hardcoded
	return result
}
// Close closes the pool resources and prevents new connections to be made.
func (dialer *Dialer) Close() error {
	dialer.limit.Close()
	return nil
}
// Lookup queries ask about find, and also sends information about self.
// If self is nil, pingback will be false.
func (dialer *Dialer) Lookup(ctx context.Context, self *pb.Node, ask pb.Node, find storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	// the semaphore bounds concurrent outgoing dials; Lock fails once closed
	if !dialer.limit.Lock() {
		return nil, context.Canceled
	}
	defer dialer.limit.Unlock()
	req := pb.QueryRequest{
		Limit:  int64(limit),
		Target: &pb.Node{Id: find}, // TODO: should not be a Node protobuf!
	}
	if self != nil {
		req.Pingback = true
		req.Sender = self
	}
	conn, err := dialer.dialNode(ctx, ask)
	if err != nil {
		return nil, err
	}
	// the deferred Close is folded into the named err return
	defer func() { err = errs.Combine(err, conn.Close()) }()
	resp, err := conn.client.Query(ctx, &req)
	if err != nil {
		return nil, err
	}
	return resp.Response, nil
}
// PingNode pings target, reporting whether it responded; on failure the
// returned bool is false and err carries the cause.
func (dialer *Dialer) PingNode(ctx context.Context, target pb.Node) (_ bool, err error) {
	defer mon.Task()(&ctx)(&err)
	if !dialer.limit.Lock() {
		return false, context.Canceled
	}
	defer dialer.limit.Unlock()
	conn, err := dialer.dialNode(ctx, target)
	if err != nil {
		return false, err
	}
	defer func() { err = errs.Combine(err, conn.Close()) }()
	_, err = conn.client.Ping(ctx, &pb.PingRequest{})
	return err == nil, err
}
// FetchPeerIdentity connects to a node and returns its peer identity.
func (dialer *Dialer) FetchPeerIdentity(ctx context.Context, target pb.Node) (_ *identity.PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)
	if !dialer.limit.Lock() {
		return nil, context.Canceled
	}
	defer dialer.limit.Unlock()
	conn, err := dialer.dialNode(ctx, target)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, conn.Close()) }()
	return conn.conn.PeerIdentity()
}
// FetchPeerIdentityUnverified connects to an address and returns its peer
// identity (no node ID verification is performed on the dial).
func (dialer *Dialer) FetchPeerIdentityUnverified(ctx context.Context, address string) (_ *identity.PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)
	if !dialer.limit.Lock() {
		return nil, context.Canceled
	}
	defer dialer.limit.Unlock()
	conn, err := dialer.dialAddress(ctx, address)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, conn.Close()) }()
	return conn.conn.PeerIdentity()
}
// FetchInfo connects to a node and returns its node info.
func (dialer *Dialer) FetchInfo(ctx context.Context, target pb.Node) (_ *pb.InfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	if !dialer.limit.Lock() {
		return nil, context.Canceled
	}
	defer dialer.limit.Unlock()
	conn, err := dialer.dialNode(ctx, target)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, conn.Close()) }()
	resp, err := conn.client.RequestInfo(ctx, &pb.InfoRequest{})
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// dialNode dials the specified node, notifying the observer (if any) of the
// connection outcome.
func (dialer *Dialer) dialNode(ctx context.Context, target pb.Node) (_ *Conn, err error) {
	defer mon.Task()(&ctx)(&err)
	conn, dialErr := dialer.dialer.DialNode(ctx, &target)
	if dialErr != nil {
		if dialer.obs != nil {
			dialer.obs.ConnFailure(ctx, &target, dialErr)
		}
		return nil, dialErr
	}
	if dialer.obs != nil {
		dialer.obs.ConnSuccess(ctx, &target)
	}
	wrapped := &Conn{
		conn:   conn,
		client: conn.NodesClient(),
	}
	return wrapped, nil
}
// dialAddress dials the specified node by address (no node ID verification).
func (dialer *Dialer) dialAddress(ctx context.Context, address string) (_ *Conn, err error) {
	defer mon.Task()(&ctx)(&err)
	conn, err := dialer.dialer.DialAddressInsecure(ctx, address)
	if err != nil {
		// TODO: can't get an id here because we failed to dial
		return nil, err
	}
	// best-effort observer notification; skipped when the peer identity
	// cannot be read from the connection
	if ident, err := conn.PeerIdentity(); err == nil && dialer.obs != nil {
		dialer.obs.ConnSuccess(ctx, &pb.Node{
			Id: ident.ID,
			Address: &pb.NodeAddress{
				Transport: pb.NodeTransport_TCP_TLS_GRPC,
				Address:   address,
			},
		})
	}
	return &Conn{
		conn:   conn,
		client: conn.NodesClient(),
	}, nil
}

View File

@ -1,248 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"sort"
"sync"
"go.uber.org/zap"
"storj.io/storj/pkg/kademlia/kademliaclient"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// peerDiscovery carries the state of one iterative lookup for target:
// a bounded priority queue of candidates shared by `concurrency` workers.
type peerDiscovery struct {
	log    *zap.Logger
	dialer *kademliaclient.Dialer
	self   *pb.Node // sent along with queries for pingback; may be nil
	target storj.NodeID
	k      int // number of results to keep / request per query
	concurrency int // number of worker goroutines (kademlia alpha)
	cond  sync.Cond      // guards queue coordination between workers
	queue discoveryQueue // candidates, ordered by XOR distance to target
}
// newPeerDiscovery constructs a peerDiscovery for target, pre-seeding its
// queue with startingNodes.
func newPeerDiscovery(log *zap.Logger, dialer *kademliaclient.Dialer, target storj.NodeID, startingNodes []*pb.Node, k, alpha int, self *pb.Node) *peerDiscovery {
	d := &peerDiscovery{
		log:         log,
		dialer:      dialer,
		self:        self,
		target:      target,
		k:           k,
		concurrency: alpha,
		cond:        sync.Cond{L: &sync.Mutex{}},
		queue:       *newDiscoveryQueue(target, k),
	}
	d.queue.Insert(startingNodes...)
	return d
}
// Run executes the iterative lookup with `concurrency` worker goroutines
// sharing the discovery queue, and returns the successfully queried nodes
// closest to the target once no unqueried work remains (or ctx is canceled).
func (lookup *peerDiscovery) Run(ctx context.Context) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if lookup.queue.Unqueried() == 0 {
		return nil, nil
	}
	// protected by `lookup.cond.L`
	working := 0
	allDone := false
	wg := sync.WaitGroup{}
	wg.Add(lookup.concurrency)
	for i := 0; i < lookup.concurrency; i++ {
		go func() {
			defer wg.Done()
			for {
				var next *pb.Node
				lookup.cond.L.Lock()
				for {
					// everything is done, this routine can return
					if allDone {
						lookup.cond.L.Unlock()
						return
					}
					next = lookup.queue.ClosestUnqueried()
					if next != nil {
						working++
						break
					}
					// no work, wait until some other routine inserts into the queue
					lookup.cond.Wait()
				}
				lookup.cond.L.Unlock()
				// query the candidate outside the lock
				neighbors, err := lookup.dialer.Lookup(ctx, lookup.self, *next, lookup.target, lookup.k)
				if err != nil {
					lookup.queue.QueryFailure(next)
					if !isDone(ctx) {
						lookup.log.Debug("connecting to node failed",
							zap.Any("target", lookup.target),
							zap.Any("dial-node", next.Id),
							zap.Any("dial-address", next.Address.Address),
							zap.Error(err),
						)
					}
				} else {
					lookup.queue.QuerySuccess(next, neighbors...)
				}
				lookup.cond.L.Lock()
				working--
				// finished when ctx is done, or no worker is active and nothing is queued
				allDone = allDone || isDone(ctx) || (working == 0 && lookup.queue.Unqueried() == 0)
				lookup.cond.L.Unlock()
				lookup.cond.Broadcast()
			}
		}()
	}
	wg.Wait()
	return lookup.queue.ClosestQueried(), ctx.Err()
}
func isDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
// queueState tracks the lifecycle of a node inside a discoveryQueue.
type queueState int

const (
	stateUnqueried queueState = iota // inserted, not yet handed to a worker
	stateQuerying                    // handed out via ClosestUnqueried
	stateSuccess                     // query completed successfully
	stateFailure                     // query failed
)
// discoveryQueue is a limited priority queue for nodes with xor distance.
type discoveryQueue struct {
	target storj.NodeID
	maxLen int // maximum number of items kept after each insert
	mu     sync.Mutex
	state  map[storj.NodeID]queueState // per-node state; retained even after trimming
	items  []queueItem                 // sorted ascending by priority (XOR distance)
}
// queueItem is a node with a priority (its XOR distance from the target).
type queueItem struct {
	node     *pb.Node
	priority storj.NodeID // xorNodeID(target, node.Id); lower sorts first
}
// newDiscoveryQueue returns a discoveryQueue that prioritizes nodes by XOR
// distance from target and holds at most size items.
func newDiscoveryQueue(target storj.NodeID, size int) *discoveryQueue {
	q := &discoveryQueue{
		target: target,
		maxLen: size,
		state:  map[storj.NodeID]queueState{},
	}
	return q
}
// Insert adds nodes into the queue. Safe for concurrent use.
func (queue *discoveryQueue) Insert(nodes ...*pb.Node) {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	queue.insert(nodes...)
}
// insert adds nodes to the queue, keeping it sorted by XOR distance to the
// target and trimmed to maxLen. It requires the mutex to be locked.
func (queue *discoveryQueue) insert(nodes ...*pb.Node) {
	for _, node := range nodes {
		// TODO: empty node ids should be semantically different from the
		// technically valid node id that is all zeros
		if node.Id == (storj.NodeID{}) {
			continue
		}
		// a node already seen (in any state) is never re-added
		if _, added := queue.state[node.Id]; added {
			continue
		}
		queue.state[node.Id] = stateUnqueried
		queue.items = append(queue.items, queueItem{
			node:     node,
			priority: xorNodeID(queue.target, node.Id),
		})
	}
	sort.Slice(queue.items, func(i, k int) bool {
		return queue.items[i].priority.Less(queue.items[k].priority)
	})
	if len(queue.items) > queue.maxLen {
		// note: trimmed items keep their state map entries, which is what
		// prevents them from being re-inserted later
		queue.items = queue.items[:queue.maxLen]
	}
}
// ClosestUnqueried returns the closest unqueried node in the queue, marking
// it as being queried, or nil if none remain.
func (queue *discoveryQueue) ClosestUnqueried() *pb.Node {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	for i := range queue.items {
		candidate := queue.items[i].node
		if queue.state[candidate.Id] != stateUnqueried {
			continue
		}
		queue.state[candidate.Id] = stateQuerying
		return candidate
	}
	return nil
}
// ClosestQueried returns, in priority order, every node in the queue that was
// queried successfully.
func (queue *discoveryQueue) ClosestQueried() []*pb.Node {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	result := make([]*pb.Node, 0, len(queue.items))
	for i := range queue.items {
		node := queue.items[i].node
		if queue.state[node.Id] == stateSuccess {
			result = append(result, node)
		}
	}
	return result
}
// QuerySuccess marks node as successfully queried and inserts the returned
// neighbor nodes into the queue (insert skips nodes with a zero ID).
func (queue *discoveryQueue) QuerySuccess(node *pb.Node, nodes ...*pb.Node) {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	queue.state[node.Id] = stateSuccess
	queue.insert(nodes...)
}
// QueryFailure marks node as having failed its query.
func (queue *discoveryQueue) QueryFailure(node *pb.Node) {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	queue.state[node.Id] = stateFailure
}
// Unqueried returns the number of not-yet-queried items in the queue.
func (queue *discoveryQueue) Unqueried() (amount int) {
	queue.mu.Lock()
	defer queue.mu.Unlock()
	count := 0
	for i := range queue.items {
		if queue.state[queue.items[i].node.Id] == stateUnqueried {
			count++
		}
	}
	return count
}

View File

@ -1,100 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"math/rand"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// TestDiscoveryQueue checks that a size-limited discoveryQueue hands out
// nodes in ascending XOR-distance order and drops the farthest overflow.
func TestDiscoveryQueue(t *testing.T) {
	target := storj.NodeID{1, 1} // 00000001
	//             // id                -> id ^ target
	nodeA := &pb.Node{Id: storj.NodeID{3, 2}}   // 00000011:00000010 -> 00000010:00000011
	nodeB := &pb.Node{Id: storj.NodeID{6, 5}}   // 00000110:00000101 -> 00000111:00000100
	nodeC := &pb.Node{Id: storj.NodeID{7, 7}}   // 00000111:00000111 -> 00000110:00000110
	nodeD := &pb.Node{Id: storj.NodeID{8, 4}}   // 00001000:00000100 -> 00001001:00000101
	nodeE := &pb.Node{Id: storj.NodeID{12, 1}}  // 00001100:00000001 -> 00001101:00000000
	nodeF := &pb.Node{Id: storj.NodeID{15, 16}} // 00001111:00010000 -> 00001110:00010001
	nodeG := &pb.Node{Id: storj.NodeID{18, 74}} // 00010010:01001010 -> 00010011:01001011
	nodeH := &pb.Node{Id: storj.NodeID{25, 61}} // 00011001:00111101 -> 00011000:00111100
	nodes := []*pb.Node{nodeA, nodeB, nodeC, nodeD, nodeE, nodeF, nodeG, nodeH}
	// expected drain order: ascending XOR distance; G and H exceed maxLen=6
	expected := []*pb.Node{
		nodeA, // 00000011:00000010 -> 00000010:00000011
		nodeC, // 00000111:00000111 -> 00000110:00000110
		nodeB, // 00000110:00000101 -> 00000111:00000100
		nodeD, // 00001000:00000100 -> 00001001:00000101
		nodeE, // 00001100:00000001 -> 00001101:00000000
		nodeF, // 00001111:00010000 -> 00001110:00010001
		//  nodeG, // 00010010:01001010 -> 00010011:01001011
		//  nodeH, // 00011001:00111101 -> 00011000:00111100
	}
	// // code for outputting the bits above
	// for _, node := range nodes {
	// 	xor := xorNodeID(target, node.Id)
	// 	t.Logf("%08b,%08b -> %08b,%08b", node.Id[0], node.Id[1], xor[0], xor[1])
	// }
	queue := newDiscoveryQueue(target, 6)
	queue.Insert(nodes...)
	assert.Equal(t, queue.Unqueried(), 6)
	for i, expect := range expected {
		node := queue.ClosestUnqueried()
		assert.Equal(t, node.Id, expect.Id, strconv.Itoa(i))
	}
	assert.Nil(t, queue.ClosestUnqueried())
}
// TestDiscoveryQueueRandom fuzzes the discovery queue with random targets and
// node IDs, checking that ClosestUnqueried drains the queue in monotonically
// non-decreasing XOR-distance order.
func TestDiscoveryQueueRandom(t *testing.T) {
	const maxLen = 8
	seed := int64(rand.Uint64())
	t.Logf("seed %v", seed)
	r := rand.New(rand.NewSource(seed))
	for i := 0; i < 100; i++ {
		var target storj.NodeID
		_, _ = r.Read(target[:])
		var initial []*pb.Node
		for k := 0; k < 10; k++ {
			var nodeID storj.NodeID
			_, _ = r.Read(nodeID[:])
			initial = append(initial, &pb.Node{Id: nodeID})
		}
		queue := newDiscoveryQueue(target, maxLen)
		queue.Insert(initial...)
		for k := 0; k < 10; k++ {
			var nodeID storj.NodeID
			_, _ = r.Read(nodeID[:])
			queue.Insert(&pb.Node{Id: nodeID})
		}
		assert.Equal(t, queue.Unqueried(), maxLen)
		previousPriority := storj.NodeID{}
		for queue.Unqueried() > 0 {
			next := queue.ClosestUnqueried()
			priority := xorNodeID(target, next.Id)
			// ensure that priority is monotonically increasing
			assert.False(t, priority.Less(previousPriority))
			// BUG FIX: remember the last priority seen; previously this was
			// never updated, so the assertion always compared against the
			// zero ID and the monotonicity check was vacuous.
			previousPriority = priority
		}
	}
}

View File

@ -1,34 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"storj.io/storj/pkg/pb"
)
// addToReplacementCache appends node to the replacement cache bucket for
// kadBucketID, evicting the oldest entry when the bucket exceeds rcBucketSize.
func (rt *RoutingTable) addToReplacementCache(kadBucketID bucketID, node *pb.Node) {
	rt.rcMutex.Lock()
	defer rt.rcMutex.Unlock()

	cached := append(rt.replacementCache[kadBucketID], node)
	if len(cached) > rt.rcBucketSize {
		// drop the oldest (front) entry to stay within the size cap
		cached = cached[1:]
	}
	rt.replacementCache[kadBucketID] = cached
}
// removeFromReplacementCache removes the first cached entry whose ID and
// address both match node; it is a no-op when no such entry exists.
func (rt *RoutingTable) removeFromReplacementCache(kadBucketID bucketID, node *pb.Node) {
	rt.rcMutex.Lock()
	defer rt.rcMutex.Unlock()

	cached := rt.replacementCache[kadBucketID]
	for i, candidate := range cached {
		// both the node ID and the address must match before removing
		if candidate.Id != node.Id || candidate.Address.GetAddress() != node.Address.GetAddress() {
			continue
		}
		cached = append(cached[:i], cached[i+1:]...)
		break
	}
	rt.replacementCache[kadBucketID] = cached
}

View File

@ -1,60 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package kademlia
import (
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// TestAddToReplacementCache exercises insertion and oldest-first eviction in
// the replacement cache (createRoutingTable defaults to cache size 2).
func TestAddToReplacementCache(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, storj.NodeID{244, 255})
	defer ctx.Check(rt.Close)
	kadBucketID := bucketID{255, 255}
	node1 := teststorj.MockNode(string([]byte{233, 255}))
	rt.addToReplacementCache(kadBucketID, node1)
	assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[kadBucketID])
	// a second bucket accumulates independently of the first
	kadBucketID2 := bucketID{127, 255}
	node2 := teststorj.MockNode(string([]byte{100, 255}))
	node3 := teststorj.MockNode(string([]byte{90, 255}))
	node4 := teststorj.MockNode(string([]byte{80, 255}))
	rt.addToReplacementCache(kadBucketID2, node2)
	rt.addToReplacementCache(kadBucketID2, node3)
	assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[kadBucketID])
	assert.Equal(t, []*pb.Node{node2, node3}, rt.replacementCache[kadBucketID2])
	// cache size is 2, so adding a third node evicts the oldest (node2)
	rt.addToReplacementCache(kadBucketID2, node4)
	assert.Equal(t, []*pb.Node{node3, node4}, rt.replacementCache[kadBucketID2])
}
// TestRemoveFromReplacementCache removes cached nodes one by one (cache size 3
// so no evictions interfere) and checks the remaining contents each time.
func TestRemoveFromReplacementCache(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTableWith(ctx, storj.NodeID{244, 255}, routingTableOpts{cacheSize: 3})
	defer ctx.Check(rt.Close)
	kadBucketID2 := bucketID{127, 255}
	node2 := teststorj.MockNode(string([]byte{100, 255}))
	node3 := teststorj.MockNode(string([]byte{90, 255}))
	node4 := teststorj.MockNode(string([]byte{80, 255}))
	rt.addToReplacementCache(kadBucketID2, node2)
	rt.addToReplacementCache(kadBucketID2, node3)
	rt.addToReplacementCache(kadBucketID2, node4)
	assert.Equal(t, []*pb.Node{node2, node3, node4}, rt.replacementCache[kadBucketID2])
	// removal from the middle, front, and back all preserve order
	rt.removeFromReplacementCache(kadBucketID2, node3)
	assert.Equal(t, []*pb.Node{node2, node4}, rt.replacementCache[kadBucketID2])
	rt.removeFromReplacementCache(kadBucketID2, node2)
	assert.Equal(t, []*pb.Node{node4}, rt.replacementCache[kadBucketID2])
	rt.removeFromReplacementCache(kadBucketID2, node4)
	assert.Equal(t, []*pb.Node{}, rt.replacementCache[kadBucketID2])
}

View File

@ -1,327 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"encoding/binary"
"fmt"
"sync"
"time"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
const (
	// KademliaBucket is the string representing the bucket used for the kademlia routing table k-bucket ids
	KademliaBucket = "kbuckets"
	// NodeBucket is the string representing the bucket used for the kademlia routing table node ids
	NodeBucket = "nodes"
	// AntechamberBucket is the string representing the bucket used for the kademlia antechamber nodes
	AntechamberBucket = "antechamber"
)

// RoutingErr is the class for all errors pertaining to routing table operations
var RoutingErr = errs.Class("routing table error")

// Bucket IDs exist in the same address space as node IDs
type bucketID = storj.NodeID

// firstBucketID is the all-ones ID: the upper bound of the initial bucket
// that covers the entire ID space before any splits occur.
var firstBucketID = bucketID{
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF,
}

// emptyBucketID is the zero value, used as a "not found" sentinel.
var emptyBucketID = bucketID{}

// RoutingTableConfig configures the routing table
type RoutingTableConfig struct {
	BucketSize           int `help:"size of each Kademlia bucket" default:"20"`
	ReplacementCacheSize int `help:"size of Kademlia replacement cache" default:"5"`
}

// RoutingTable implements the RoutingTable interface
type RoutingTable struct {
	log              *zap.Logger
	self             *overlay.NodeDossier   // the local node's dossier
	kadBucketDB      storage.KeyValueStore  // bucketID -> varint last-lookup timestamp
	nodeBucketDB     storage.KeyValueStore  // nodeID -> marshaled pb.Node
	transport        *pb.NodeTransport
	mutex            *sync.Mutex            // guards self and bucket mutation
	rcMutex          *sync.Mutex            // guards replacementCache
	acMutex          *sync.Mutex            // guards antechamber
	replacementCache map[bucketID][]*pb.Node
	bucketSize       int // max number of nodes stored in a kbucket = 20 (k)
	rcBucketSize     int // replacementCache bucket max length
	antechamber      storage.KeyValueStore
}
// NewRoutingTable returns a newly configured instance of a RoutingTable.
// kdb, ndb, and adb back the k-buckets, node records, and antechamber
// respectively. A nil or zero-valued config falls back to the defaults
// (bucket size 20, replacement cache size 5). The local node is added to
// the table before it is returned.
func NewRoutingTable(logger *zap.Logger, localNode *overlay.NodeDossier, kdb, ndb, adb storage.KeyValueStore, config *RoutingTableConfig) (_ *RoutingTable, err error) {
	if config == nil || config.BucketSize == 0 || config.ReplacementCacheSize == 0 {
		// TODO: handle this more nicely
		config = &RoutingTableConfig{
			BucketSize:           20,
			ReplacementCacheSize: 5,
		}
	}
	rt := &RoutingTable{
		log:              logger,
		self:             localNode,
		kadBucketDB:      kdb,
		nodeBucketDB:     ndb,
		transport:        &defaultTransport,
		mutex:            &sync.Mutex{},
		rcMutex:          &sync.Mutex{},
		acMutex:          &sync.Mutex{},
		replacementCache: make(map[bucketID][]*pb.Node),
		bucketSize:       config.BucketSize,
		rcBucketSize:     config.ReplacementCacheSize,
		antechamber:      adb,
	}
	ok, err := rt.addNode(context.TODO(), &localNode.Node)
	if err != nil {
		return nil, RoutingErr.New("could not add localNode to routing table: %s", err)
	}
	if !ok {
		// BUG FIX: addNode can decline (ok=false) without an error; report
		// that distinctly instead of formatting a nil error with %s.
		return nil, RoutingErr.New("could not add localNode to routing table")
	}
	return rt, nil
}
// Close closes without closing dependencies
func (rt *RoutingTable) Close() error {
	// the key-value stores are owned and closed by the caller
	return nil
}
// Local returns a copy of the local node's dossier.
func (rt *RoutingTable) Local() overlay.NodeDossier {
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	local := *rt.self
	return local
}
// UpdateSelf updates the local node with the provided info
func (rt *RoutingTable) UpdateSelf(capacity *pb.NodeCapacity) {
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	// a nil capacity leaves the current value untouched
	if capacity == nil {
		return
	}
	rt.self.Capacity = *capacity
}
// K returns the currently configured maximum of nodes to store in a bucket
func (rt *RoutingTable) K() int { return rt.bucketSize }
// CacheSize returns the total current size of the replacement cache
func (rt *RoutingTable) CacheSize() int { return rt.rcBucketSize }
// GetNodes retrieves nodes within the same kbucket as the given node id.
// Note: id doesn't need to be stored at time of search. The boolean result
// reports whether the lookup succeeded.
func (rt *RoutingTable) GetNodes(ctx context.Context, id storj.NodeID) ([]*pb.Node, bool) {
	defer mon.Task()(&ctx)(nil)
	bID, err := rt.getKBucketID(ctx, id)
	if err != nil || bID == (bucketID{}) {
		// lookup failure and "no bucket found" are both reported as false
		return nil, false
	}
	unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(ctx, bID)
	if err != nil {
		return nil, false
	}
	return unmarshaledNodes, true
}
// GetBucketIds returns a storage.Keys type of bucket ID's in the Kademlia instance
func (rt *RoutingTable) GetBucketIds(ctx context.Context) (_ storage.Keys, err error) {
	defer mon.Task()(&ctx)(&err)
	// a nil prefix and zero limit list every key in the bucket DB
	return rt.kadBucketDB.List(ctx, nil, 0)
}
// DumpNodes iterates through all nodes in the nodeBucketDB and unmarshals
// them into pb.Nodes, returning the decoded nodes. Unmarshal failures are
// collected and returned as a combined error alongside the nodes that did
// decode successfully.
func (rt *RoutingTable) DumpNodes(ctx context.Context) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	var nodes []*pb.Node
	var nodeErrors errs.Group
	err = rt.iterateNodes(ctx, storj.NodeID{}, func(ctx context.Context, newID storj.NodeID, protoNode []byte) error {
		newNode := pb.Node{}
		if err := proto.Unmarshal(protoNode, &newNode); err != nil {
			// BUG FIX: record the failure but skip the node; previously the
			// zero/partially-decoded node was appended to the result anyway.
			nodeErrors.Add(err)
			return nil
		}
		nodes = append(nodes, &newNode)
		return nil
	}, false)
	if err != nil {
		nodeErrors.Add(err)
	}
	return nodes, nodeErrors.Err()
}
// FindNear returns the node corresponding to the provided nodeID
// returns all Nodes (excluding self) closest via XOR to the provided nodeID up to the provided limit
//
// The result is built with an insertion sort over a single pass of the
// node DB: nodes already in closestNodes stay sorted by XOR distance to
// target, and each incoming ID is slotted into place.
func (rt *RoutingTable) FindNear(ctx context.Context, target storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	// initialize a slice of limit+1 to allow for expansion while reordering
	closestNodes := make([]*pb.Node, 0, limit+1)
	// Insertion sort the nodes by xor
	err = rt.iterateNodes(ctx, storj.NodeID{}, func(ctx context.Context, newID storj.NodeID, protoNode []byte) error {
		newPos := len(closestNodes)
		// compare values starting with the greatest xor to newID in the iteration
		for newPos > 0 && compareByXor(closestNodes[newPos-1].Id, newID, target) > 0 {
			// decrement newPos until newID has a greater xor (farther away) than closestNode[newPos-1]
			// this final newPos is the index at which the newID belongs
			newPos--
		}
		// newPos == limit means newID is farther than everything already
		// kept at capacity; it is skipped without unmarshaling.
		if newPos != limit {
			newNode := pb.Node{}
			err := proto.Unmarshal(protoNode, &newNode)
			if err != nil {
				return err
			}
			closestNodes = append(closestNodes, &newNode)
			// if the new node is not the furthest away, insert the node at its correct index
			if newPos != len(closestNodes) {
				copy(closestNodes[newPos+1:], closestNodes[newPos:])
				closestNodes[newPos] = &newNode
				// trim back to the requested limit after the shift
				if len(closestNodes) > limit {
					closestNodes = closestNodes[:limit]
				}
			}
		}
		return nil
	}, true)
	return closestNodes, Error.Wrap(err)
}
// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)

	// connecting to a node without an ID is valid, but never recorded
	if node.Id == (storj.NodeID{}) {
		return nil
	}

	existing, err := rt.nodeBucketDB.Get(ctx, storage.Key(node.Id.Bytes()))
	if err != nil && !storage.ErrKeyNotFound.Has(err) {
		return RoutingErr.New("could not get node %s", err)
	}
	if existing != nil {
		// already known: refresh the stored contact info
		if err := rt.updateNode(ctx, node); err != nil {
			return RoutingErr.New("could not update node %s", err)
		}
		return nil
	}
	if _, err := rt.addNode(ctx, node); err != nil {
		return RoutingErr.New("could not add node %s", err)
	}
	return nil
}
// ConnectionFailed removes a node from the routing table when
// a connection fails for the node on the network
func (rt *RoutingTable) ConnectionFailed(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	if err := rt.removeNode(ctx, node); err != nil {
		return RoutingErr.New("could not remove node %s", err)
	}
	return nil
}
// SetBucketTimestamp records the time of the last node lookup for a bucket
func (rt *RoutingTable) SetBucketTimestamp(ctx context.Context, bIDBytes []byte, now time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	err = rt.createOrUpdateKBucket(ctx, keyToBucketID(bIDBytes), now)
	if err != nil {
		// CONSISTENCY FIX: wrap with RoutingErr like every other routing
		// table operation; this previously used NodeErr.
		return RoutingErr.New("could not update bucket timestamp %s", err)
	}
	return nil
}
// GetBucketTimestamp retrieves time of the last node lookup for a bucket.
// The stored value is a varint-encoded UnixNano (see createOrUpdateKBucket);
// the returned time is UTC.
func (rt *RoutingTable) GetBucketTimestamp(ctx context.Context, bIDBytes []byte) (_ time.Time, err error) {
	defer mon.Task()(&ctx)(&err)
	t, err := rt.kadBucketDB.Get(ctx, bIDBytes)
	if err != nil {
		return time.Now(), RoutingErr.New("could not get bucket timestamp %s", err)
	}
	timestamp, n := binary.Varint(t)
	if n <= 0 {
		// BUG FIX: the Varint byte count was previously discarded, so a
		// corrupt or truncated value silently decoded as time zero.
		return time.Now(), RoutingErr.New("invalid bucket timestamp")
	}
	return time.Unix(0, timestamp).UTC(), nil
}
// iterateNodes walks nodeBucketDB starting at start, invoking f with each
// node ID and its marshaled value. When skipSelf is true the local node's
// entry is skipped. Iteration stops at the first error from f.
func (rt *RoutingTable) iterateNodes(ctx context.Context, start storj.NodeID, f func(context.Context, storj.NodeID, []byte) error, skipSelf bool) (err error) {
	defer mon.Task()(&ctx)(&err)
	opts := storage.IterateOptions{First: storage.Key(start.Bytes()), Recurse: true}
	return rt.nodeBucketDB.Iterate(ctx, opts, func(ctx context.Context, it storage.Iterator) error {
		var item storage.ListItem
		for it.Next(ctx, &item) {
			nodeID, err := storj.NodeIDFromBytes(item.Key)
			if err != nil {
				return err
			}
			if skipSelf && nodeID == rt.self.Id {
				continue
			}
			if err := f(ctx, nodeID, item.Value); err != nil {
				return err
			}
		}
		return nil
	})
}
// ConnFailure implements the Transport failure function
func (rt *RoutingTable) ConnFailure(ctx context.Context, node *pb.Node, err error) {
	// only log when the removal itself also fails
	if hookErr := rt.ConnectionFailed(ctx, node); hookErr != nil {
		rt.log.Debug(fmt.Sprintf("error with ConnFailure hook %+v : %+v", err, hookErr))
	}
}
// ConnSuccess implements the Transport success function
func (rt *RoutingTable) ConnSuccess(ctx context.Context, node *pb.Node) {
	if err := rt.ConnectionSuccess(ctx, node); err != nil {
		rt.log.Debug("connection success error:", zap.Error(err))
	}
}

View File

@ -1,366 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"encoding/binary"
"time"
"github.com/gogo/protobuf/proto"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
// addNode attempts to add a new contact to the routing table
// Requires node not already in table
// Returns true if node was added successfully
//
// When the node's bucket is full, the bucket is repeatedly split as long as
// it contains the local node or the newcomer would be within the nearest k;
// otherwise the newcomer goes to the replacement cache and (false, nil) is
// returned.
func (rt *RoutingTable) addNode(ctx context.Context, node *pb.Node) (_ bool, err error) {
	defer mon.Task()(&ctx)(&err)
	rt.mutex.Lock()
	defer rt.mutex.Unlock()

	// the local node bootstraps the table: create the initial full-range
	// bucket and store the node directly
	if node.Id == rt.self.Id {
		err := rt.createOrUpdateKBucket(ctx, firstBucketID, time.Now())
		if err != nil {
			return false, RoutingErr.New("could not create initial K bucket: %s", err)
		}
		err = rt.putNode(ctx, node)
		if err != nil {
			return false, RoutingErr.New("could not add initial node to nodeBucketDB: %s", err)
		}
		return true, nil
	}
	kadBucketID, err := rt.getKBucketID(ctx, node.Id)
	if err != nil {
		return false, RoutingErr.New("could not getKBucketID: %s", err)
	}
	hasRoom, err := rt.kadBucketHasRoom(ctx, kadBucketID)
	if err != nil {
		return false, err
	}
	containsLocal, err := rt.kadBucketContainsLocalNode(ctx, kadBucketID)
	if err != nil {
		return false, err
	}
	withinK, err := rt.wouldBeInNearestK(ctx, node.Id)
	if err != nil {
		return false, RoutingErr.New("could not determine if node is within k: %s", err)
	}
	// split the target bucket until it has room, but only when Kademlia
	// allows it (bucket holds local node, or newcomer is within nearest k)
	for !hasRoom {
		if containsLocal || withinK {
			depth, err := rt.determineLeafDepth(ctx, kadBucketID)
			if err != nil {
				return false, RoutingErr.New("could not determine leaf depth: %s", err)
			}
			// create the sibling bucket produced by the split, then
			// re-resolve which bucket the newcomer now falls into
			kadBucketID = rt.splitBucket(kadBucketID, depth)
			err = rt.createOrUpdateKBucket(ctx, kadBucketID, time.Now())
			if err != nil {
				return false, RoutingErr.New("could not split and create K bucket: %s", err)
			}
			kadBucketID, err = rt.getKBucketID(ctx, node.Id)
			if err != nil {
				return false, RoutingErr.New("could not get k bucket Id within add node split bucket checks: %s", err)
			}
			hasRoom, err = rt.kadBucketHasRoom(ctx, kadBucketID)
			if err != nil {
				return false, err
			}
			containsLocal, err = rt.kadBucketContainsLocalNode(ctx, kadBucketID)
			if err != nil {
				return false, err
			}
		} else {
			// not eligible for a split: park the node in the replacement
			// cache and report "not added" without an error
			rt.addToReplacementCache(kadBucketID, node)
			return false, nil
		}
	}
	err = rt.putNode(ctx, node)
	if err != nil {
		return false, RoutingErr.New("could not add node to nodeBucketDB: %s", err)
	}
	err = rt.createOrUpdateKBucket(ctx, kadBucketID, time.Now())
	if err != nil {
		return false, RoutingErr.New("could not create or update K bucket: %s", err)
	}
	return true, nil
}
// updateNode will update the node information given that
// the node is already in the routing table.
func (rt *RoutingTable) updateNode(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	err = rt.putNode(ctx, node)
	if err != nil {
		return RoutingErr.New("could not update node: %v", err)
	}
	return nil
}
// removeNode will remove churned nodes and replace those entries with nodes from the replacement cache.
//
// A node absent from nodeBucketDB is still purged from the replacement
// cache and the antechamber. A stored node whose address differs from the
// given node is left untouched (the record is considered stale on the
// caller's side, not ours).
func (rt *RoutingTable) removeNode(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	rt.mutex.Lock()
	defer rt.mutex.Unlock()
	kadBucketID, err := rt.getKBucketID(ctx, node.Id)
	if err != nil {
		return RoutingErr.New("could not get k bucket %s", err)
	}
	existingMarshalled, err := rt.nodeBucketDB.Get(ctx, node.Id.Bytes())
	if storage.ErrKeyNotFound.Has(err) {
		//check replacement cache
		rt.removeFromReplacementCache(kadBucketID, node)
		// check antechamber
		err = rt.antechamberRemoveNode(ctx, node)
		if err != nil {
			return RoutingErr.Wrap(err)
		}
		return nil
	} else if err != nil {
		return RoutingErr.New("could not get node %s", err)
	}
	var existing pb.Node
	err = proto.Unmarshal(existingMarshalled, &existing)
	if err != nil {
		return RoutingErr.New("could not unmarshal node %s", err)
	}
	if !pb.AddressEqual(existing.Address, node.Address) {
		// don't remove a node if the address is different
		return nil
	}
	err = rt.nodeBucketDB.Delete(ctx, node.Id.Bytes())
	if err != nil {
		return RoutingErr.New("could not delete node %s", err)
	}
	// promote the most recently cached replacement (if any) into the
	// freed bucket slot
	nodes := rt.replacementCache[kadBucketID]
	if len(nodes) == 0 {
		return nil
	}
	err = rt.putNode(ctx, nodes[len(nodes)-1])
	if err != nil {
		return err
	}
	rt.replacementCache[kadBucketID] = nodes[:len(nodes)-1]
	return nil
}
// putNode: helper, adds or updates Node and ID to nodeBucketDB
func (rt *RoutingTable) putNode(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	marshaled, err := proto.Marshal(node)
	if err != nil {
		return RoutingErr.Wrap(err)
	}
	if err := rt.nodeBucketDB.Put(ctx, node.Id.Bytes(), marshaled); err != nil {
		return RoutingErr.New("could not add key value pair to nodeBucketDB: %s", err)
	}
	return nil
}
// createOrUpdateKBucket: helper, adds or updates given kbucket
func (rt *RoutingTable) createOrUpdateKBucket(ctx context.Context, bID bucketID, now time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)
	// the bucket value is its last-lookup time, varint-encoded UnixNano
	encoded := make([]byte, binary.MaxVarintLen64)
	binary.PutVarint(encoded, now.UnixNano())
	if err := rt.kadBucketDB.Put(ctx, bID[:], encoded); err != nil {
		return RoutingErr.New("could not add or update k bucket: %s", err)
	}
	return nil
}
// getKBucketID: helper, returns the id of the corresponding k bucket given a node id.
// The node doesn't have to be in the routing table at time of search
//
// Bucket keys are iterated in ascending order; each bucket ID is the upper
// bound of its range, so the first bucket ID greater than nodeID is the
// node's bucket. If the loop finishes without a hit, match holds the last
// (highest) bucket ID seen.
func (rt *RoutingTable) getKBucketID(ctx context.Context, nodeID storj.NodeID) (_ bucketID, err error) {
	defer mon.Task()(&ctx)(&err)
	match := bucketID{}
	err = rt.kadBucketDB.Iterate(ctx, storage.IterateOptions{First: storage.Key{}, Recurse: true},
		func(ctx context.Context, it storage.Iterator) error {
			var item storage.ListItem
			for it.Next(ctx, &item) {
				match = keyToBucketID(item.Key)
				if nodeID.Less(match) {
					break
				}
			}
			return nil
		},
	)
	if err != nil {
		return bucketID{}, RoutingErr.Wrap(err)
	}
	return match, nil
}
// wouldBeInNearestK: helper, returns true if the node in question is within the nearest k from local node
func (rt *RoutingTable) wouldBeInNearestK(ctx context.Context, nodeID storj.NodeID) (_ bool, err error) {
	defer mon.Task()(&ctx)(&err)
	closestNodes, err := rt.FindNear(ctx, rt.self.Id, rt.bucketSize)
	if err != nil {
		return false, RoutingErr.Wrap(err)
	}
	// fewer than k nodes known: any newcomer is within the nearest k
	if len(closestNodes) < rt.bucketSize {
		return true, nil
	}
	// pick the current k-th nearest node as the yardstick
	var yardstick storj.NodeID
	if len(closestNodes) <= rt.bucketSize {
		yardstick = closestNodes[len(closestNodes)-1].Id
	} else {
		yardstick = closestNodes[rt.bucketSize].Id
	}
	// the newcomer qualifies when its XOR distance to self beats the yardstick's
	existingXor := xorNodeID(yardstick, rt.self.Id)
	newXor := xorNodeID(nodeID, rt.self.Id)
	return newXor.Less(existingXor), nil
}
// kadBucketContainsLocalNode returns true if the kbucket in question contains the local node
func (rt *RoutingTable) kadBucketContainsLocalNode(ctx context.Context, queryID bucketID) (_ bool, err error) {
	defer mon.Task()(&ctx)(&err)
	localBucketID, err := rt.getKBucketID(ctx, rt.self.Id)
	if err != nil {
		return false, err
	}
	return localBucketID == queryID, nil
}
// kadBucketHasRoom: helper, returns true if it has fewer than k nodes
func (rt *RoutingTable) kadBucketHasRoom(ctx context.Context, bID bucketID) (_ bool, err error) {
	nodes, err := rt.getNodeIDsWithinKBucket(ctx, bID)
	if err != nil {
		return false, err
	}
	// simplified from an if/else returning boolean literals
	return len(nodes) < rt.bucketSize, nil
}
// getNodeIDsWithinKBucket: helper, returns a collection of all the node ids contained within the kbucket
//
// The bucket covers the half-open range (left, right]: the left endpoint
// (the previous bucket's upper bound) is excluded, the right endpoint is
// included.
func (rt *RoutingTable) getNodeIDsWithinKBucket(ctx context.Context, bID bucketID) (_ storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)
	endpoints, err := rt.getKBucketRange(ctx, bID)
	if err != nil {
		return nil, err
	}
	left := endpoints[0]
	right := endpoints[1]
	var ids []storj.NodeID
	err = rt.iterateNodes(ctx, left, func(ctx context.Context, nodeID storj.NodeID, protoNode []byte) error {
		// keep IDs with left < nodeID <= right
		if left.Less(nodeID) && (nodeID.Less(right) || nodeID == right) {
			ids = append(ids, nodeID)
		}
		return nil
	}, false)
	if err != nil {
		return nil, err
	}
	return ids, nil
}
// getNodesFromIDsBytes: helper, returns array of encoded nodes from node ids
func (rt *RoutingTable) getNodesFromIDsBytes(ctx context.Context, nodeIDs storj.NodeIDList) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	marshaledNodes := make([]storage.Value, 0, len(nodeIDs))
	for _, nodeID := range nodeIDs {
		raw, err := rt.nodeBucketDB.Get(ctx, nodeID.Bytes())
		if err != nil {
			return nil, RoutingErr.New("could not get node id %v, %s", nodeID, err)
		}
		marshaledNodes = append(marshaledNodes, raw)
	}
	return unmarshalNodes(marshaledNodes)
}
// unmarshalNodes: helper, decodes each serialized value into a pb.Node.
// On a decode failure it returns the nodes decoded so far along with the error.
func unmarshalNodes(nodes []storage.Value) ([]*pb.Node, error) {
	var unmarshaled []*pb.Node
	for _, raw := range nodes {
		node := &pb.Node{}
		if err := proto.Unmarshal(raw, node); err != nil {
			return unmarshaled, RoutingErr.New("could not unmarshal node %s", err)
		}
		unmarshaled = append(unmarshaled, node)
	}
	return unmarshaled, nil
}
// getUnmarshaledNodesFromBucket: helper, gets nodes within kbucket
func (rt *RoutingTable) getUnmarshaledNodesFromBucket(ctx context.Context, bID bucketID) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	nodeIDsBytes, err := rt.getNodeIDsWithinKBucket(ctx, bID)
	if err != nil {
		return []*pb.Node{}, RoutingErr.New("could not get nodeIds within kbucket %s", err)
	}
	nodes, err := rt.getNodesFromIDsBytes(ctx, nodeIDsBytes)
	if err != nil {
		return []*pb.Node{}, RoutingErr.New("could not get node values %s", err)
	}
	return nodes, nil
}
// getKBucketRange: helper, returns the left and right endpoints of the range of node ids contained within the bucket
//
// Bucket keys iterate in ascending order, so the key immediately preceding
// bID is the exclusive lower bound and bID itself is the inclusive upper
// bound. If bID is never found, the initial empty endpoints slice is
// returned (with a nil error).
func (rt *RoutingTable) getKBucketRange(ctx context.Context, bID bucketID) (_ []bucketID, err error) {
	defer mon.Task()(&ctx)(&err)
	previousBucket := bucketID{}
	endpoints := []bucketID{}
	err = rt.kadBucketDB.Iterate(ctx, storage.IterateOptions{First: storage.Key{}, Recurse: true},
		func(ctx context.Context, it storage.Iterator) error {
			var item storage.ListItem
			for it.Next(ctx, &item) {
				thisBucket := keyToBucketID(item.Key)
				if thisBucket == bID {
					endpoints = []bucketID{previousBucket, bID}
					break
				}
				previousBucket = thisBucket
			}
			return nil
		},
	)
	if err != nil {
		return endpoints, RoutingErr.Wrap(err)
	}
	return endpoints, nil
}
// determineLeafDepth determines the level of the bucket id in question.
// Eg level 0 means there is only 1 bucket, level 1 means the bucket has been split once, and so on
func (rt *RoutingTable) determineLeafDepth(ctx context.Context, bID bucketID) (_ int, err error) {
	defer mon.Task()(&ctx)(&err)
	bucketRange, err := rt.getKBucketRange(ctx, bID)
	if err != nil {
		return -1, RoutingErr.New("could not get k bucket range %s", err)
	}
	smaller := bucketRange[0]
	diffBit, err := determineDifferingBitIndex(bID, smaller)
	if err != nil {
		// BUG FIX: return the -1 sentinel used by the other error path
		// instead of a depth derived from an invalid diffBit.
		return -1, RoutingErr.New("could not determine differing bit %s", err)
	}
	return diffBit + 1, nil
}
// splitBucket: helper, returns the smaller of the two new bucket ids
// the original bucket id becomes the greater of the 2 new
func (rt *RoutingTable) splitBucket(bID bucketID, depth int) bucketID {
	newID := bID
	// flip the bit at position `depth`, counting from the most
	// significant bit of byte 0
	byteIndex, bitOffset := depth/8, uint(7-depth%8)
	newID[byteIndex] ^= byte(1 << bitOffset)
	return newID
}

View File

@ -1,738 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package kademlia
import (
"bytes"
"context"
"sync"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
"storj.io/storj/storage/storelogger"
"storj.io/storj/storage/teststore"
)
// routingTableOpts carries per-test overrides for the routing table's
// k-bucket size and replacement cache size (zero means "use test default").
type routingTableOpts struct {
	bucketSize int
	cacheSize  int
}
// newTestRoutingTable returns a newly configured instance of a RoutingTable
// backed by logged in-memory stores; zero-valued opts default to bucket
// size 6 and cache size 2. The local node is added before returning.
func newTestRoutingTable(ctx context.Context, local *overlay.NodeDossier, opts routingTableOpts) (*RoutingTable, error) {
	if opts.bucketSize == 0 {
		opts.bucketSize = 6
	}
	if opts.cacheSize == 0 {
		opts.cacheSize = 2
	}
	rt := &RoutingTable{
		self:         local,
		kadBucketDB:  storelogger.New(zap.L().Named("rt.kad"), teststore.New()),
		nodeBucketDB: storelogger.New(zap.L().Named("rt.node"), teststore.New()),
		transport:    &defaultTransport,

		mutex:            &sync.Mutex{},
		rcMutex:          &sync.Mutex{},
		acMutex:          &sync.Mutex{},
		replacementCache: make(map[bucketID][]*pb.Node),

		bucketSize:   opts.bucketSize,
		rcBucketSize: opts.cacheSize,
		antechamber:  storelogger.New(zap.L().Named("rt.antechamber"), teststore.New()),
	}
	ok, err := rt.addNode(ctx, &local.Node)
	if !ok || err != nil {
		return nil, RoutingErr.New("could not add localNode to routing table: %s", err)
	}
	return rt, nil
}
// createRoutingTableWith builds a test routing table for localNodeID with the
// given options, panicking on any failure (test helper).
func createRoutingTableWith(ctx context.Context, localNodeID storj.NodeID, opts routingTableOpts) *RoutingTable {
	if localNodeID == (storj.NodeID{}) {
		panic("empty local node id")
	}
	local := &overlay.NodeDossier{Node: pb.Node{Id: localNodeID}}
	table, err := newTestRoutingTable(ctx, local, opts)
	if err != nil {
		panic(err)
	}
	return table
}
// createRoutingTable builds a test routing table with default options.
func createRoutingTable(ctx context.Context, localNodeID storj.NodeID) *RoutingTable {
	return createRoutingTableWith(ctx, localNodeID, routingTableOpts{})
}
// TestAddNode drives addNode through a scripted sequence of insertions
// against a table whose local node is "OO" (bucket size 6, cache size 2).
// Each case asserts the full expected set of k-bucket IDs and their member
// node IDs after the insertion, covering bucket fills, splits, and the
// replacement-cache fallback ("8O" is dropped into the cache).
// NOTE: cases share table state, so they must run in order.
func TestAddNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("OO"))
	defer ctx.Check(rt.Close)
	cases := []struct {
		testID  string     // human-readable case name (usually the inserted ID)
		node    *pb.Node   // node to insert
		added   bool       // expected addNode result
		kadIDs  [][]byte   // expected bucket IDs (first two bytes), ascending
		nodeIDs [][]string // expected member IDs per bucket, parallel to kadIDs
	}{
		{testID: "PO: add node to unfilled kbucket",
			node:    teststorj.MockNode("PO"),
			added:   true,
			kadIDs:  [][]byte{{255, 255}},
			nodeIDs: [][]string{{"OO", "PO"}},
		},
		{testID: "NO: add node to full kbucket and split",
			node:    teststorj.MockNode("NO"),
			added:   true,
			kadIDs:  [][]byte{{255, 255}},
			nodeIDs: [][]string{{"NO", "OO", "PO"}},
		},
		{testID: "MO",
			node:    teststorj.MockNode("MO"),
			added:   true,
			kadIDs:  [][]byte{{255, 255}},
			nodeIDs: [][]string{{"MO", "NO", "OO", "PO"}},
		},
		{testID: "LO",
			node:    teststorj.MockNode("LO"),
			added:   true,
			kadIDs:  [][]byte{{255, 255}},
			nodeIDs: [][]string{{"LO", "MO", "NO", "OO", "PO"}},
		},
		{testID: "QO",
			node:    teststorj.MockNode("QO"),
			added:   true,
			kadIDs:  [][]byte{{255, 255}},
			nodeIDs: [][]string{{"LO", "MO", "NO", "OO", "PO", "QO"}},
		},
		{testID: "SO: split bucket",
			node:    teststorj.MockNode("SO"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "?O",
			node:    teststorj.MockNode("?O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: ">O",
			node:  teststorj.MockNode(">O"),
			added: true,
			kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}}, nodeIDs: [][]string{{">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "=O",
			node:    teststorj.MockNode("=O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: ";O",
			node:    teststorj.MockNode(";O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: ":O",
			node:    teststorj.MockNode(":O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "9O",
			node:    teststorj.MockNode("9O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "8O: should drop",
			node:    teststorj.MockNode("8O"),
			added:   false,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "KO",
			node:    teststorj.MockNode("KO"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "JO",
			node:    teststorj.MockNode("JO"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
		},
		{testID: "]O",
			node:    teststorj.MockNode("]O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O"}, {}, {}},
		},
		{testID: "^O",
			node:    teststorj.MockNode("^O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O"}, {}, {}},
		},
		{testID: "_O",
			node:    teststorj.MockNode("_O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O", "_O"}, {}, {}},
		},
		{testID: "@O: split bucket 2",
			node:    teststorj.MockNode("@O"),
			added:   true,
			kadIDs:  [][]byte{{63, 255}, {71, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
			nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"@O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O", "_O"}, {}, {}},
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			ok, err := rt.addNode(ctx, testCase.node)
			require.NoError(t, err)
			require.Equal(t, testCase.added, ok)
			// verify the full bucket layout after this insertion
			kadKeys, err := rt.kadBucketDB.List(ctx, nil, 0)
			require.NoError(t, err)
			for i, v := range kadKeys {
				require.True(t, bytes.Equal(testCase.kadIDs[i], v[:2]))
				ids, err := rt.getNodeIDsWithinKBucket(ctx, keyToBucketID(v))
				require.NoError(t, err)
				require.True(t, len(ids) == len(testCase.nodeIDs[i]))
				for j, id := range ids {
					require.True(t, bytes.Equal(teststorj.NodeIDFromString(testCase.nodeIDs[i][j]).Bytes(), id.Bytes()))
				}
			}
			// the dropped node must have landed in the replacement cache
			if testCase.testID == "8O" {
				nodeID80 := teststorj.NodeIDFromString("8O")
				n := rt.replacementCache[keyToBucketID(nodeID80.Bytes())]
				require.Equal(t, nodeID80.Bytes(), n[0].Id.Bytes())
			}
		})
	}
}
// TestUpdateNode verifies that updateNode overwrites a stored node record:
// a freshly added mock node has a nil address, and after updateNode assigns
// one, the persisted record reflects the new address.
func TestUpdateNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)

	target := teststorj.MockNode("BB")
	added, err := rt.addNode(ctx, target)
	assert.NoError(t, err)
	assert.True(t, added)

	// fetchStored re-reads the node record from the bucket DB and
	// unmarshals it so the persisted fields can be inspected.
	fetchStored := func() *pb.Node {
		raw, err := rt.nodeBucketDB.Get(ctx, target.Id.Bytes())
		assert.NoError(t, err)
		decoded, err := unmarshalNodes([]storage.Value{raw})
		assert.NoError(t, err)
		return decoded[0]
	}

	// a freshly added mock node carries no address
	assert.Nil(t, fetchStored().Address)

	// give the node an address and persist the change
	target.Address = &pb.NodeAddress{Address: "BB"}
	assert.NoError(t, rt.updateNode(ctx, target))
	assert.Equal(t, "BB", fetchStored().Address.Address)
}
// TestRemoveNode covers removeNode across every place a node can live:
// the routing table proper, the replacement cache, and the antechamber.
// It also checks promotion from the replacement cache into the table on
// removal, that removing an unknown node is a no-op, and that removal is
// refused when the supplied address does not match the stored record.
// The steps below are strictly order-dependent.
func TestRemoveNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	// Add node to RT
	kadBucketID := firstBucketID
	node := teststorj.MockNode("BB")
	ok, err := rt.addNode(ctx, node)
	assert.True(t, ok)
	assert.NoError(t, err)
	// make sure node is in RT
	val, err := rt.nodeBucketDB.Get(ctx, node.Id.Bytes())
	assert.NoError(t, err)
	assert.NotNil(t, val)
	// Add node2 to the replacement cache
	node2 := teststorj.MockNode("CC")
	rt.addToReplacementCache(kadBucketID, node2)
	// remove node from RT
	err = rt.removeNode(ctx, node)
	assert.NoError(t, err)
	// make sure node is removed
	val, err = rt.nodeBucketDB.Get(ctx, node.Id.Bytes())
	assert.Nil(t, val)
	assert.Error(t, err)
	// make sure node2 was moved from the replacement cache to the RT
	val2, err := rt.nodeBucketDB.Get(ctx, node2.Id.Bytes())
	assert.NoError(t, err)
	assert.NotNil(t, val2)
	assert.Equal(t, 0, len(rt.replacementCache[kadBucketID]))
	// Add node to replacement cache
	rt.addToReplacementCache(kadBucketID, node)
	assert.Equal(t, 1, len(rt.replacementCache[kadBucketID]))
	// check it was removed from replacement cache
	err = rt.removeNode(ctx, node)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(rt.replacementCache[kadBucketID]))
	// Add node to antechamber
	err = rt.antechamberAddNode(ctx, node)
	assert.NoError(t, err)
	val, err = rt.antechamber.Get(ctx, node.Id.Bytes())
	assert.NoError(t, err)
	assert.NotNil(t, val)
	// check it was removed from antechamber
	err = rt.removeNode(ctx, node)
	assert.NoError(t, err)
	val, err = rt.antechamber.Get(ctx, node.Id.Bytes())
	assert.True(t, storage.ErrKeyNotFound.Has(err))
	assert.Nil(t, val)
	// remove a node that's not in rt, replacement cache, nor antechamber
	// — expected to be a silent no-op rather than an error
	node3 := teststorj.MockNode("DD")
	err = rt.removeNode(ctx, node3)
	assert.NoError(t, err)
	// don't remove node with mismatched address
	node4 := teststorj.MockNode("EE")
	ok, err = rt.addNode(ctx, node4)
	assert.True(t, ok)
	assert.NoError(t, err)
	err = rt.removeNode(ctx, &pb.Node{
		Id:      teststorj.NodeIDFromString("EE"),
		Address: &pb.NodeAddress{Address: "address:1"},
	})
	assert.NoError(t, err)
	// node4 must still be present: the removal request's address differed
	val, err = rt.nodeBucketDB.Get(ctx, node4.Id.Bytes())
	assert.NotNil(t, val)
	assert.NoError(t, err)
}
// TestCreateOrUpdateKBucket checks that creating a k-bucket entry persists
// a record under that bucket ID in the kad bucket DB.
func TestCreateOrUpdateKBucket(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)

	bucket := bucketID{255, 255}
	assert.NoError(t, rt.createOrUpdateKBucket(ctx, bucket, time.Now()))

	stored, err := rt.kadBucketDB.Get(ctx, bucket[:])
	assert.NoError(t, err)
	assert.NotNil(t, stored)
}
// TestGetKBucketID verifies the local node's ID maps to the expected
// k-bucket — the full-range first bucket of a fresh table.
func TestGetKBucketID(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	selfID := teststorj.NodeIDFromString("AA")
	rt := createRoutingTable(ctx, selfID)
	defer ctx.Check(rt.Close)

	bucket, err := rt.getKBucketID(ctx, selfID)
	assert.NoError(t, err)
	wanted := bucketID{255, 255}
	assert.Equal(t, wanted[:2], bucket[:2])
}
// TestWouldBeInNearestK checks wouldBeInNearestK against a table whose local
// ID is {127, 255} and whose bucket size is 2. Each subtest Puts its node ID
// into the node DB after asserting, so later cases run against a
// progressively fuller table — the case order is significant.
func TestWouldBeInNearestK(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTableWith(ctx, storj.NodeID{127, 255}, routingTableOpts{bucketSize: 2})
	defer ctx.Check(rt.Close)
	cases := []struct {
		testID  string
		nodeID  storj.NodeID
		closest bool
	}{
		{testID: "A",
			nodeID:  storj.NodeID{127, 255}, //XOR from [127, 255] is 0
			closest: true,
		},
		{testID: "B",
			nodeID:  storj.NodeID{143, 255}, //XOR from [127, 255] is 240
			closest: true,
		},
		{testID: "C",
			nodeID:  storj.NodeID{255, 255}, //XOR from [127, 255] is 128
			closest: true,
		},
		{testID: "D",
			nodeID:  storj.NodeID{191, 255}, //XOR from [127, 255] is 192
			closest: false,
		},
		{testID: "E",
			nodeID:  storj.NodeID{133, 255}, //XOR from [127, 255] is 250
			closest: false,
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			result, err := rt.wouldBeInNearestK(ctx, testCase.nodeID)
			assert.NoError(t, err)
			assert.Equal(t, testCase.closest, result)
			// record this ID so the next case sees it as an existing node
			assert.NoError(t, rt.nodeBucketDB.Put(ctx, testCase.nodeID.Bytes(), []byte("")))
		})
	}
}
// TestKadBucketContainsLocalNode splits the ID space into two buckets and
// confirms the local node is reported only for the bucket covering its ID.
func TestKadBucketContainsLocalNode(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	localID := storj.NodeID{183, 255} // binary: 10110111 11111111
	rt := createRoutingTable(ctx, localID)
	defer ctx.Check(rt.Close)

	upper := firstBucketID // range that contains the local node
	lower := upper         // array assignment copies the bucket ID
	lower[0] = 127         // range that excludes the local node
	assert.NoError(t, rt.createOrUpdateKBucket(ctx, lower, time.Now()))

	inUpper, err := rt.kadBucketContainsLocalNode(ctx, upper)
	assert.NoError(t, err)
	assert.True(t, inUpper)

	inLower, err := rt.kadBucketContainsLocalNode(ctx, lower)
	assert.NoError(t, err)
	assert.False(t, inLower)
}
// TestKadBucketHasRoom fills the first k-bucket to capacity (local node plus
// five others) and checks that kadBucketHasRoom flips from true to false.
func TestKadBucketHasRoom(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	local := storj.NodeID{255, 255}
	rt := createRoutingTable(ctx, local)
	defer ctx.Check(rt.Close)

	bucket := firstBucketID
	roomBefore, err := rt.kadBucketHasRoom(ctx, bucket)
	assert.NoError(t, err)
	assert.True(t, roomBefore)

	// cram five more node IDs into the bucket's key space
	others := []storj.NodeID{
		{191, 255},
		{127, 255},
		{63, 255},
		{159, 255},
		{0, 127},
	}
	for _, id := range others {
		assert.NoError(t, rt.nodeBucketDB.Put(ctx, id.Bytes(), []byte("")))
	}

	roomAfter, err := rt.kadBucketHasRoom(ctx, bucket)
	assert.NoError(t, err)
	assert.False(t, roomAfter)
}
// TestGetNodeIDsWithinKBucket splits the ID space into two buckets and checks
// that getNodeIDsWithinKBucket returns exactly the node IDs whose keys fall
// within each bucket's range.
func TestGetNodeIDsWithinKBucket(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	nodeIDA := storj.NodeID{183, 255} //[10110111, 1111111]
	rt := createRoutingTable(ctx, nodeIDA)
	defer ctx.Check(rt.Close)
	kadIDA := firstBucketID
	// kadIDB covers the lower half of the ID space
	var kadIDB bucketID
	copy(kadIDB[:], kadIDA[:])
	kadIDB[0] = 127
	now := time.Now()
	assert.NoError(t, rt.createOrUpdateKBucket(ctx, kadIDB, now))
	nodeIDB := storj.NodeID{111, 255} //[01101111, 1111111]
	nodeIDC := storj.NodeID{47, 255}  //[00101111, 1111111]
	assert.NoError(t, rt.nodeBucketDB.Put(ctx, nodeIDB.Bytes(), []byte("")))
	assert.NoError(t, rt.nodeBucketDB.Put(ctx, nodeIDC.Bytes(), []byte("")))
	cases := []struct {
		testID   string
		kadID    bucketID
		expected storage.Keys
	}{
		{testID: "A",
			kadID:    kadIDA,
			expected: storage.Keys{nodeIDA.Bytes()},
		},
		{testID: "B",
			kadID: kadIDB,
			// expected keys listed in ascending key order (C < B)
			expected: storage.Keys{nodeIDC.Bytes(), nodeIDB.Bytes()},
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			n, err := rt.getNodeIDsWithinKBucket(ctx, testCase.kadID)
			assert.NoError(t, err)
			for i, id := range testCase.expected {
				assert.True(t, id.Equal(n[i].Bytes()))
			}
		})
	}
}
// TestGetNodesFromIDs stores three marshaled nodes and verifies that
// getNodesFromIDsBytes returns them, in key order, for the listed IDs.
func TestGetNodesFromIDs(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	mocks := []*pb.Node{
		teststorj.MockNode("AA"),
		teststorj.MockNode("BB"),
		teststorj.MockNode("CC"),
	}
	rt := createRoutingTable(ctx, mocks[0].Id)
	defer ctx.Check(rt.Close)

	// persist each node's serialized form under its ID
	for _, n := range mocks {
		data, err := proto.Marshal(n)
		assert.NoError(t, err)
		assert.NoError(t, rt.nodeBucketDB.Put(ctx, n.Id.Bytes(), data))
	}

	keys, err := rt.nodeBucketDB.List(ctx, nil, 0)
	assert.NoError(t, err)
	fetched, err := rt.getNodesFromIDsBytes(ctx, teststorj.NodeIDsFromBytes(keys.ByteSlices()...))
	assert.NoError(t, err)
	for i, want := range mocks {
		assert.True(t, bytes.Equal(want.Id.Bytes(), fetched[i].Id.Bytes()))
	}
}
// TestUnmarshalNodes round-trips three nodes through proto marshaling, the
// node bucket DB, and getNodesFromIDsBytes, checking the IDs survive intact.
func TestUnmarshalNodes(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	wanted := []*pb.Node{
		teststorj.MockNode("AA"),
		teststorj.MockNode("BB"),
		teststorj.MockNode("CC"),
	}
	rt := createRoutingTable(ctx, wanted[0].Id)
	defer ctx.Check(rt.Close)

	// store the serialized form of every node under its ID
	for _, n := range wanted {
		data, err := proto.Marshal(n)
		assert.NoError(t, err)
		assert.NoError(t, rt.nodeBucketDB.Put(ctx, n.Id.Bytes(), data))
	}

	keys, err := rt.nodeBucketDB.List(ctx, nil, 0)
	assert.NoError(t, err)
	decoded, err := rt.getNodesFromIDsBytes(ctx, teststorj.NodeIDsFromBytes(keys.ByteSlices()...))
	assert.NoError(t, err)
	for i, want := range wanted {
		assert.True(t, bytes.Equal(want.Id.Bytes(), decoded[i].Id.Bytes()))
	}
}
// TestGetUnmarshaledNodesFromBucket adds two nodes to the routing table and
// verifies getUnmarshaledNodesFromBucket returns the local node plus both
// added nodes for the first (full-range) bucket.
func TestGetUnmarshaledNodesFromBucket(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	nodeA := teststorj.MockNode("AA")
	rt := createRoutingTable(ctx, nodeA.Id)
	defer ctx.Check(rt.Close)
	bucketID := firstBucketID
	nodeB := teststorj.MockNode("BB")
	nodeC := teststorj.MockNode("CC")
	// assert the nodes were actually accepted into the table; previously
	// addNode's bool was discarded, so a silent rejection would have
	// let an incorrectly-passing test through
	ok, err := rt.addNode(ctx, nodeB)
	assert.NoError(t, err)
	assert.True(t, ok)
	ok, err = rt.addNode(ctx, nodeC)
	assert.NoError(t, err)
	assert.True(t, ok)
	nodes, err := rt.getUnmarshaledNodesFromBucket(ctx, bucketID)
	assert.NoError(t, err)
	expected := []*pb.Node{nodeA, nodeB, nodeC}
	// guard the index loop below against a short result slice
	assert.Equal(t, len(expected), len(nodes))
	for i, v := range expected {
		assert.True(t, bytes.Equal(v.Id.Bytes(), nodes[i].Id.Bytes()))
	}
}
// TestGetKBucketRange seeds three bucket IDs and verifies getKBucketRange
// returns each bucket's endpoint pair: the preceding bucket's ID and the
// bucket's own ID. The lowest bucket's lower endpoint is the zero bucket ID.
func TestGetKBucketRange(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	idA := storj.NodeID{255, 255}
	idB := storj.NodeID{127, 255}
	idC := storj.NodeID{63, 255}
	assert.NoError(t, rt.kadBucketDB.Put(ctx, idA.Bytes(), []byte("")))
	assert.NoError(t, rt.kadBucketDB.Put(ctx, idB.Bytes(), []byte("")))
	assert.NoError(t, rt.kadBucketDB.Put(ctx, idC.Bytes(), []byte("")))
	zeroBID := bucketID{}
	cases := []struct {
		testID   string
		id       storj.NodeID
		expected storage.Keys
	}{
		{testID: "A",
			id:       idA,
			expected: storage.Keys{idB.Bytes(), idA.Bytes()},
		},
		{testID: "B",
			id:       idB,
			expected: storage.Keys{idC.Bytes(), idB.Bytes()}},
		{testID: "C",
			id: idC,
			// C is the lowest bucket, so its range starts at zero
			expected: storage.Keys{zeroBID[:], idC.Bytes()},
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			ep, err := rt.getKBucketRange(ctx, keyToBucketID(testCase.id.Bytes()))
			assert.NoError(t, err)
			for i, k := range testCase.expected {
				assert.True(t, k.Equal(ep[i][:]))
			}
		})
	}
}
// TestBucketIDZeroValue confirms a zero-value bucketID is exactly 32 zero bytes.
func TestBucketIDZeroValue(t *testing.T) {
	var zero bucketID
	expected := make([]byte, 32) // 32 zero bytes
	assert.True(t, bytes.Equal(zero[:], expected))
}
// TestDetermineLeafDepth checks determineLeafDepth as buckets are added one
// at a time. Each case's addNode callback mutates kadBucketDB before the
// depth check, so the cases build on each other and their order matters.
func TestDetermineLeafDepth(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	// three bucket IDs differing only in the first byte
	idA, idB, idC := firstBucketID, firstBucketID, firstBucketID
	idA[0] = 255
	idB[0] = 127
	idC[0] = 63
	cases := []struct {
		testID  string
		id      storj.NodeID
		depth   int
		addNode func()
	}{
		{testID: "A",
			id:    idA,
			depth: 0,
			addNode: func() {
				e := rt.kadBucketDB.Put(ctx, idA.Bytes(), []byte(""))
				assert.NoError(t, e)
			},
		},
		{testID: "B",
			id:    idB,
			depth: 1,
			addNode: func() {
				e := rt.kadBucketDB.Put(ctx, idB.Bytes(), []byte(""))
				assert.NoError(t, e)
			},
		},
		{testID: "C",
			id:    idA,
			depth: 1,
			addNode: func() {
				e := rt.kadBucketDB.Put(ctx, idC.Bytes(), []byte(""))
				assert.NoError(t, e)
			},
		},
		// D and E rely on the buckets added by earlier cases
		{testID: "D",
			id:      idB,
			depth:   2,
			addNode: func() {},
		},
		{testID: "E",
			id:      idC,
			depth:   2,
			addNode: func() {},
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			testCase.addNode()
			d, err := rt.determineLeafDepth(ctx, testCase.id)
			assert.NoError(t, err)
			assert.Equal(t, testCase.depth, d)
		})
	}
}
// TestSplitBucket exercises splitBucket: splitting the bucket whose ID starts
// with input at the given bit depth must yield a bucket ID starting with want.
func TestSplitBucket(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)

	for _, tt := range []struct {
		name  string
		input []byte
		want  []byte
		depth int
	}{
		{"A: [11111111, 11111111] -> [10111111, 11111111]", []byte{255, 255}, []byte{191, 255}, 1},
		{"B: [10111111, 11111111] -> [10011111, 11111111]", []byte{191, 255}, []byte{159, 255}, 2},
		{"C: [01111111, 11111111] -> [00111111, 11111111]", []byte{127, 255}, []byte{63, 255}, 1},
		{"D: [00000000, 11111111] -> [00000000, 01111111]", []byte{0, 255}, []byte{0, 127}, 8},
		{"E: [01011111, 11111111] -> [01010111, 11111111]", []byte{95, 255}, []byte{87, 255}, 4},
		{"F: [01011111, 11111111] -> [01001111, 11111111]", []byte{95, 255}, []byte{79, 255}, 3},
	} {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			got := rt.splitBucket(keyToBucketID(tt.input), tt.depth)
			assert.Equal(t, tt.want, got[:2])
		})
	}
}

View File

@ -1,86 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"encoding/hex"
"fmt"
"io"
"os"
"strings"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// id decodes hexID into a storj.NodeID, panicking on malformed input, wrong
// length, or the all-zero ID (which tests must not use).
func id(hexID string) (rv storj.NodeID) {
	decoded, err := hex.DecodeString(hexID)
	if err != nil {
		panic(err)
	}
	if len(decoded) != len(storj.NodeID{}) {
		panic(fmt.Sprintf("invalid length for %q", hexID))
	}
	copy(rv[:], decoded)
	if rv == (storj.NodeID{}) {
		panic("to allow routing table implementations to use a node id zero value (unlikely to have a collision), tests shouldn't use it")
	}
	return rv
}
// PadID builds a full-length node ID from hexPrefix followed by hexPad
// repeated enough times to fill the remaining hex digits.
func PadID(hexPrefix, hexPad string) storj.NodeID {
	remaining := len(storj.NodeID{})*2 - len(hexPrefix)
	return id(hexPrefix + strings.Repeat(hexPad, remaining/len(hexPad)))
}
// Node constructs a pb.Node with the given ID and dial address.
func Node(id storj.NodeID, address string) *pb.Node {
	addr := &pb.NodeAddress{Address: address}
	return &pb.Node{Id: id, Address: addr}
}
// graphCounter numbers successive routing-graph dump files (see SaveGraph).
var graphCounter = new(int64)

// Grapher is implemented by routing tables that can render themselves as a
// dot graph for debugging.
type Grapher interface {
	Graph(io.Writer) error
}
// SaveGraph writes a dot-format dump of table to a numbered
// routing-graph-*.dot file if table implements Grapher; otherwise it does
// nothing. Failures panic, since this is a test-debugging aid only.
func SaveGraph(table interface{}) {
	grapher, ok := table.(Grapher)
	if !ok {
		return
	}
	name := fmt.Sprintf("routing-graph-%003d.dot", atomic.AddInt64(graphCounter, 1))
	fh, err := os.Create(name)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := fh.Close(); err != nil {
			panic(err)
		}
	}()
	if err := grapher.Graph(fh); err != nil {
		panic(err)
	}
}
// requireNodesEqual asserts that actual matches expected element-wise by
// node ID, transport, and address — failing the test immediately on mismatch.
func requireNodesEqual(t testing.TB, expected []*pb.Node, actual []*pb.Node) {
	require.Equal(t, len(expected), len(actual))
	for i, want := range expected {
		got := actual[i]
		require.Equal(t, want.Id, got.Id)
		require.Equal(t, want.Address.Transport, got.Address.Transport)
		require.Equal(t, want.Address.Address, got.Address.Address)
	}
}
// NodeFromPrefix builds a test node whose ID is prefix padded out with pad
// and whose address embeds the prefix for easy identification in failures.
func NodeFromPrefix(prefix string, pad string) *pb.Node {
	address := fmt.Sprintf("address-%s:1", prefix)
	return Node(PadID(prefix, pad), address)
}

View File

@ -1,626 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"storj.io/storj/internal/testcontext"
"storj.io/storj/pkg/kademlia/testrouting"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// RoutingTableInterface contains information on nodes we have locally
type RoutingTableInterface interface {
	// K returns the configured bucket size.
	K() int
	// CacheSize returns the configured replacement-cache size.
	CacheSize() int
	// FindNear returns up to limit of the closest known nodes to id.
	FindNear(ctx context.Context, id storj.NodeID, limit int) ([]*pb.Node, error)
	// ConnectionSuccess records that node was reached successfully.
	ConnectionSuccess(ctx context.Context, node *pb.Node) error
	// ConnectionFailed records a failed attempt to reach node.
	ConnectionFailed(ctx context.Context, node *pb.Node) error
	Close() error
}

// routingCtor constructs a routing table under test from
// (ctx, self ID, bucket size, cache size, allowed failures).
type routingCtor func(context.Context, storj.NodeID, int, int, int) RoutingTableInterface
// newRouting adapts createRoutingTableWith to the routingCtor signature.
// The production table implements no failure counting, so a nonzero
// allowedFailures is a programming error here.
func newRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) RoutingTableInterface {
	if allowedFailures != 0 {
		panic("failure counting currently unsupported")
	}
	opts := routingTableOpts{bucketSize: bucketSize, cacheSize: cacheSize}
	return createRoutingTableWith(ctx, self, opts)
}
// newTestRouting adapts the reference testrouting implementation to the
// routingCtor signature; unlike newRouting it supports failure counting.
func newTestRouting(ctx context.Context, self storj.NodeID, bucketSize, cacheSize, allowedFailures int) RoutingTableInterface {
	return testrouting.New(self, bucketSize, cacheSize, allowedFailures)
}
func TestTableInit_Routing(t *testing.T)     { testTableInit(t, newRouting) }
func TestTableInit_TestRouting(t *testing.T) { testTableInit(t, newTestRouting) }

// testTableInit verifies a freshly constructed table reports the configured
// bucket and cache sizes and that FindNear on an empty table returns nothing.
func testTableInit(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	bucketSize := 5
	cacheSize := 3
	table := routingCtor(ctx, PadID("55", "5"), bucketSize, cacheSize, 0)
	defer ctx.Check(table.Close)
	require.Equal(t, bucketSize, table.K())
	require.Equal(t, cacheSize, table.CacheSize())
	nodes, err := table.FindNear(ctx, PadID("21", "0"), 3)
	require.NoError(t, err)
	require.Equal(t, 0, len(nodes))
}
func TestTableBasic_Routing(t *testing.T)     { testTableBasic(t, newRouting) }
func TestTableBasic_TestRouting(t *testing.T) { testTableBasic(t, newTestRouting) }

// testTableBasic adds a single node and checks FindNear returns it with its
// stored address.
func testTableBasic(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("5555", "5"), 5, 3, 0)
	defer ctx.Check(table.Close)

	require.NoError(t, table.ConnectionSuccess(ctx, Node(PadID("5556", "5"), "address:1")))

	found, err := table.FindNear(ctx, PadID("21", "0"), 3)
	require.NoError(t, err)
	require.Equal(t, 1, len(found))
	require.Equal(t, PadID("5556", "5"), found[0].Id)
	require.Equal(t, "address:1", found[0].Address.Address)
}
func TestNoSelf_Routing(t *testing.T)     { testNoSelf(t, newRouting) }
func TestNoSelf_TestRouting(t *testing.T) { testNoSelf(t, newTestRouting) }

// testNoSelf checks that reporting a successful connection to the local node
// itself does not insert it into the table.
func testNoSelf(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("55", "5"), 5, 3, 0)
	defer ctx.Check(table.Close)

	// the "connected" node shares the local ID; only the address differs
	require.NoError(t, table.ConnectionSuccess(ctx, Node(PadID("55", "5"), "address:2")))

	found, err := table.FindNear(ctx, PadID("21", "0"), 3)
	require.NoError(t, err)
	require.Equal(t, 0, len(found))
}
func TestSplits_Routing(t *testing.T)     { testSplits(t, newRouting) }
func TestSplits_TestRouting(t *testing.T) { testSplits(t, newTestRouting) }

// testSplits fills the table with 32 nodes chosen so that buckets split in a
// known pattern, verifies exactly which nodes are retained, then reports
// failures to confirm replacement-cache entries backfill the vacated slots.
// NOTE(review): the insertion order below is significant — it determines
// which nodes land in full buckets versus the replacement cache.
func testSplits(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("55", "5"), 5, 2, 0)
	defer ctx.Check(table.Close)
	for _, prefix2 := range "18" {
		for _, prefix1 := range "a69c23f1d7eb5408" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "0")))
		}
	}
	// we just put 32 nodes into the table. the bucket with a differing first
	// bit should be full with 5 nodes. the bucket with the same first bit and
	// differing second bit should be full with 5 nodes. the bucket with the
	// same first two bits and differing third bit should not be full and have
	// 4 nodes (60..., 68..., 70..., 78...). the bucket with the same first
	// three bits should also not be full and have 4 nodes
	// (40..., 48..., 50..., 58...). So we should be able to get no more than
	// 18 nodes back
	nodes, err := table.FindNear(ctx, PadID("55", "5"), 19)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		// bucket 010 (same first three bits)
		NodeFromPrefix("51", "0"), NodeFromPrefix("58", "0"),
		NodeFromPrefix("41", "0"), NodeFromPrefix("48", "0"),
		// bucket 011 (same first two bits)
		NodeFromPrefix("71", "0"), NodeFromPrefix("78", "0"),
		NodeFromPrefix("61", "0"), NodeFromPrefix("68", "0"),
		// bucket 00 (same first bit)
		NodeFromPrefix("11", "0"),
		NodeFromPrefix("01", "0"),
		NodeFromPrefix("31", "0"),
		// 20 is added first of this group, so it's the only one where there's
		// room for the 28, before this bucket is full
		NodeFromPrefix("21", "0"), NodeFromPrefix("28", "0"),
		// bucket 1 (differing first bit)
		NodeFromPrefix("d1", "0"),
		NodeFromPrefix("c1", "0"),
		NodeFromPrefix("f1", "0"),
		NodeFromPrefix("91", "0"),
		NodeFromPrefix("a1", "0"),
		// e and f were added last so that bucket should have been full by then
	}, nodes)
	// let's cause some failures and make sure the replacement cache fills in
	// the gaps
	// bucket 010 shouldn't have anything in its replacement cache
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("41", "0")))
	// bucket 011 shouldn't have anything in its replacement cache
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("68", "0")))
	// bucket 00 should have two things in its replacement cache, 18... is one of them
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("18", "0")))
	// now just one thing in its replacement cache
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("31", "0")))
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("28", "0")))
	// bucket 1 should have two things in its replacement cache
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("a1", "0")))
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("d1", "0")))
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("91", "0")))
	nodes, err = table.FindNear(ctx, PadID("55", "5"), 19)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		// bucket 010
		NodeFromPrefix("51", "0"), NodeFromPrefix("58", "0"),
		NodeFromPrefix("48", "0"),
		// bucket 011
		NodeFromPrefix("71", "0"), NodeFromPrefix("78", "0"),
		NodeFromPrefix("61", "0"),
		// bucket 00
		NodeFromPrefix("11", "0"),
		NodeFromPrefix("01", "0"),
		NodeFromPrefix("08", "0"), // replacement cache
		NodeFromPrefix("21", "0"),
		// bucket 1
		NodeFromPrefix("c1", "0"),
		NodeFromPrefix("f1", "0"),
		NodeFromPrefix("88", "0"), // replacement cache
		NodeFromPrefix("b8", "0"), // replacement cache
	}, nodes)
}
func TestUnbalanced_Routing(t *testing.T)     { testUnbalanced(t, newRouting) }
func TestUnbalanced_TestRouting(t *testing.T) { testUnbalanced(t, newTestRouting) }

// testUnbalanced inserts nodes in ascending order toward the local ID so that
// every insertion is the new closest node, forcing every bucket to split and
// all 32 nodes to be retained.
func testUnbalanced(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("ff", "f"), 5, 2, 0)
	defer ctx.Check(table.Close)
	for _, prefix1 := range "0123456789abcdef" {
		for _, prefix2 := range "18" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "0")))
		}
	}
	// in this case, we've blown out the routing table with a paradoxical
	// case. every node we added should have been the closest node, so this
	// would have forced every bucket to split, and we should have stored all
	// possible nodes.
	nodes, err := table.FindNear(ctx, PadID("ff", "f"), 33)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("f8", "0"), NodeFromPrefix("f1", "0"),
		NodeFromPrefix("e8", "0"), NodeFromPrefix("e1", "0"),
		NodeFromPrefix("d8", "0"), NodeFromPrefix("d1", "0"),
		NodeFromPrefix("c8", "0"), NodeFromPrefix("c1", "0"),
		NodeFromPrefix("b8", "0"), NodeFromPrefix("b1", "0"),
		NodeFromPrefix("a8", "0"), NodeFromPrefix("a1", "0"),
		NodeFromPrefix("98", "0"), NodeFromPrefix("91", "0"),
		NodeFromPrefix("88", "0"), NodeFromPrefix("81", "0"),
		NodeFromPrefix("78", "0"), NodeFromPrefix("71", "0"),
		NodeFromPrefix("68", "0"), NodeFromPrefix("61", "0"),
		NodeFromPrefix("58", "0"), NodeFromPrefix("51", "0"),
		NodeFromPrefix("48", "0"), NodeFromPrefix("41", "0"),
		NodeFromPrefix("38", "0"), NodeFromPrefix("31", "0"),
		NodeFromPrefix("28", "0"), NodeFromPrefix("21", "0"),
		NodeFromPrefix("18", "0"), NodeFromPrefix("11", "0"),
		NodeFromPrefix("08", "0"), NodeFromPrefix("01", "0"),
	}, nodes)
}
func TestQuery_Routing(t *testing.T)     { testQuery(t, newRouting) }
func TestQuery_TestRouting(t *testing.T) { testQuery(t, newTestRouting) }

// testQuery populates the table and checks FindNear at several limits,
// verifying results are ordered nearest-first relative to the query ID.
func testQuery(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("a3", "3"), 5, 2, 0)
	defer ctx.Check(table.Close)
	for _, prefix2 := range "18" {
		for _, prefix1 := range "b4f25c896de03a71" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "f")))
		}
	}
	// limit smaller than the nearest bucket's contents
	nodes, err := table.FindNear(ctx, PadID("c7139", "1"), 2)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("c1", "f"),
		NodeFromPrefix("d1", "f"),
	}, nodes)
	// limit spanning multiple buckets
	nodes, err = table.FindNear(ctx, PadID("c7139", "1"), 7)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("c1", "f"),
		NodeFromPrefix("d1", "f"),
		NodeFromPrefix("e1", "f"),
		NodeFromPrefix("f1", "f"),
		NodeFromPrefix("f8", "f"),
		NodeFromPrefix("81", "f"),
		NodeFromPrefix("88", "f"),
	}, nodes)
	// larger limit reaches still-farther nodes
	nodes, err = table.FindNear(ctx, PadID("c7139", "1"), 10)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("c1", "f"),
		NodeFromPrefix("d1", "f"),
		NodeFromPrefix("e1", "f"),
		NodeFromPrefix("f1", "f"),
		NodeFromPrefix("f8", "f"),
		NodeFromPrefix("81", "f"),
		NodeFromPrefix("88", "f"),
		NodeFromPrefix("91", "f"),
		NodeFromPrefix("98", "f"),
		NodeFromPrefix("a1", "f"),
	}, nodes)
}
// The production table does not implement failure counting, so only the
// testrouting variant runs (see newRouting's panic).
func TestFailureCounting_Routing(t *testing.T)     { t.Skip() }
func TestFailureCounting_TestRouting(t *testing.T) { testFailureCounting(t, newTestRouting) }

// testFailureCounting verifies a node is only evicted after more failures
// than the allowed threshold (2): the first two failures leave the table
// unchanged, the third evicts d1 and promotes e8 from the replacement cache.
func testFailureCounting(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("a3", "3"), 5, 2, 2)
	defer ctx.Check(table.Close)
	for _, prefix2 := range "18" {
		for _, prefix1 := range "b4f25c896de03a71" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "f")))
		}
	}
	// nochange asserts the near set still contains d1 (not yet evicted)
	nochange := func() {
		nodes, err := table.FindNear(ctx, PadID("c7139", "1"), 7)
		require.NoError(t, err)
		requireNodesEqual(t, []*pb.Node{
			NodeFromPrefix("c1", "f"),
			NodeFromPrefix("d1", "f"),
			NodeFromPrefix("e1", "f"),
			NodeFromPrefix("f1", "f"),
			NodeFromPrefix("f8", "f"),
			NodeFromPrefix("81", "f"),
			NodeFromPrefix("88", "f"),
		}, nodes)
	}
	nochange()
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("d1", "f")))
	nochange()
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("d1", "f")))
	nochange()
	// third failure exceeds allowedFailures=2: d1 is dropped, e8 fills in
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("d1", "f")))
	nodes, err := table.FindNear(ctx, PadID("c7139", "1"), 7)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("c1", "f"),
		NodeFromPrefix("e1", "f"),
		NodeFromPrefix("e8", "f"),
		NodeFromPrefix("f1", "f"),
		NodeFromPrefix("f8", "f"),
		NodeFromPrefix("81", "f"),
		NodeFromPrefix("88", "f"),
	}, nodes)
}
func TestUpdateBucket_Routing(t *testing.T)     { testUpdateBucket(t, newRouting) }
func TestUpdateBucket_TestRouting(t *testing.T) { testUpdateBucket(t, newTestRouting) }

// testUpdateBucket checks that a ConnectionSuccess for an already-known node
// updates its stored address in place rather than adding a duplicate.
func testUpdateBucket(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("a3", "3"), 5, 2, 0)
	defer ctx.Check(table.Close)
	for _, prefix2 := range "18" {
		for _, prefix1 := range "b4f25c896de03a71" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "f")))
		}
	}
	nodes, err := table.FindNear(ctx, PadID("c7139", "1"), 1)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("c1", "f"),
	}, nodes)
	// reconnect with the same ID but a new address
	require.NoError(t, table.ConnectionSuccess(ctx,
		Node(PadID("c1", "f"), "new-address:3")))
	nodes, err = table.FindNear(ctx, PadID("c7139", "1"), 1)
	require.NoError(t, err)
	require.Equal(t, 1, len(nodes))
	require.Equal(t, PadID("c1", "f"), nodes[0].Id)
	require.Equal(t, "new-address:3", nodes[0].Address.Address)
}
func TestUpdateCache_Routing(t *testing.T)     { testUpdateCache(t, newRouting) }
func TestUpdateCache_TestRouting(t *testing.T) { testUpdateCache(t, newTestRouting) }

// testUpdateCache checks that a node sitting in the replacement cache has its
// address updated there, and that the updated record is what gets promoted
// into the table when a table member fails.
func testUpdateCache(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	// bucket size 1, cache size 1: inserts beyond the first go to the cache
	table := routingCtor(ctx, PadID("a3", "3"), 1, 1, 0)
	defer ctx.Check(table.Close)
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("81", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("c1", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("41", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("01", "0")))
	// 01 is cached; update its address while cached
	require.NoError(t, table.ConnectionSuccess(ctx, Node(PadID("01", "0"), "new-address:6")))
	// 41 fails, so cached 01 (with its new address) takes its place
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("41", "0")))
	nodes, err := table.FindNear(ctx, PadID("01", "0"), 4)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		Node(PadID("01", "0"), "new-address:6"),
		NodeFromPrefix("81", "0"),
		NodeFromPrefix("c1", "0"),
	}, nodes)
}
func TestFailureUnknownAddress_Routing(t *testing.T)     { testFailureUnknownAddress(t, newRouting) }
func TestFailureUnknownAddress_TestRouting(t *testing.T) { testFailureUnknownAddress(t, newTestRouting) }

// testFailureUnknownAddress checks that a ConnectionFailed report whose
// address differs from the stored record does not evict the stored node.
func testFailureUnknownAddress(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("a3", "3"), 1, 1, 0)
	defer ctx.Check(table.Close)
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("81", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("c1", "0")))
	// 41 is stored with "address:2", not the NodeFromPrefix address
	require.NoError(t, table.ConnectionSuccess(ctx, Node(PadID("41", "0"), "address:2")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("01", "0")))
	// failure reported against 41's *other* address must not evict it
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("41", "0")))
	nodes, err := table.FindNear(ctx, PadID("01", "0"), 4)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		Node(PadID("41", "0"), "address:2"),
		NodeFromPrefix("81", "0"),
		NodeFromPrefix("c1", "0"),
	}, nodes)
}
func TestShrink_Routing(t *testing.T)     { testShrink(t, newRouting) }
func TestShrink_TestRouting(t *testing.T) { testShrink(t, newTestRouting) }

// testShrink blows out the table, fails most of its members, then re-adds a
// balanced set and verifies the table re-fills correctly after shrinking.
func testShrink(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("ff", "f"), 2, 2, 0)
	defer ctx.Check(table.Close)
	// blow out the routing table
	for _, prefix1 := range "0123456789abcdef" {
		for _, prefix2 := range "18" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "0")))
		}
	}
	// delete some of the bad ones
	for _, prefix1 := range "0123456789abcd" {
		for _, prefix2 := range "18" {
			require.NoError(t, table.ConnectionFailed(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "0")))
		}
	}
	// add back some nodes more balanced
	for _, prefix1 := range "3a50" {
		for _, prefix2 := range "19" {
			require.NoError(t, table.ConnectionSuccess(ctx,
				NodeFromPrefix(string([]rune{prefix1, prefix2}), "0")))
		}
	}
	// make sure table filled in alright
	nodes, err := table.FindNear(ctx, PadID("ff", "f"), 13)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("f8", "0"),
		NodeFromPrefix("f1", "0"),
		NodeFromPrefix("e8", "0"),
		NodeFromPrefix("e1", "0"),
		NodeFromPrefix("a9", "0"),
		NodeFromPrefix("a1", "0"),
		NodeFromPrefix("59", "0"),
		NodeFromPrefix("51", "0"),
		NodeFromPrefix("39", "0"),
		NodeFromPrefix("31", "0"),
		NodeFromPrefix("09", "0"),
		NodeFromPrefix("01", "0"),
	}, nodes)
}
func TestReplacementCacheOrder_Routing(t *testing.T)     { testReplacementCacheOrder(t, newRouting) }
func TestReplacementCacheOrder_TestRouting(t *testing.T) { testReplacementCacheOrder(t, newTestRouting) }

// testReplacementCacheOrder checks which cached node replaces a failed table
// member when the replacement cache holds more than one candidate
// (bucket size 1, cache size 2): the most recently cached one wins.
func testReplacementCacheOrder(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	table := routingCtor(ctx, PadID("a3", "3"), 1, 2, 0)
	defer ctx.Check(table.Close)
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("81", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("21", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("c1", "0")))
	// 41 and then 01 land in the same bucket's replacement cache
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("41", "0")))
	require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix("01", "0")))
	// 21 fails: 01 (cached last) takes its slot, not 41
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("21", "0")))
	nodes, err := table.FindNear(ctx, PadID("55", "5"), 4)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("01", "0"),
		NodeFromPrefix("c1", "0"),
		NodeFromPrefix("81", "0"),
	}, nodes)
}
func TestHealSplit_Routing(t *testing.T) { testHealSplit(t, newRouting) }
func TestHealSplit_TestRouting(t *testing.T) { testHealSplit(t, newTestRouting) }

// testHealSplit exercises bucket healing: as routing-table members fail one
// by one, nodes held in the replacement caches must be promoted to keep the
// buckets as full as possible, and later successes must refill the caches.
func testHealSplit(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	// bucketSize=2, cacheSize=2, allowedFailures=0
	table := routingCtor(ctx, PadID("55", "55"), 2, 2, 0)
	defer ctx.Check(table.Close)
	// insert two nodes (pads "0" and "1") for each prefix
	for _, pad := range []string{"0", "1"} {
		for _, prefix := range []string{"ff", "e1", "c1", "54", "56", "57"} {
			require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix(prefix, pad)))
		}
	}
	nodes, err := table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("c1", "0"),
		NodeFromPrefix("ff", "0"),
		NodeFromPrefix("e1", "0"),
	}, nodes)
	// failing c1/0 removes it; nothing promoted yet for its bucket
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("c1", "0")))
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("ff", "0"),
		NodeFromPrefix("e1", "0"),
	}, nodes)
	// failing ff/0 heals the bucket by promoting cached e1/1
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("ff", "0")))
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("e1", "1"),
		NodeFromPrefix("e1", "0"),
	}, nodes)
	// failing e1/0 promotes cached ff/1
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("e1", "0")))
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("ff", "1"),
		NodeFromPrefix("e1", "1"),
	}, nodes)
	// failing e1/1 shrinks the table again — the caches are now exhausted
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("e1", "1")))
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("ff", "1"),
	}, nodes)
	// add a third generation (pad "2") of every prefix to refill buckets/caches
	for _, prefix := range []string{"ff", "e1", "c1", "54", "56", "57"} {
		require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix(prefix, "2")))
	}
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("54", "1"),
		NodeFromPrefix("54", "0"),
		NodeFromPrefix("57", "0"),
		NodeFromPrefix("56", "0"),
		NodeFromPrefix("c1", "1"),
		NodeFromPrefix("c1", "2"),
		NodeFromPrefix("ff", "1"),
		NodeFromPrefix("ff", "2"),
	}, nodes)
}
func TestFullDissimilarBucket_Routing(t *testing.T) { testFullDissimilarBucket(t, newRouting) }
func TestFullDissimilarBucket_TestRouting(t *testing.T) { testFullDissimilarBucket(t, newTestRouting) }

// testFullDissimilarBucket checks that a bucket far from self keeps only
// bucketSize members, and that a failure there is healed from the cache.
func testFullDissimilarBucket(t *testing.T, routingCtor routingCtor) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	// bucketSize=2, cacheSize=2, allowedFailures=0
	table := routingCtor(ctx, PadID("55", "55"), 2, 2, 0)
	defer ctx.Check(table.Close)
	// all four nodes land in the same dissimilar bucket; only the first two
	// become members, the rest go to the replacement cache
	for _, prefix := range []string{"d1", "c1", "f1", "e1"} {
		require.NoError(t, table.ConnectionSuccess(ctx, NodeFromPrefix(prefix, "0")))
	}
	nodes, err := table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("d1", "0"),
		NodeFromPrefix("c1", "0"),
	}, nodes)
	// failing c1 promotes the most recently cached node (e1)
	require.NoError(t, table.ConnectionFailed(ctx, NodeFromPrefix("c1", "0")))
	nodes, err = table.FindNear(ctx, PadID("55", "55"), 9)
	require.NoError(t, err)
	requireNodesEqual(t, []*pb.Node{
		NodeFromPrefix("d1", "0"),
		NodeFromPrefix("e1", "0"),
	}, nodes)
}

View File

@ -1,235 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package kademlia
import (
"bytes"
"context"
"fmt"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testrand"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
// TestLocal checks that Local() reports the id the table was created with.
func TestLocal(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	assert.Equal(t, rt.Local().Id.Bytes()[:2], []byte("AA"))
}

// TestK checks that K() returns the configured bucket size (kademlia k).
func TestK(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	k := rt.K()
	assert.Equal(t, rt.bucketSize, k)
}

// TestCacheSize checks that CacheSize() returns the replacement cache size.
func TestCacheSize(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	expected := rt.rcBucketSize
	result := rt.CacheSize()
	assert.Equal(t, expected, result)
}
// TestGetBucket verifies that after adding a second node, GetNodes returns
// both nodes of the shared kbucket in order.
func TestGetBucket(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	rt := createRoutingTable(ctx, teststorj.NodeIDFromString("AA"))
	defer ctx.Check(rt.Close)
	node := teststorj.MockNode("AA")
	node2 := teststorj.MockNode("BB")
	ok, err := rt.addNode(ctx, node2)
	assert.True(t, ok)
	assert.NoError(t, err)
	cases := []struct {
		nodeID   storj.NodeID
		expected []*pb.Node
		ok       bool
	}{
		{nodeID: node.Id,
			expected: []*pb.Node{node, node2},
			ok:       true,
		},
		{nodeID: node2.Id,
			expected: []*pb.Node{node, node2},
			ok:       true,
		},
	}
	for i, v := range cases {
		// NOTE(review): the lookup always uses node2.Id, not v.nodeID — the
		// per-case nodeID field appears unused; verify this is intentional.
		b, e := rt.GetNodes(ctx, node2.Id)
		for j, w := range v.expected {
			if !assert.True(t, bytes.Equal(w.Id.Bytes(), b[j].Id.Bytes())) {
				t.Logf("case %v failed expected: ", i)
			}
		}
		if !assert.Equal(t, v.ok, e) {
			t.Logf("case %v failed ok: ", i)
		}
	}
}
// RandomNode returns a pb.Node with a freshly generated random node id and
// all other fields zero-valued.
func RandomNode() pb.Node {
	node := pb.Node{}
	node.Id = testrand.NodeID()
	return node
}
// TestKademliaFindNear exercises FindNear over a grid of node counts and
// result limits: every node the table accepted must come back in XOR-distance
// order relative to the query target, capped at the requested limit.
func TestKademliaFindNear(t *testing.T) {
	ctx := context.Background()
	testFunc := func(t *testing.T, testNodeCount, limit int) {
		selfNode := RandomNode()
		rt := createRoutingTable(ctx, selfNode.Id)
		expectedIDs := make([]storj.NodeID, 0)
		for x := 0; x < testNodeCount; x++ {
			n := RandomNode()
			ok, err := rt.addNode(ctx, &n)
			require.NoError(t, err)
			if ok { // buckets were full
				expectedIDs = append(expectedIDs, n.Id)
			}
		}
		if testNodeCount > 0 && limit > 0 {
			require.True(t, len(expectedIDs) > 0)
		}
		// makes sure our target is like self, to keep close nodes
		targetNode := pb.Node{Id: selfNode.Id}
		targetNode.Id[storj.NodeIDSize-1] ^= 1 // flip lowest bit
		sortByXOR(expectedIDs, targetNode.Id)
		results, err := rt.FindNear(ctx, targetNode.Id, limit)
		require.NoError(t, err)
		// the result count is min(len(expectedIDs), limit)
		counts := []int{len(expectedIDs), limit}
		sort.Ints(counts)
		require.Equal(t, counts[0], len(results))
		for i, result := range results {
			require.Equal(t, result.Id.String(), expectedIDs[i].String(), fmt.Sprintf("item %d", i))
		}
	}
	// fix: the outer loop variable was misspelled "nodeodeCount"
	for _, nodeCount := range []int{0, 1, 10, 100} {
		testNodeCount := nodeCount // capture per-iteration copy for the subtest closure
		for _, limit := range []int{0, 1, 10, 100} {
			l := limit
			t.Run(fmt.Sprintf("test %d %d", testNodeCount, l),
				func(t *testing.T) { testFunc(t, testNodeCount, l) })
		}
	}
}
// TestConnectionSuccess verifies ConnectionSuccess both updates an existing
// node's address and creates an entry for a previously unseen node.
func TestConnectionSuccess(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	id := teststorj.NodeIDFromString("AA")
	rt := createRoutingTable(ctx, id)
	defer ctx.Check(rt.Close)
	id2 := teststorj.NodeIDFromString("BB")
	address1 := &pb.NodeAddress{Address: "a"}
	address2 := &pb.NodeAddress{Address: "b"}
	node1 := &pb.Node{Id: id, Address: address1}
	node2 := &pb.Node{Id: id2, Address: address2}
	cases := []struct {
		testID  string
		node    *pb.Node
		id      storj.NodeID
		address *pb.NodeAddress
	}{
		{testID: "Update Node", // id already exists in the table
			node:    node1,
			id:      id,
			address: address1,
		},
		{testID: "Create Node", // id2 is new to the table
			node:    node2,
			id:      id2,
			address: address2,
		},
	}
	for _, c := range cases {
		testCase := c
		t.Run(testCase.testID, func(t *testing.T) {
			err := rt.ConnectionSuccess(ctx, testCase.node)
			assert.NoError(t, err)
			// the node must be persisted in the bucket DB with its address
			v, err := rt.nodeBucketDB.Get(ctx, testCase.id.Bytes())
			assert.NoError(t, err)
			n, err := unmarshalNodes([]storage.Value{v})
			assert.NoError(t, err)
			assert.Equal(t, testCase.address.Address, n[0].Address.Address)
		})
	}
}
// TestConnectionFailed verifies that a failed connection removes the node
// from the bucket DB, so a subsequent Get errors.
func TestConnectionFailed(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	id := teststorj.NodeIDFromString("AA")
	node := &pb.Node{Id: id}
	rt := createRoutingTable(ctx, id)
	defer ctx.Check(rt.Close)
	err := rt.ConnectionFailed(ctx, node)
	assert.NoError(t, err)
	// the entry must be gone from the DB after the failure
	v, err := rt.nodeBucketDB.Get(ctx, id.Bytes())
	assert.Error(t, err)
	assert.Nil(t, v)
}
// TestSetBucketTimestamp verifies that a bucket's timestamp can be written
// via createOrUpdateKBucket and then overwritten via SetBucketTimestamp,
// with GetBucketTimestamp reading back each value.
func TestSetBucketTimestamp(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	id := teststorj.NodeIDFromString("AA")
	rt := createRoutingTable(ctx, id)
	defer ctx.Check(rt.Close)
	now := time.Now().UTC()
	err := rt.createOrUpdateKBucket(ctx, keyToBucketID(id.Bytes()), now)
	assert.NoError(t, err)
	ti, err := rt.GetBucketTimestamp(ctx, id.Bytes())
	assert.Equal(t, now, ti)
	assert.NoError(t, err)
	// overwrite with a later timestamp and confirm the update took effect
	now = time.Now().UTC()
	err = rt.SetBucketTimestamp(ctx, id.Bytes(), now)
	assert.NoError(t, err)
	ti, err = rt.GetBucketTimestamp(ctx, id.Bytes())
	assert.Equal(t, now, ti)
	assert.NoError(t, err)
}

// TestGetBucketTimestamp verifies GetBucketTimestamp returns the timestamp
// recorded when the kbucket was created.
func TestGetBucketTimestamp(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	id := teststorj.NodeIDFromString("AA")
	rt := createRoutingTable(ctx, id)
	defer ctx.Check(rt.Close)
	now := time.Now().UTC()
	err := rt.createOrUpdateKBucket(ctx, keyToBucketID(id.Bytes()), now)
	assert.NoError(t, err)
	ti, err := rt.GetBucketTimestamp(ctx, id.Bytes())
	assert.Equal(t, now, ti)
	assert.NoError(t, err)
}

View File

@ -1,102 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package routinggraph
import (
"fmt"
"io"
"storj.io/storj/pkg/pb"
)
// dot accumulates DOT-format output on an io.Writer, remembering the first
// write error so callers can chain printf calls without per-call checks.
type dot struct {
	out io.Writer
	err error // first error encountered; subsequent printf calls are no-ops
}

// printf writes formatted output, becoming a no-op once an error occurred.
func (dot *dot) printf(format string, args ...interface{}) {
	if dot.err != nil {
		return
	}
	_, dot.err = fmt.Fprintf(dot.out, format, args...)
}

// Draw writes the routing graph obtained using a GetBucketListResponse in the specified file
func Draw(w io.Writer, info *pb.GetBucketListResponse) (err error) {
	dot := dot{out: w}
	dot.printf(`digraph{node [shape=plaintext, fontname="Courier"];edge [dir=none];`)
	// close the digraph even if an intermediate write failed (printf no-ops)
	defer dot.printf("}")
	buckets := info.GetBuckets()
	dot.addBuckets(buckets, 0, "")
	return dot.err
}
// addBuckets recursively renders the bucket list as a binary trie: a single
// bucket becomes a leaf, otherwise the list is split on the bit at the
// current depth and each half is rendered under an extended prefix.
// NOTE(review): an empty slice recurses with two empty halves — presumably
// callers never pass zero buckets; confirm against Draw's input.
func (dot *dot) addBuckets(b []*pb.GetBucketListResponse_Bucket, depth int, inPrefix string) {
	if len(b) == 1 {
		dot.Leaf(b[0], inPrefix)
		return
	}
	left, right := splitBucket(b, depth)
	outPrefix := extendPrefix(inPrefix, false)
	// interior trie node rendered as a point
	dot.printf("b%s [shape=point];", inPrefix)
	dot.addBuckets(left, depth+1, outPrefix)
	dot.Edge(inPrefix, outPrefix, "0")
	outPrefix = extendPrefix(inPrefix, true)
	dot.addBuckets(right, depth+1, outPrefix)
	dot.Edge(inPrefix, outPrefix, "1")
}
// Edge draws a labeled edge between two trie nodes identified by prefix.
func (dot *dot) Edge(inPrefix, outPrefix, label string) {
	dot.printf(`b%s -> b%s [label=<<b><font point-size="18">%s</font></b>>];`, inPrefix, outPrefix, label)
}

// Leaf renders one bucket as an HTML-like DOT table listing its routing
// nodes followed by its replacement-cache nodes.
func (dot *dot) Leaf(b *pb.GetBucketListResponse_Bucket, prefix string) {
	dot.printf(`b%s [label=< <table cellborder="0"><tr><td cellspacing="0" sides="b" border="1" colspan="2"><b><font point-size="18"> %s </font></b></td></tr>`, prefix, prefix)
	defer dot.printf("</table>>];")
	dot.printf(`<tr><td colspan="2" align="left"><i><b><font point-size="16">routing:</font></b></i></td></tr>`)
	routingNodes := b.GetRoutingNodes()
	for _, n := range routingNodes {
		dot.Node(n)
	}
	dot.printf(`<tr><td colspan="2"></td></tr>`)
	dot.printf(`<tr><td colspan="2" align="left"><i><b><font point-size="16">cache:</font></b></i></td></tr>`)
	cachedNodes := b.GetCachedNodes()
	for _, c := range cachedNodes {
		dot.Node(c)
	}
}

// Node renders a single node row: its id and, in italics, its address.
func (dot *dot) Node(node *pb.Node) {
	dot.printf(`<tr><td align="left"><font point-size="14">%s</font></td><td sides="r" align="left"><i>(%s)</i></td></tr>`, node.Id, node.Address.Address)
}
// splitBucket partitions buckets by the bit of their BucketId at bitDepth:
// buckets with the bit clear go left, with the bit set go right.
func splitBucket(buckets []*pb.GetBucketListResponse_Bucket, bitDepth int) (left, right []*pb.GetBucketListResponse_Bucket) {
	for _, bucket := range buckets {
		bID := bucket.BucketId
		byteDepth := bitDepth / 8
		bitOffset := bitDepth % 8
		power := uint(7 - bitOffset) // bit 0 of a byte is its most significant bit
		bitMask := byte(1 << power)
		b := bID[byteDepth]
		if b&bitMask > 0 {
			right = append(right, bucket)
		} else {
			left = append(left, bucket)
		}
	}
	return
}
// extendPrefix appends a single bit character to prefix: "1" when bit is
// set, "0" otherwise.
func extendPrefix(prefix string, bit bool) string {
	suffix := "0"
	if bit {
		suffix = "1"
	}
	return prefix + suffix
}

View File

@ -1,315 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package testrouting
import (
"context"
"sort"
"sync"
"time"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
var (
mon = monkit.Package()
)
// nodeData is the bookkeeping record the table keeps for a single node.
type nodeData struct {
	node        *pb.Node
	ordering    int64     // insertion counter; used to replay placement order in makeTree
	lastUpdated time.Time // time of the most recent successful connection
	fails       int       // failures since the last success
	inCache     bool      // true while the node sits in a bucket's replacement cache
}

// Table is a routing table that tries to be as correct as possible at
// the expense of performance.
type Table struct {
	self            storj.NodeID
	bucketSize      int
	cacheSize       int
	allowedFailures int

	// mu guards counter, nodes and splits
	mu      sync.Mutex
	counter int64
	nodes   map[storj.NodeID]*nodeData
	splits  map[string]bool
}

// New creates a new Table. self is the owning node's node id, bucketSize is
// the kademlia k value, cacheSize is the size of each bucket's replacement
// cache, and allowedFailures is the number of failures on a given node before
// the node is removed from the table.
func New(self storj.NodeID, bucketSize, cacheSize, allowedFailures int) *Table {
	return &Table{
		self:            self,
		bucketSize:      bucketSize,
		cacheSize:       cacheSize,
		allowedFailures: allowedFailures,
		nodes:           map[storj.NodeID]*nodeData{},
		splits:          map[string]bool{},
	}
}
// K returns the Table's routing depth, or Kademlia k value
func (t *Table) K() int { return t.bucketSize }

// CacheSize returns the size of replacement cache
func (t *Table) CacheSize() int { return t.cacheSize }
// ConnectionSuccess should be called whenever a node is successfully connected
// to. It will add or update the node's entry in the routing table.
func (t *Table) ConnectionSuccess(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	t.mu.Lock()
	defer t.mu.Unlock()

	// don't add ourselves
	if node.Id == t.self {
		return nil
	}

	// if the node is already here, update it
	if cell, exists := t.nodes[node.Id]; exists {
		cell.node = node
		cell.lastUpdated = time.Now()
		cell.fails = 0
		// skip placement order and cache status
		return nil
	}

	// add unconditionally (it might be going into a replacement cache)
	t.nodes[node.Id] = &nodeData{
		node:        node,
		ordering:    t.counter,
		lastUpdated: time.Now(),
		fails:       0,
		// makeTree within preserveInvariants might promote this to true
		inCache: false,
	}
	t.counter++

	// rebalance buckets/caches now that membership changed
	t.preserveInvariants()
	return nil
}
// ConnectionFailed should be called whenever a node can't be contacted.
// If a node fails more than allowedFailures times, it will be removed from
// the routing table. The failure count is reset every successful connection.
func (t *Table) ConnectionFailed(ctx context.Context, node *pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)
	t.mu.Lock()
	defer t.mu.Unlock()

	// if the node exists and the failure is with the address we have, record
	// a failure
	if data, exists := t.nodes[node.Id]; exists &&
		pb.AddressEqual(data.node.Address, node.Address) {
		data.fails++ //TODO: we may not need this
		// if we've failed too many times, remove the node
		if data.fails > t.allowedFailures {
			delete(t.nodes, node.Id)

			// removal may open a member slot; refill from replacement caches
			t.preserveInvariants()
		}
	}
	return nil
}
// FindNear will return up to limit nodes in the routing table ordered by
// kademlia xor distance from the given id.
func (t *Table) FindNear(ctx context.Context, id storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	t.mu.Lock()
	defer t.mu.Unlock()

	// find all non-cache nodes (replacement-cache entries are not returned)
	nodes := make([]*nodeData, 0, len(t.nodes))
	for _, node := range t.nodes {
		if !node.inCache {
			nodes = append(nodes, node)
		}
	}

	// sort by distance
	sort.Sort(nodeDataDistanceSorter{self: id, nodes: nodes})

	// return up to limit nodes
	if limit > len(nodes) {
		limit = len(nodes)
	}
	rv := make([]*pb.Node, 0, limit)
	for _, data := range nodes[:limit] {
		rv = append(rv, data.node)
	}
	return rv, nil
}
// Local returns the local node
func (t *Table) Local() overlay.NodeDossier {
	// the routing table has no idea what the right address of ourself is,
	// so this is the wrong place to get this information. we could return
	// our own id only?
	panic("Unimplementable")
}

// Self returns the node's configured node id.
func (t *Table) Self() storj.NodeID { return t.self }

// MaxBucketDepth returns the largest depth of the routing table tree. This
// is useful for determining which buckets should be refreshed.
func (t *Table) MaxBucketDepth() (int, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// walk every leaf bucket and track the deepest one seen
	var maxDepth int
	t.walkLeaves(t.makeTree(), func(b *bucket) {
		if b.depth > maxDepth {
			maxDepth = b.depth
		}
	})
	return maxDepth, nil
}
// GetNodes retrieves nodes within the same kbucket as the given node id.
// Not implemented for this test routing table.
func (t *Table) GetNodes(id storj.NodeID) (nodes []*pb.Node, ok bool) {
	panic("TODO")
}

// GetBucketIds returns a storage.Keys type of bucket ID's in the Kademlia instance.
// Not implemented for this test routing table.
func (t *Table) GetBucketIds(context.Context) (storage.Keys, error) {
	panic("TODO")
}

// SetBucketTimestamp records the time of the last node lookup for a bucket.
// Not implemented for this test routing table.
func (t *Table) SetBucketTimestamp(context.Context, []byte, time.Time) error {
	panic("TODO")
}

// GetBucketTimestamp retrieves time of the last node lookup for a bucket.
// Not implemented for this test routing table.
func (t *Table) GetBucketTimestamp(context.Context, []byte) (time.Time, error) {
	panic("TODO")
}
// preserveInvariants re-establishes the two per-bucket invariants after any
// membership change: (1) a bucket with free member slots is refilled from
// the newest entries of its replacement cache, and (2) each cache is capped
// at cacheSize by discarding its oldest entries. Callers must hold t.mu.
func (t *Table) preserveInvariants() {
	t.walkLeaves(t.makeTree(), func(b *bucket) {
		// pull the latest nodes out of the replacement caches for incomplete
		// buckets
		for len(b.cache) > 0 && len(b.nodes) < t.bucketSize {
			recentNode := b.cache[len(b.cache)-1]
			recentNode.inCache = false
			b.cache = b.cache[:len(b.cache)-1]
			b.nodes = append(b.nodes, recentNode)
		}

		// prune remaining replacement cache entries
		if len(b.cache) > t.cacheSize {
			for _, node := range b.cache[:len(b.cache)-t.cacheSize] {
				delete(t.nodes, node.node.Id)
			}
		}
	})
}
// bucket is one node of the reconstructed routing-table binary tree. A leaf
// holds members and a replacement cache; an interior node (one recorded in
// t.splits) holds the two child subtrees instead.
type bucket struct {
	prefix string // the bit-string path from the root, e.g. "010"
	depth  int

	similar    *bucket // child on the same side as t.self's bit at this depth
	dissimilar *bucket // child on the opposite side

	nodes []*nodeData // bucket members (leaf only)
	cache []*nodeData // replacement cache (leaf only)
}

// walkLeaves calls fn for every leaf bucket reachable from b, skipping
// interior (split) nodes.
func (t *Table) walkLeaves(b *bucket, fn func(b *bucket)) {
	if !t.splits[b.prefix] {
		fn(b)
	} else if b.similar != nil {
		t.walkLeaves(b.similar, fn)
		t.walkLeaves(b.dissimilar, fn)
	}
}
// makeTree rebuilds the routing-table binary tree from scratch by replaying
// every known node in its original insertion order. Rebuilding each time
// trades performance for correctness. Callers must hold t.mu.
func (t *Table) makeTree() *bucket {
	// to make sure we get the logic right, we're going to reconstruct the
	// routing table binary tree data structure every time.
	nodes := make([]*nodeData, 0, len(t.nodes))
	for _, node := range t.nodes {
		nodes = append(nodes, node)
	}
	var root bucket

	// we'll replay the nodes in original placement order
	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].ordering < nodes[j].ordering
	})

	// nearest tracks the k nodes closest to self seen so far; add() uses it
	// to decide whether a node belongs in a bucket or its cache
	nearest := make([]*nodeData, 0, t.bucketSize+1)
	for _, node := range nodes {
		// keep track of the nearest k nodes
		nearest = append(nearest, node)
		sort.Sort(nodeDataDistanceSorter{self: t.self, nodes: nearest})
		if len(nearest) > t.bucketSize {
			nearest = nearest[:t.bucketSize]
		}

		t.add(&root, node, false, nearest)
	}
	return &root
}
// add places node into the subtree rooted at b, replaying the kademlia
// placement rules: descend through already-split buckets by the node's bit
// at each depth; fill a leaf up to bucketSize; overflow from a dissimilar
// bucket goes to its cache unless the node is among the k nearest to self,
// in which case the bucket is split and its contents redistributed.
// dissimilar is true once the path has diverged from self's bits; nearest is
// the current set of k nodes closest to self. Callers must hold t.mu.
func (t *Table) add(b *bucket, node *nodeData, dissimilar bool, nearest []*nodeData) {
	if t.splits[b.prefix] {
		// interior node: create children lazily, then descend
		if b.similar == nil {
			similarBit := bitAtDepth(t.self, b.depth)
			b.similar = &bucket{depth: b.depth + 1, prefix: extendPrefix(b.prefix, similarBit)}
			b.dissimilar = &bucket{depth: b.depth + 1, prefix: extendPrefix(b.prefix, !similarBit)}
		}
		if bitAtDepth(node.node.Id, b.depth) == bitAtDepth(t.self, b.depth) {
			t.add(b.similar, node, dissimilar, nearest)
		} else {
			// once we take the dissimilar branch, the flag stays true below
			t.add(b.dissimilar, node, true, nearest)
		}
		return
	}

	// nodes already marked as cached stay in this leaf's cache
	if node.inCache {
		b.cache = append(b.cache, node)
		return
	}

	// room left in the bucket: the node becomes a member
	if len(b.nodes) < t.bucketSize {
		node.inCache = false
		b.nodes = append(b.nodes, node)
		return
	}

	// a full dissimilar bucket overflows into the replacement cache unless
	// the node is one of the k nearest to self (which forces a split)
	if dissimilar && !isNearest(node.node.Id, nearest) {
		node.inCache = true
		b.cache = append(b.cache, node)
		return
	}

	// split this leaf and re-add its former members plus the new node
	t.splits[b.prefix] = true
	if len(b.cache) > 0 {
		// only similar-side buckets split, and those never hold cache entries
		panic("unreachable codepath")
	}
	nodes := b.nodes
	b.nodes = nil
	for _, existingNode := range nodes {
		t.add(b, existingNode, dissimilar, nearest)
	}
	t.add(b, node, dissimilar, nearest)
}

// Close closes without closing dependencies
func (t *Table) Close() error { return nil }

View File

@ -1,60 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package testrouting
import "storj.io/storj/pkg/storj"
// nodeDataDistanceSorter implements sort.Interface, ordering nodes by XOR
// distance from self (closest first).
type nodeDataDistanceSorter struct {
	self  storj.NodeID
	nodes []*nodeData
}

func (s nodeDataDistanceSorter) Len() int { return len(s.nodes) }

func (s nodeDataDistanceSorter) Swap(i, j int) {
	s.nodes[i], s.nodes[j] = s.nodes[j], s.nodes[i]
}

func (s nodeDataDistanceSorter) Less(i, j int) bool {
	return compareByXor(s.nodes[i].node.Id, s.nodes[j].node.Id, s.self) < 0
}
// compareByXor orders left and right by XOR distance from reference,
// returning -1, 0 or 1 in the manner of bytes.Compare.
func compareByXor(left, right, reference storj.NodeID) int {
	for i := range reference {
		a := left[i] ^ reference[i]
		b := right[i] ^ reference[i]
		switch {
		case a < b:
			return -1
		case a > b:
			return 1
		}
	}
	return 0
}
// bitAtDepth reports whether the bit at position bitDepth of id is set,
// counting from the most significant bit of the first byte.
func bitAtDepth(id storj.NodeID, bitDepth int) bool {
	// locate the containing byte, then mask the bit within it
	// (bit 0 of a byte is its most significant bit)
	mask := byte(1) << uint(7-bitDepth%8)
	return id[bitDepth/8]&mask != 0
}
// extendPrefix appends one bit character ("1" for true, "0" for false) to
// the given bit-string prefix.
func extendPrefix(prefix string, bit bool) string {
	if !bit {
		return prefix + "0"
	}
	return prefix + "1"
}
// isNearest reports whether id is among the given set of nearest nodes.
func isNearest(id storj.NodeID, nearest []*nodeData) bool {
	for _, near := range nearest {
		if near.node.Id == id {
			return true
		}
	}
	return false
}

View File

@ -1,48 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package testrouting
import (
"bytes"
"encoding/hex"
"fmt"
"io"
)
// Graph writes a DOT format visual graph description of the routing table to w
func (t *Table) Graph(w io.Writer) error {
t.mu.Lock()
defer t.mu.Unlock()
var buf bytes.Buffer
buf.Write([]byte("digraph{node [shape=box];"))
t.graph(&buf, t.makeTree())
buf.Write([]byte("}\n"))
_, err := buf.WriteTo(w)
return err
}
// graph recursively renders the subtree rooted at b into buf: split buckets
// become labeled interior nodes with edges to both children, leaves become
// boxes listing member ids under "routing:" and cached ids under "cache:".
func (t *Table) graph(buf *bytes.Buffer, b *bucket) {
	if t.splits[b.prefix] {
		fmt.Fprintf(buf, "b%s [label=%q];", b.prefix, b.prefix)
		if b.similar != nil {
			t.graph(buf, b.similar)
			t.graph(buf, b.dissimilar)
			fmt.Fprintf(buf, "b%s -> {b%s, b%s};",
				b.prefix, b.similar.prefix, b.dissimilar.prefix)
		}
		return
	}
	// b.prefix is only ever 0s or 1s, so we don't need escaping below.
	fmt.Fprintf(buf, "b%s [label=\"%s\nrouting:\\l", b.prefix, b.prefix)
	for _, node := range b.nodes {
		fmt.Fprintf(buf, " %s\\l", hex.EncodeToString(node.node.Id[:]))
	}
	fmt.Fprintf(buf, "cache:\\l")
	for _, node := range b.cache {
		fmt.Fprintf(buf, " %s\\l", hex.EncodeToString(node.node.Id[:]))
	}
	fmt.Fprintf(buf, "\"];")
}

View File

@ -1,79 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"math/bits"
"sort"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
// compareByXor compares left, right xorred by reference
func compareByXor(left, right, reference storj.NodeID) int {
for i, r := range reference {
a, b := left[i]^r, right[i]^r
if a != b {
if a < b {
return -1
}
return 1
}
}
return 0
}
// sortByXOR sorts nodeIDs in place by ascending XOR distance from ref.
func sortByXOR(nodeIDs storj.NodeIDList, ref storj.NodeID) {
	sort.Slice(nodeIDs, func(i, k int) bool {
		return compareByXor(nodeIDs[i], nodeIDs[k], ref) < 0
	})
}
// keyToBucketID converts a storage key into a bucketID by copying its bytes.
func keyToBucketID(key storage.Key) (bID bucketID) {
	copy(bID[:], key)
	return bID
}

// xorNodeID returns the xor of each byte in NodeID
func xorNodeID(a, b storj.NodeID) storj.NodeID {
	r := storj.NodeID{}
	for i, av := range a {
		r[i] = av ^ b[i]
	}
	return r
}

// xorBucketID returns the xor of each byte in bucketID
func xorBucketID(a, b bucketID) bucketID {
	r := bucketID{}
	for i, av := range a {
		r[i] = av ^ b[i]
	}
	return r
}
// determineDifferingBitIndex returns the index of the last differing bit
// between bID and comparisonID, scanning from the most significant bit.
// It returns -1 when only the very first bit differs (xor equals
// firstBucketID), and an error with sentinel -2 when the ids are equal.
// An all-zero comparisonID is treated as firstBucketID.
func determineDifferingBitIndex(bID, comparisonID bucketID) (int, error) {
	if bID == comparisonID {
		return -2, RoutingErr.New("compared two equivalent k bucket ids")
	}
	if comparisonID == emptyBucketID {
		comparisonID = firstBucketID
	}

	xorID := xorBucketID(bID, comparisonID)
	if xorID == firstBucketID {
		return -1, nil
	}

	// find the first nonzero byte, then the position of its lowest set bit
	for i, v := range xorID {
		if v != 0 {
			return i*8 + 7 - bits.TrailingZeros8(v), nil
		}
	}

	return -1, nil
}

View File

@ -1,123 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kademlia
import (
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/testrand"
"storj.io/storj/pkg/storj"
)
// TestSortByXOR checks that sortByXOR orders ids by ascending XOR distance
// from the reference id n1.
func TestSortByXOR(t *testing.T) {
	n1 := storj.NodeID{127, 255} //xor 0
	n2 := storj.NodeID{143, 255} //xor 240
	n3 := storj.NodeID{255, 255} //xor 128
	n4 := storj.NodeID{191, 255} //xor 192
	n5 := storj.NodeID{133, 255} //xor 250
	unsorted := storj.NodeIDList{n1, n5, n2, n4, n3}
	sortByXOR(unsorted, n1)
	// expected order follows the xor values noted above: 0,128,192,240,250
	sorted := storj.NodeIDList{n1, n3, n4, n2, n5}
	assert.Equal(t, sorted, unsorted)
}
// BenchmarkSortByXOR measures sorting l random node ids by XOR distance from
// a random reference, reshuffling before each iteration.
func BenchmarkSortByXOR(b *testing.B) {
	l := 1000
	// fix: was make([]storj.NodeID, l) followed by append, which produced a
	// 2*l slice whose first l entries were zero-valued NodeIDs — the
	// benchmark sorted twice as many elements as intended.
	nodes := make([]storj.NodeID, 0, l)
	for k := 0; k < l; k++ {
		nodes = append(nodes, testrand.NodeID())
	}
	b.ResetTimer()
	for m := 0; m < b.N; m++ {
		// shuffle outside the sort so each iteration sorts unsorted input
		rand.Shuffle(len(nodes), func(i, k int) {
			nodes[i], nodes[k] = nodes[k], nodes[i]
		})
		sortByXOR(nodes, testrand.NodeID())
	}
}
// TestDetermineDifferingBitIndex table-tests determineDifferingBitIndex over
// ids that differ only in their first byte, then checks the equal-ids error.
// NOTE(review): the case pairs (95,79) and (95,63) each appear twice —
// presumably copy-paste duplication; harmless but worth confirming.
func TestDetermineDifferingBitIndex(t *testing.T) {
	// filledID returns firstBucketID with its first byte replaced by a
	filledID := func(a byte) bucketID {
		id := firstBucketID
		id[0] = a
		return id
	}

	cases := []struct {
		bucketID bucketID
		key      bucketID
		expected int
	}{
		{
			bucketID: filledID(191),
			key:      filledID(255),
			expected: 1,
		},
		{
			bucketID: filledID(255),
			key:      filledID(191),
			expected: 1,
		},
		{
			bucketID: filledID(95),
			key:      filledID(127),
			expected: 2,
		},
		{
			bucketID: filledID(95),
			key:      filledID(79),
			expected: 3,
		},
		{
			bucketID: filledID(95),
			key:      filledID(63),
			expected: 2,
		},
		{
			bucketID: filledID(95),
			key:      filledID(79),
			expected: 3,
		},
		{
			bucketID: filledID(255),
			key:      bucketID{},
			expected: -1,
		},
		{
			bucketID: filledID(127),
			key:      bucketID{},
			expected: 0,
		},
		{
			bucketID: filledID(63),
			key:      bucketID{},
			expected: 1,
		},
		{
			bucketID: filledID(31),
			key:      bucketID{},
			expected: 2,
		},
		{
			bucketID: filledID(95),
			key:      filledID(63),
			expected: 2,
		},
	}
	for i, c := range cases {
		t.Logf("#%d. bucketID:%v key:%v\n", i, c.bucketID, c.key)
		diff, err := determineDifferingBitIndex(c.bucketID, c.key)
		assert.NoError(t, err)
		assert.Equal(t, c.expected, diff)
	}

	// comparing an id against itself must error with the -2 sentinel
	diff, err := determineDifferingBitIndex(filledID(255), filledID(255))
	assert.True(t, RoutingErr.Has(err))
	assert.Equal(t, diff, -2)
}

View File

@ -252,6 +252,7 @@ func (m *CountNodesRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_CountNodesRequest proto.InternalMessageInfo
// Deprecated: Do not use.
type GetBucketListRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -282,6 +283,7 @@ func (m *GetBucketListRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_GetBucketListRequest proto.InternalMessageInfo
// Deprecated: Do not use.
type GetBucketListResponse struct {
Buckets []*GetBucketListResponse_Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -368,6 +370,8 @@ func (m *GetBucketListResponse_Bucket) GetCachedNodes() []*Node {
}
// GetBuckets
//
// Deprecated: Do not use.
type GetBucketsRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -398,6 +402,7 @@ func (m *GetBucketsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_GetBucketsRequest proto.InternalMessageInfo
// Deprecated: Do not use.
type GetBucketsResponse struct {
Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
Ids []NodeID `protobuf:"bytes,2,rep,name=ids,proto3,customtype=NodeID" json:"ids,omitempty"`
@ -438,6 +443,8 @@ func (m *GetBucketsResponse) GetTotal() int64 {
}
// GetBucket
//
// Deprecated: Do not use.
type GetBucketRequest struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -469,6 +476,7 @@ func (m *GetBucketRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_GetBucketRequest proto.InternalMessageInfo
// Deprecated: Do not use.
type GetBucketResponse struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Nodes []*Node `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"`
@ -508,6 +516,7 @@ func (m *GetBucketResponse) GetNodes() []*Node {
return nil
}
// Deprecated: Do not use.
type BucketList struct {
Nodes []*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -547,6 +556,8 @@ func (m *BucketList) GetNodes() []*Node {
}
// PingNode
//
// Deprecated: Do not use.
type PingNodeRequest struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
@ -586,6 +597,7 @@ func (m *PingNodeRequest) GetAddress() string {
return ""
}
// Deprecated: Do not use.
type PingNodeResponse struct {
Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -624,6 +636,7 @@ func (m *PingNodeResponse) GetOk() bool {
return false
}
// Deprecated: Do not use.
type LookupNodeRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
@ -670,6 +683,7 @@ func (m *LookupNodeRequest) GetAddress() string {
return ""
}
// Deprecated: Do not use.
type LookupNodeResponse struct {
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
Meta *NodeMetadata `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"`
@ -716,6 +730,7 @@ func (m *LookupNodeResponse) GetMeta() *NodeMetadata {
return nil
}
// Deprecated: Do not use.
type NodeInfoRequest struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Address *NodeAddress `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
@ -755,6 +770,7 @@ func (m *NodeInfoRequest) GetAddress() *NodeAddress {
return nil
}
// Deprecated: Do not use.
type NodeInfoResponse struct {
Type NodeType `protobuf:"varint,1,opt,name=type,proto3,enum=node.NodeType" json:"type,omitempty"`
Operator *NodeOperator `protobuf:"bytes,2,opt,name=operator,proto3" json:"operator,omitempty"`
@ -817,6 +833,7 @@ func (m *NodeInfoResponse) GetVersion() *NodeVersion {
return nil
}
// Deprecated: Do not use.
type FindNearRequest struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Start NodeID `protobuf:"bytes,2,opt,name=start,proto3,customtype=NodeID" json:"start"`
@ -857,6 +874,7 @@ func (m *FindNearRequest) GetLimit() int64 {
return 0
}
// Deprecated: Do not use.
type FindNearResponse struct {
Nodes []*Node `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -1104,7 +1122,7 @@ var xxx_messageInfo_DashboardRequest proto.InternalMessageInfo
type DashboardResponse struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
NodeConnections int64 `protobuf:"varint,2,opt,name=node_connections,json=nodeConnections,proto3" json:"node_connections,omitempty"`
BootstrapAddress string `protobuf:"bytes,3,opt,name=bootstrap_address,json=bootstrapAddress,proto3" json:"bootstrap_address,omitempty"`
BootstrapAddress string `protobuf:"bytes,3,opt,name=bootstrap_address,json=bootstrapAddress,proto3" json:"bootstrap_address,omitempty"` // Deprecated: Do not use.
InternalAddress string `protobuf:"bytes,4,opt,name=internal_address,json=internalAddress,proto3" json:"internal_address,omitempty"`
ExternalAddress string `protobuf:"bytes,5,opt,name=external_address,json=externalAddress,proto3" json:"external_address,omitempty"`
DashboardAddress string `protobuf:"bytes,6,opt,name=dashboard_address,json=dashboardAddress,proto3" json:"dashboard_address,omitempty"`
@ -1150,6 +1168,7 @@ func (m *DashboardResponse) GetNodeConnections() int64 {
return 0
}
// Deprecated: Do not use.
func (m *DashboardResponse) GetBootstrapAddress() string {
if m != nil {
return m.BootstrapAddress
@ -1524,115 +1543,117 @@ func init() {
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) }
var fileDescriptor_a07d9034b2dd9d26 = []byte{
// 1728 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x73, 0x23, 0x49,
0x11, 0x76, 0xeb, 0x65, 0x29, 0x25, 0xeb, 0x51, 0xf6, 0xce, 0x0a, 0xed, 0x8c, 0x65, 0x9a, 0xc7,
0x78, 0xd7, 0x20, 0x2f, 0xda, 0xd9, 0xc3, 0x06, 0xc1, 0xc1, 0xb2, 0x77, 0x76, 0x14, 0x3b, 0xcc,
0x78, 0xda, 0x03, 0x07, 0x62, 0x82, 0x8e, 0x92, 0xaa, 0x2c, 0x35, 0x96, 0xba, 0x7a, 0xba, 0x4b,
0xc3, 0xe8, 0x0f, 0x10, 0x70, 0x82, 0x0b, 0x07, 0xce, 0x04, 0xff, 0x80, 0x13, 0x57, 0x2e, 0xfc,
0x06, 0x0e, 0xc3, 0x0d, 0xee, 0xdc, 0x88, 0xe0, 0x40, 0xd4, 0xa3, 0xab, 0xbb, 0xf5, 0xc0, 0x26,
0x80, 0x9b, 0x3a, 0xf3, 0xcb, 0xac, 0xaf, 0xb2, 0x1e, 0xf9, 0x95, 0xa0, 0xe1, 0xf9, 0x51, 0x40,
0xc7, 0x9c, 0x85, 0xbd, 0x20, 0x64, 0x9c, 0xa1, 0x8a, 0x31, 0x74, 0x60, 0xc2, 0x26, 0x4c, 0x99,
0x3b, 0xe0, 0x33, 0x42, 0xf5, 0xef, 0x46, 0xc0, 0x3c, 0x9f, 0xd3, 0x90, 0x8c, 0xb4, 0xe1, 0x70,
0xc2, 0xd8, 0x64, 0x46, 0x4f, 0xe5, 0xd7, 0x68, 0x71, 0x7d, 0x4a, 0x16, 0x21, 0xe6, 0x1e, 0xf3,
0xb5, 0xbf, 0xbb, 0xea, 0xe7, 0xde, 0x9c, 0x46, 0x1c, 0xcf, 0x03, 0x05, 0xb0, 0x6f, 0xe0, 0xf0,
0xa9, 0x17, 0xf1, 0x61, 0x18, 0xd2, 0x00, 0x87, 0x78, 0x34, 0xa3, 0x57, 0x74, 0x32, 0xa7, 0x3e,
0x8f, 0x1c, 0xfa, 0x7a, 0x41, 0x23, 0x8e, 0x0e, 0xa0, 0x38, 0xf3, 0xe6, 0x1e, 0x6f, 0x5b, 0x47,
0xd6, 0x71, 0xd1, 0x51, 0x1f, 0xe8, 0x13, 0xb8, 0x37, 0xc3, 0x11, 0x77, 0x23, 0x4a, 0x7d, 0x37,
0x52, 0x21, 0x6e, 0x80, 0xf9, 0xb4, 0x9d, 0x3b, 0xb2, 0x8e, 0x6b, 0xce, 0xbe, 0xf0, 0x5e, 0x51,
0xea, 0xeb, 0x74, 0x97, 0x98, 0x4f, 0xed, 0xbf, 0x5a, 0x80, 0xd6, 0x47, 0x42, 0x08, 0x0a, 0x32,
0xd2, 0x92, 0x91, 0xf2, 0x37, 0xfa, 0x0c, 0xea, 0x71, 0x56, 0x42, 0x39, 0xf6, 0x66, 0x32, 0x6f,
0xb5, 0x8f, 0x7a, 0x49, 0x09, 0x2e, 0xd5, 0x2f, 0x67, 0x4f, 0x23, 0x2f, 0x24, 0x10, 0x75, 0xa1,
0x3a, 0x63, 0x11, 0x77, 0x03, 0x8f, 0x8e, 0x69, 0xd4, 0xce, 0x4b, 0xda, 0x20, 0x4c, 0x97, 0xd2,
0x82, 0x7a, 0x20, 0xd9, 0xb9, 0x82, 0x88, 0x17, 0xba, 0x98, 0x73, 0x3a, 0x0f, 0x78, 0xbb, 0x70,
0x64, 0x1d, 0xe7, 0x9d, 0x96, 0x70, 0x39, 0xd2, 0x73, 0xa6, 0x1c, 0xe8, 0x63, 0x38, 0xc8, 0x42,
0xdd, 0x31, 0x5b, 0xf8, 0xbc, 0x5d, 0x94, 0x01, 0x28, 0x4c, 0x83, 0xcf, 0x85, 0xc7, 0x7e, 0x05,
0xdd, 0xad, 0x55, 0x8d, 0x02, 0xe6, 0x47, 0x14, 0x7d, 0x06, 0x65, 0x4d, 0x3b, 0x6a, 0x5b, 0x47,
0xf9, 0xe3, 0x6a, 0xff, 0x41, 0x2f, 0xd9, 0x11, 0xeb, 0x91, 0x8e, 0x81, 0xdb, 0x1f, 0x01, 0x92,
0xc3, 0x3c, 0x63, 0x84, 0x26, 0x09, 0x0f, 0xa0, 0xa8, 0x68, 0x59, 0x92, 0x96, 0xfa, 0xb0, 0xf7,
0xa1, 0x95, 0xc6, 0xca, 0x25, 0xb5, 0xef, 0xc1, 0xc1, 0x17, 0x94, 0x0f, 0x16, 0xe3, 0x1b, 0xca,
0x05, 0xcf, 0xd8, 0xfe, 0x77, 0x0b, 0xde, 0x5b, 0x71, 0xe8, 0xe4, 0x67, 0xb0, 0x3b, 0x92, 0xd6,
0x98, 0xec, 0xc3, 0x14, 0xd9, 0x8d, 0x21, 0x3d, 0x65, 0x72, 0xe2, 0xb8, 0xce, 0xaf, 0x2d, 0x28,
0x29, 0x1b, 0x3a, 0x81, 0x8a, 0xb2, 0xba, 0x1e, 0x51, 0xab, 0x3e, 0xa8, 0xff, 0xe9, 0x5d, 0x77,
0xe7, 0xcf, 0xef, 0xba, 0x25, 0x41, 0x74, 0x78, 0xe1, 0x94, 0x15, 0x60, 0x48, 0xd0, 0x29, 0xec,
0x85, 0x6c, 0xc1, 0x3d, 0x7f, 0xe2, 0x8a, 0x93, 0x10, 0xb5, 0x73, 0x92, 0x00, 0xf4, 0xe4, 0xb9,
0x10, 0x70, 0xa7, 0xa6, 0x01, 0x72, 0x92, 0xe8, 0xdb, 0x50, 0x1b, 0xe3, 0xf1, 0x94, 0x12, 0x8d,
0xcf, 0xaf, 0xe1, 0xab, 0xca, 0x2f, 0xe1, 0xa2, 0x42, 0x66, 0x02, 0xa6, 0x42, 0x4f, 0x00, 0xa5,
0x8d, 0x49, 0x89, 0x39, 0xe3, 0x78, 0x16, 0x97, 0x58, 0x7e, 0xa0, 0xfb, 0x90, 0xf7, 0x88, 0xa2,
0x55, 0x1b, 0x40, 0x6a, 0x0e, 0xc2, 0x6c, 0xf7, 0xa1, 0x69, 0x32, 0xc5, 0x47, 0xea, 0x10, 0x72,
0x5b, 0x27, 0x9e, 0xf3, 0x88, 0xfd, 0x83, 0x14, 0x25, 0x33, 0xf8, 0x2d, 0x41, 0xe8, 0x08, 0x8a,
0xdb, 0xea, 0xa3, 0x1c, 0x76, 0x0f, 0x20, 0x59, 0xa7, 0x04, 0x6f, 0x6d, 0xc3, 0x7f, 0x09, 0x8d,
0x4b, 0x5d, 0xd5, 0x3b, 0x32, 0x47, 0x6d, 0xd8, 0xc5, 0x84, 0x84, 0x34, 0x8a, 0xe4, 0x79, 0xad,
0x38, 0xf1, 0xa7, 0x6d, 0x43, 0x33, 0x49, 0xa6, 0xa7, 0x54, 0x87, 0x1c, 0xbb, 0x91, 0xd9, 0xca,
0x4e, 0x8e, 0xdd, 0xd8, 0xdf, 0x83, 0xd6, 0x53, 0xc6, 0x6e, 0x16, 0x41, 0x7a, 0xc8, 0xba, 0x19,
0xb2, 0x72, 0xcb, 0x10, 0xaf, 0x00, 0xa5, 0xc3, 0x4d, 0xdd, 0x0a, 0x62, 0x3a, 0x32, 0x43, 0x76,
0x9a, 0xd2, 0x8e, 0xbe, 0x09, 0x85, 0x39, 0xe5, 0xd8, 0xdc, 0x2f, 0xc6, 0xff, 0x7d, 0xca, 0x31,
0xc1, 0x1c, 0x3b, 0xd2, 0x6f, 0xff, 0x18, 0x1a, 0x72, 0xa2, 0xfe, 0x35, 0xbb, 0x6b, 0x35, 0x4e,
0xb2, 0x54, 0xab, 0xfd, 0x56, 0x92, 0xfd, 0x4c, 0x39, 0x12, 0xf6, 0x7f, 0xb4, 0xa0, 0x99, 0x0c,
0xa0, 0xc9, 0xdb, 0x50, 0xe0, 0xcb, 0x40, 0x91, 0xaf, 0xf7, 0xeb, 0x49, 0xf8, 0xcb, 0x65, 0x40,
0x1d, 0xe9, 0x43, 0x3d, 0x28, 0xb3, 0x80, 0x86, 0x98, 0xb3, 0x70, 0x7d, 0x12, 0xcf, 0xb5, 0xc7,
0x31, 0x18, 0x81, 0x1f, 0xe3, 0x00, 0x8f, 0x3d, 0xbe, 0x94, 0x97, 0x63, 0x06, 0x7f, 0xae, 0x3d,
0x8e, 0xc1, 0x88, 0x59, 0xbc, 0xa1, 0x61, 0xe4, 0x31, 0x5f, 0x5e, 0x91, 0x99, 0x59, 0xfc, 0x50,
0x39, 0x9c, 0x18, 0x61, 0xcf, 0xa1, 0xf1, 0xd8, 0xf3, 0xc9, 0x33, 0x8a, 0xc3, 0xbb, 0x56, 0xe9,
0xeb, 0x50, 0x8c, 0x38, 0x0e, 0xb9, 0xea, 0x1c, 0x6b, 0x10, 0xe5, 0x4c, 0xda, 0x50, 0x5e, 0x9d,
0x3d, 0xf9, 0x61, 0x3f, 0x82, 0x66, 0x32, 0x9c, 0xae, 0xd9, 0xed, 0x07, 0x01, 0x41, 0xf3, 0x62,
0x31, 0x0f, 0x32, 0x77, 0xe2, 0xa7, 0xd0, 0x4a, 0xd9, 0x56, 0x53, 0x6d, 0x3d, 0x23, 0x75, 0xa8,
0x5d, 0x71, 0x9c, 0x5c, 0x1c, 0xff, 0xb0, 0x60, 0x5f, 0x18, 0xae, 0x16, 0xf3, 0x39, 0x0e, 0x97,
0x26, 0xd3, 0x03, 0x80, 0x45, 0x44, 0x89, 0x1b, 0x05, 0x78, 0x4c, 0xf5, 0xfd, 0x51, 0x11, 0x96,
0x2b, 0x61, 0x40, 0x0f, 0xa1, 0x81, 0xdf, 0x60, 0x6f, 0x26, 0x2e, 0x7c, 0x8d, 0xc9, 0x49, 0x4c,
0xdd, 0x98, 0x15, 0xf0, 0xab, 0x50, 0x93, 0x79, 0x3c, 0x7f, 0x22, 0xf7, 0x95, 0xaa, 0x46, 0x55,
0xd8, 0x86, 0xca, 0x24, 0xfa, 0x9f, 0x84, 0x50, 0x85, 0x50, 0x6d, 0x4d, 0x8e, 0xfe, 0xb9, 0x02,
0x7c, 0x03, 0xea, 0x12, 0x30, 0xc2, 0x3e, 0xf9, 0xa9, 0x47, 0xf8, 0x54, 0x77, 0xb2, 0x3d, 0x61,
0x1d, 0xc4, 0x46, 0x74, 0x0a, 0xfb, 0x09, 0xa7, 0x04, 0x5b, 0x52, 0x5d, 0xcf, 0xb8, 0x4c, 0x80,
0x2c, 0x2b, 0x8e, 0xa6, 0x23, 0x86, 0x43, 0x12, 0xd7, 0xe3, 0x9f, 0x05, 0x68, 0xa5, 0x8c, 0xba,
0x1a, 0x0f, 0x61, 0x57, 0x94, 0x6f, 0xfb, 0xf5, 0x5f, 0x12, 0xee, 0x21, 0x41, 0x1f, 0x42, 0x53,
0x02, 0xc7, 0xcc, 0xf7, 0xe9, 0x58, 0x08, 0x9b, 0x48, 0x17, 0xa6, 0x21, 0xec, 0xe7, 0x89, 0x19,
0x9d, 0x40, 0x6b, 0xc4, 0x18, 0x8f, 0x78, 0x88, 0x03, 0x37, 0x3e, 0x76, 0x79, 0x79, 0x43, 0x34,
0x8d, 0x43, 0x9f, 0x3a, 0x91, 0x57, 0x6a, 0x07, 0x1f, 0xcf, 0x0c, 0xb6, 0x20, 0xb1, 0x8d, 0xd8,
0x9e, 0x82, 0xd2, 0xb7, 0x2b, 0xd0, 0xa2, 0x82, 0xc6, 0xf6, 0x18, 0x7a, 0x02, 0x2d, 0x12, 0xcf,
0xd5, 0x60, 0x4b, 0x8a, 0x82, 0x71, 0xc4, 0xe0, 0x47, 0x72, 0xdb, 0xf3, 0xa8, 0xbd, 0x2b, 0x0f,
0xd5, 0x61, 0xaa, 0xa1, 0x6e, 0xd8, 0x40, 0x8e, 0x02, 0xa3, 0xef, 0x40, 0x69, 0x11, 0x08, 0x11,
0xd7, 0x2e, 0xcb, 0xb0, 0xaf, 0xf4, 0x94, 0xc2, 0xeb, 0xc5, 0x0a, 0xaf, 0x77, 0xa1, 0x15, 0xa0,
0xa3, 0x81, 0xe8, 0x73, 0xa8, 0x4a, 0xb9, 0x13, 0x78, 0xfe, 0x84, 0x92, 0x76, 0x45, 0xc6, 0x75,
0xd6, 0xe2, 0x5e, 0xc6, 0xca, 0x70, 0x50, 0x16, 0x8b, 0xf1, 0xab, 0xbf, 0x74, 0x2d, 0x07, 0x44,
0xe0, 0xa5, 0x8c, 0x43, 0x5f, 0x40, 0x4d, 0xa6, 0x79, 0xbd, 0xa0, 0xa1, 0x47, 0x49, 0x1b, 0xfe,
0x83, 0x3c, 0x92, 0xc0, 0x0b, 0x15, 0x88, 0x3e, 0x85, 0x96, 0xe1, 0xe3, 0x5e, 0x87, 0x6c, 0x2e,
0xb6, 0x41, 0x55, 0x6e, 0x83, 0x74, 0xf7, 0xac, 0xc7, 0x63, 0x3f, 0x0e, 0xd9, 0x7c, 0x48, 0x8c,
0xe2, 0x4c, 0xc2, 0xe2, 0x0a, 0xd7, 0x64, 0x85, 0xf7, 0xd3, 0x78, 0x5d, 0x64, 0xfb, 0x37, 0x16,
0x1c, 0x68, 0x01, 0xf5, 0x84, 0xe2, 0x19, 0x9f, 0xc6, 0x97, 0xd2, 0x3d, 0x28, 0x29, 0x85, 0xa1,
0x55, 0xa7, 0xfe, 0x12, 0x67, 0x83, 0xfa, 0xe3, 0x70, 0x19, 0x70, 0x4a, 0xd2, 0x7a, 0x76, 0xcf,
0x58, 0x85, 0x92, 0x45, 0x5f, 0x83, 0x58, 0x74, 0xba, 0x9e, 0x4f, 0xe8, 0x5b, 0x7d, 0x0e, 0x6b,
0xda, 0x38, 0x14, 0x36, 0x71, 0xe6, 0x83, 0x90, 0xfd, 0x84, 0x8e, 0xa5, 0xce, 0x29, 0xc8, 0x3c,
0x15, 0x6d, 0x19, 0x12, 0xfb, 0xf7, 0x16, 0xec, 0x65, 0xb8, 0xa1, 0x13, 0xa8, 0x4e, 0xe5, 0xaf,
0xa5, 0x2b, 0x14, 0x85, 0xb5, 0xa6, 0x28, 0x40, 0xbb, 0x87, 0x24, 0x12, 0xba, 0x68, 0xe1, 0xa7,
0xe1, 0xeb, 0x02, 0xa4, 0x66, 0x00, 0x22, 0xe0, 0x04, 0xaa, 0xec, 0xfa, 0x7a, 0xe6, 0xf9, 0x54,
0xc2, 0xf3, 0xeb, 0xd9, 0xb5, 0x5b, 0x80, 0xdb, 0xb0, 0xab, 0xe7, 0xa2, 0x89, 0xc7, 0x9f, 0xf6,
0xcf, 0x2c, 0x78, 0x6f, 0xa5, 0xa4, 0xfa, 0x54, 0x7f, 0x0c, 0x25, 0x35, 0x9c, 0xee, 0xb5, 0xed,
0xf4, 0x96, 0xce, 0x44, 0x68, 0x1c, 0xfa, 0x2e, 0x40, 0x48, 0xc9, 0xc2, 0x27, 0xd8, 0x1f, 0x2f,
0x75, 0xf3, 0xfa, 0x20, 0xa5, 0xf0, 0x1d, 0xe3, 0xbc, 0x1a, 0x4f, 0xe9, 0x9c, 0x3a, 0x29, 0xb8,
0xfd, 0x37, 0x0b, 0xf6, 0x9f, 0x8f, 0x44, 0x31, 0xb3, 0x4b, 0xbb, 0xbe, 0x84, 0xd6, 0xa6, 0x25,
0x4c, 0x76, 0x40, 0x2e, 0xb3, 0x03, 0xb2, 0xab, 0x96, 0x5f, 0x59, 0x35, 0xf1, 0x78, 0x90, 0x0d,
0xc9, 0xc5, 0xd7, 0x9c, 0x86, 0x6e, 0xba, 0x48, 0x79, 0xa7, 0x25, 0x5d, 0x67, 0xc2, 0x13, 0x3f,
0x6e, 0xbe, 0x05, 0x88, 0xfa, 0xc4, 0x1d, 0xd1, 0x6b, 0x16, 0x52, 0x03, 0x57, 0x17, 0x6e, 0x93,
0xfa, 0x64, 0x20, 0x1d, 0x31, 0xda, 0x74, 0xb9, 0x52, 0xea, 0xb1, 0x65, 0xff, 0xc2, 0x82, 0x83,
0xec, 0x4c, 0x75, 0xc5, 0x1f, 0xad, 0x3d, 0x22, 0xb6, 0xd7, 0xdc, 0x20, 0xff, 0xab, 0xaa, 0xf7,
0x7f, 0x59, 0x80, 0xda, 0x97, 0x98, 0x0c, 0xe3, 0x51, 0xd0, 0x10, 0x20, 0x79, 0x61, 0xa0, 0xfb,
0xa9, 0xf1, 0xd7, 0x1e, 0x1e, 0x9d, 0x07, 0x5b, 0xbc, 0x7a, 0x3a, 0xe7, 0x50, 0x8e, 0x35, 0x22,
0xea, 0xa4, 0xa0, 0x2b, 0x2a, 0xb4, 0xf3, 0xc1, 0x46, 0x9f, 0x4e, 0x32, 0x04, 0x48, 0x54, 0x60,
0x86, 0xcf, 0x9a, 0xb6, 0xcc, 0xf0, 0xd9, 0x20, 0x1d, 0xcf, 0xa1, 0x1c, 0x2b, 0xb2, 0x0c, 0x9f,
0x15, 0x1d, 0x98, 0xe1, 0xb3, 0x26, 0xe1, 0xce, 0xa1, 0x1c, 0x4b, 0x94, 0x4c, 0x92, 0x15, 0x99,
0x94, 0x49, 0xb2, 0xa6, 0x69, 0x1e, 0x43, 0xc5, 0xa8, 0x13, 0x94, 0x46, 0xae, 0xea, 0x98, 0xce,
0xfd, 0xcd, 0x4e, 0x9d, 0xc7, 0x81, 0xbd, 0xcc, 0x6b, 0x0d, 0x75, 0xb7, 0xbf, 0xe3, 0x54, 0xbe,
0xa3, 0xdb, 0x1e, 0x7a, 0xfd, 0xdf, 0x59, 0xd0, 0x7c, 0xfe, 0x86, 0x86, 0x33, 0xbc, 0xfc, 0xbf,
0xec, 0x8a, 0xff, 0xd1, 0xdc, 0xfb, 0xbf, 0xb5, 0x60, 0x5f, 0xfe, 0x03, 0x70, 0xc5, 0x59, 0x48,
0x13, 0xaa, 0x03, 0x28, 0x4a, 0x09, 0x87, 0xde, 0x5f, 0x69, 0xc1, 0x26, 0xef, 0x2d, 0xbd, 0xd9,
0xde, 0x41, 0x4f, 0xa0, 0x62, 0x54, 0x4e, 0x96, 0xe3, 0x8a, 0x20, 0xca, 0x72, 0x5c, 0x15, 0x46,
0xf6, 0x4e, 0xff, 0xe7, 0x16, 0x1c, 0xa4, 0x5e, 0xff, 0x09, 0xcd, 0x00, 0xde, 0xdf, 0xf2, 0x9f,
0x02, 0xfa, 0x30, 0xbd, 0x8d, 0xff, 0xed, 0xbf, 0x39, 0x9d, 0x8f, 0xee, 0x02, 0xd5, 0x05, 0xfb,
0x83, 0x05, 0x0d, 0x75, 0x79, 0x24, 0x2c, 0x5e, 0x40, 0x2d, 0x7d, 0x13, 0xa1, 0x74, 0x69, 0x36,
0x5c, 0xc6, 0x9d, 0xee, 0x56, 0xbf, 0xa9, 0xdd, 0xcb, 0xd5, 0x36, 0xd8, 0xdd, 0x7a, 0x87, 0x6d,
0xd8, 0x93, 0x1b, 0x5b, 0x91, 0xbd, 0x33, 0x28, 0xfc, 0x28, 0x17, 0x8c, 0x46, 0x25, 0x29, 0x4b,
0x3e, 0xf9, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0xd5, 0x03, 0xf3, 0x6d, 0x13, 0x00, 0x00,
// 1751 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x93, 0x23, 0x47,
0x11, 0xde, 0xd6, 0x6b, 0xa4, 0x94, 0x46, 0x8f, 0x9a, 0xb1, 0xb7, 0x91, 0x77, 0x57, 0x43, 0x03,
0xde, 0xb5, 0x07, 0x34, 0x46, 0xb6, 0x09, 0x0c, 0xa7, 0xd1, 0xac, 0xd7, 0x56, 0xb0, 0x78, 0xc7,
0x3d, 0x1b, 0x44, 0x40, 0x10, 0xd1, 0x94, 0xd4, 0x35, 0x33, 0xcd, 0x48, 0x5d, 0xbd, 0xdd, 0xa5,
0xc5, 0xba, 0x71, 0x22, 0xe0, 0x04, 0x17, 0x0e, 0x9c, 0x09, 0xfe, 0x01, 0x27, 0xfe, 0x00, 0xfc,
0x06, 0x0e, 0xe6, 0x06, 0xff, 0x80, 0x08, 0x6e, 0x44, 0x65, 0x55, 0x57, 0x77, 0xeb, 0xe1, 0x1d,
0x02, 0xb8, 0xa9, 0x33, 0xbf, 0x7c, 0xd6, 0x23, 0xbf, 0x12, 0x74, 0x82, 0x30, 0x89, 0xd8, 0x4c,
0xf0, 0x78, 0x18, 0xc5, 0x5c, 0x70, 0xd2, 0x30, 0x82, 0x3e, 0x5c, 0xf1, 0x2b, 0xae, 0xc4, 0x7d,
0x08, 0xb9, 0xcf, 0xf4, 0xef, 0x4e, 0xc4, 0x83, 0x50, 0xb0, 0xd8, 0x9f, 0x6a, 0xc1, 0x83, 0x2b,
0xce, 0xaf, 0xe6, 0xec, 0x04, 0xbf, 0xa6, 0xcb, 0xcb, 0x13, 0x7f, 0x19, 0x53, 0x11, 0xf0, 0x50,
0xeb, 0x07, 0xeb, 0x7a, 0x11, 0x2c, 0x58, 0x22, 0xe8, 0x22, 0x52, 0x00, 0xe7, 0x06, 0x1e, 0x3c,
0x0d, 0x12, 0x31, 0x89, 0x63, 0x16, 0xd1, 0x98, 0x4e, 0xe7, 0xec, 0x82, 0x5d, 0x2d, 0x58, 0x28,
0x12, 0x97, 0xbd, 0x58, 0xb2, 0x44, 0x90, 0x43, 0xa8, 0xce, 0x83, 0x45, 0x20, 0x6c, 0xeb, 0xc8,
0x7a, 0x54, 0x75, 0xd5, 0x07, 0x79, 0x17, 0x5e, 0x9f, 0xd3, 0x44, 0x78, 0x09, 0x63, 0xa1, 0x97,
0x28, 0x13, 0x2f, 0xa2, 0xe2, 0xda, 0x2e, 0x1d, 0x59, 0x8f, 0x5a, 0xee, 0x81, 0xd4, 0x5e, 0x30,
0x16, 0x6a, 0x77, 0xe7, 0x54, 0x5c, 0x3b, 0x7f, 0xb7, 0x80, 0x6c, 0x46, 0x22, 0x04, 0x2a, 0x68,
0x69, 0xa1, 0x25, 0xfe, 0x26, 0x1f, 0x40, 0x3b, 0xf5, 0xea, 0x33, 0x41, 0x83, 0x39, 0xfa, 0x6d,
0x8e, 0xc8, 0x30, 0x6b, 0xc1, 0xb9, 0xfa, 0xe5, 0xee, 0x6b, 0xe4, 0x63, 0x04, 0x92, 0x01, 0x34,
0xe7, 0x3c, 0x11, 0x5e, 0x14, 0xb0, 0x19, 0x4b, 0xec, 0x32, 0xa6, 0x0d, 0x52, 0x74, 0x8e, 0x12,
0x32, 0x04, 0xcc, 0xce, 0x93, 0x89, 0x04, 0xb1, 0x47, 0x85, 0x60, 0x8b, 0x48, 0xd8, 0x95, 0x23,
0xeb, 0x51, 0xd9, 0xed, 0x49, 0x95, 0x8b, 0x9a, 0x53, 0xa5, 0x20, 0xef, 0xc0, 0x61, 0x11, 0xea,
0xcd, 0xf8, 0x32, 0x14, 0x76, 0x15, 0x0d, 0x48, 0x9c, 0x07, 0x9f, 0x49, 0x8d, 0xf3, 0x63, 0x18,
0xec, 0xec, 0x6a, 0x12, 0xf1, 0x30, 0x61, 0xe4, 0x03, 0xa8, 0xeb, 0xb4, 0x13, 0xdb, 0x3a, 0x2a,
0x3f, 0x6a, 0x8e, 0xee, 0x0f, 0xb3, 0x1d, 0xb1, 0x69, 0xe9, 0x1a, 0xb8, 0xf3, 0x36, 0x10, 0x0c,
0xf3, 0x09, 0xf7, 0x59, 0xe6, 0xf0, 0x10, 0xaa, 0x2a, 0x2d, 0x0b, 0xd3, 0x52, 0x1f, 0xce, 0x01,
0xf4, 0xf2, 0x58, 0x5c, 0x52, 0xa7, 0x0f, 0x87, 0x1f, 0x31, 0x31, 0x5e, 0xce, 0x6e, 0x98, 0x90,
0x79, 0x6a, 0xf9, 0x77, 0x4a, 0xb6, 0xe5, 0xfc, 0xd3, 0x82, 0xd7, 0xd6, 0x94, 0x3a, 0xc0, 0x29,
0xec, 0x4d, 0x51, 0x9a, 0x26, 0xfc, 0x30, 0x97, 0xf0, 0x56, 0x93, 0xa1, 0x12, 0xb9, 0xa9, 0x5d,
0xff, 0xb7, 0x16, 0xd4, 0x94, 0x8c, 0x1c, 0x43, 0x43, 0x49, 0xbd, 0xc0, 0x57, 0x2b, 0x3f, 0x6e,
0xff, 0xe5, 0xf3, 0xc1, 0x9d, 0xbf, 0x7e, 0x3e, 0xa8, 0xc9, 0x64, 0x27, 0x8f, 0xdd, 0xba, 0x02,
0x4c, 0x7c, 0x72, 0x02, 0xfb, 0x31, 0x5f, 0x8a, 0x20, 0xbc, 0xf2, 0xe4, 0x69, 0x48, 0xec, 0x12,
0x26, 0x00, 0x43, 0x3c, 0x1b, 0x12, 0xee, 0xb6, 0x34, 0x00, 0x0b, 0x25, 0xdf, 0x80, 0xd6, 0x8c,
0xce, 0xae, 0x99, 0xaf, 0xf1, 0xe5, 0x0d, 0x7c, 0x53, 0xe9, 0x11, 0x8e, 0x85, 0xdf, 0x85, 0x9e,
0x29, 0x22, 0xc9, 0x77, 0xe4, 0x29, 0x90, 0xbc, 0x22, 0x6b, 0xb7, 0xe0, 0x82, 0xce, 0xd3, 0x76,
0xe3, 0x07, 0xb9, 0x07, 0xe5, 0xc0, 0x57, 0xe9, 0xb5, 0xc6, 0x90, 0xab, 0x45, 0x8a, 0xd1, 0xdb,
0xb7, 0xa0, 0x6b, 0xbc, 0xa5, 0x47, 0xec, 0x01, 0x94, 0x76, 0x36, 0xa1, 0x14, 0xf8, 0x68, 0xf7,
0xc3, 0x5c, 0x7a, 0x26, 0x89, 0x57, 0x18, 0x92, 0x23, 0xa8, 0xee, 0xea, 0x97, 0x52, 0xa0, 0xeb,
0x11, 0x40, 0xb6, 0x76, 0x99, 0x8d, 0xf5, 0x45, 0x36, 0xcf, 0xa0, 0x73, 0xae, 0xbb, 0x7d, 0xcb,
0x2a, 0x88, 0x0d, 0x7b, 0xd4, 0xf7, 0x63, 0x96, 0x24, 0x78, 0x96, 0x1b, 0x6e, 0xfa, 0x89, 0x0e,
0xdf, 0x84, 0x6e, 0xe6, 0x50, 0x97, 0xd7, 0x86, 0x12, 0xbf, 0x41, 0x8f, 0x75, 0xb7, 0xc4, 0x6f,
0x10, 0x77, 0x0a, 0xbd, 0xa7, 0x9c, 0xdf, 0x2c, 0xa3, 0x7c, 0xe8, 0xb6, 0x09, 0xdd, 0xb8, 0x45,
0xa8, 0x9f, 0x00, 0xc9, 0xbb, 0x30, 0xbd, 0xac, 0xc8, 0xf2, 0xd0, 0x4b, 0xb1, 0x6c, 0x94, 0x93,
0x37, 0xa1, 0xb2, 0x60, 0x82, 0x9a, 0x7b, 0xc8, 0xe8, 0xbf, 0xcf, 0x04, 0xf5, 0xa9, 0xa0, 0x2e,
0xea, 0x31, 0xc2, 0x14, 0x3a, 0x58, 0x78, 0x78, 0xc9, 0x6f, 0xdb, 0x9d, 0xe3, 0x62, 0xca, 0xcd,
0x51, 0x2f, 0x8b, 0x70, 0xaa, 0x14, 0xc5, 0x2a, 0xfe, 0x6c, 0x41, 0x37, 0x0b, 0xa2, 0x8b, 0x70,
0xa0, 0x22, 0x56, 0x91, 0x2a, 0xa2, 0x3d, 0x6a, 0x67, 0x2e, 0x9e, 0xaf, 0x22, 0xe6, 0xa2, 0x8e,
0x0c, 0xa1, 0xce, 0x23, 0x16, 0x53, 0xc1, 0xe3, 0xcd, 0x62, 0x9e, 0x69, 0x8d, 0x6b, 0x30, 0x12,
0x3f, 0xa3, 0x11, 0x9d, 0x05, 0x62, 0x85, 0x97, 0x69, 0x01, 0x7f, 0xa6, 0x35, 0xae, 0xc1, 0xc8,
0x4a, 0x5e, 0xb2, 0x38, 0x09, 0x78, 0x88, 0x57, 0x6a, 0xa1, 0x92, 0x1f, 0x28, 0x85, 0x9b, 0x22,
0xb0, 0x92, 0x17, 0xd0, 0x79, 0x12, 0x84, 0xfe, 0x27, 0x8c, 0xc6, 0xb7, 0xed, 0xd6, 0x57, 0xa1,
0x9a, 0x08, 0x1a, 0x0b, 0x35, 0x6d, 0x36, 0x20, 0x4a, 0x99, 0x8d, 0xae, 0xb2, 0x3a, 0xa3, 0xf8,
0x81, 0x21, 0xbf, 0x0d, 0xdd, 0x2c, 0xa4, 0xee, 0xdd, 0xed, 0x0e, 0x0b, 0x81, 0xee, 0xe3, 0xe5,
0x22, 0x2a, 0xdc, 0xa7, 0xef, 0x43, 0x2f, 0x27, 0x5b, 0x77, 0xb7, 0xeb, 0x1c, 0x39, 0x6d, 0x68,
0x5d, 0x08, 0x6a, 0x2e, 0x1b, 0xe7, 0x5f, 0x16, 0x1c, 0x48, 0xc1, 0xc5, 0x72, 0xb1, 0xa0, 0xf1,
0xca, 0x78, 0xba, 0x0f, 0xb0, 0x4c, 0x98, 0xef, 0x25, 0x11, 0x9d, 0x31, 0x7d, 0xdf, 0x34, 0xa4,
0xe4, 0x42, 0x0a, 0xc8, 0x43, 0xe8, 0xd0, 0x97, 0x34, 0x98, 0xcb, 0x61, 0xa1, 0x31, 0x25, 0xc4,
0xb4, 0x8d, 0x58, 0x01, 0xbf, 0x0c, 0x2d, 0xf4, 0x13, 0x84, 0x57, 0xb8, 0xcf, 0x54, 0x57, 0x9a,
0x52, 0x36, 0x51, 0x22, 0x39, 0x3b, 0x11, 0xc2, 0x14, 0x42, 0x8d, 0x44, 0x8c, 0xfe, 0xa1, 0x02,
0x7c, 0x0d, 0xda, 0x08, 0x98, 0xd2, 0xd0, 0xff, 0x59, 0xe0, 0x8b, 0x6b, 0x3d, 0x05, 0xf7, 0xa5,
0x74, 0x9c, 0x0a, 0xc9, 0x09, 0x1c, 0x64, 0x39, 0x65, 0xd8, 0x9a, 0x9a, 0x98, 0x46, 0x65, 0x0c,
0xb0, 0xad, 0x34, 0xb9, 0x9e, 0x72, 0x1a, 0xfb, 0x69, 0x3f, 0x7e, 0x5e, 0x85, 0x5e, 0x4e, 0xa8,
0xbb, 0xf1, 0x10, 0xf6, 0x64, 0xfb, 0x76, 0x8f, 0x8d, 0x9a, 0x54, 0x4f, 0x7c, 0xf2, 0x16, 0x74,
0x11, 0x38, 0xe3, 0x61, 0xc8, 0x66, 0x92, 0x14, 0x25, 0xba, 0x31, 0x1d, 0x29, 0x3f, 0xcb, 0xc4,
0xe4, 0x04, 0x7a, 0x53, 0xce, 0x45, 0x22, 0x62, 0x1a, 0x79, 0xe9, 0x31, 0x94, 0xed, 0x69, 0x8c,
0x4b, 0xb6, 0xe5, 0x76, 0x8d, 0x52, 0x9f, 0x44, 0xe9, 0x1b, 0xb9, 0x47, 0x48, 0xe7, 0x06, 0x5f,
0xc1, 0x9b, 0xa6, 0x93, 0xca, 0x73, 0x50, 0xf6, 0xd9, 0x1a, 0xb4, 0xaa, 0xa0, 0xa9, 0x3c, 0x85,
0x1e, 0x43, 0xcf, 0x4f, 0xeb, 0x35, 0xd8, 0x1a, 0x62, 0xbb, 0x46, 0x91, 0x82, 0xdf, 0xc3, 0x23,
0x20, 0x12, 0x7b, 0x0f, 0x0f, 0xd9, 0x83, 0xdc, 0x30, 0xde, 0xb2, 0x89, 0x5c, 0x05, 0x26, 0xdf,
0x84, 0xda, 0x32, 0x92, 0x24, 0xd0, 0xae, 0xa3, 0xd9, 0x97, 0x86, 0x8a, 0x21, 0x0e, 0x53, 0x86,
0x38, 0x7c, 0xac, 0x19, 0xa4, 0xab, 0x81, 0xe4, 0x43, 0x68, 0x22, 0x5d, 0x8a, 0x82, 0xf0, 0x8a,
0xf9, 0x76, 0x03, 0xed, 0xfa, 0x1b, 0x76, 0xcf, 0x53, 0x66, 0x39, 0xae, 0xcb, 0x05, 0xf9, 0xcd,
0xdf, 0x06, 0x96, 0x0b, 0xd2, 0xf0, 0x1c, 0xed, 0xc8, 0x47, 0xd0, 0x42, 0x37, 0x2f, 0x96, 0x2c,
0x0e, 0x98, 0x6f, 0xc3, 0x7f, 0xe0, 0x07, 0x13, 0xf8, 0x54, 0x19, 0x92, 0xf7, 0xa1, 0x67, 0xf2,
0xf1, 0x2e, 0x63, 0xbe, 0x90, 0x5b, 0xa1, 0x89, 0x5b, 0x21, 0x3f, 0x71, 0xdb, 0x69, 0xec, 0x27,
0x31, 0x5f, 0x4c, 0x7c, 0xc3, 0x58, 0x33, 0xb3, 0xb4, 0xc3, 0x2d, 0xec, 0xf0, 0x41, 0x1e, 0xaf,
0x9b, 0xec, 0xfc, 0xce, 0x82, 0x43, 0x4d, 0xc0, 0x3e, 0x66, 0x74, 0x2e, 0xae, 0xd3, 0x0b, 0xea,
0x75, 0xa8, 0x29, 0x76, 0xa2, 0x59, 0xab, 0xfe, 0x92, 0xe7, 0x83, 0x85, 0xb3, 0x78, 0x15, 0x09,
0xe6, 0xe7, 0xf9, 0xf0, 0xbe, 0x91, 0x4a, 0x26, 0x4c, 0xbe, 0x02, 0x29, 0x69, 0xf5, 0x82, 0xd0,
0x67, 0x9f, 0xe9, 0xb3, 0xd8, 0xd2, 0xc2, 0x89, 0x94, 0xc9, 0x73, 0x1f, 0xc5, 0xfc, 0xa7, 0x6c,
0x86, 0x1c, 0xa9, 0x82, 0x7e, 0x1a, 0x5a, 0x32, 0xf1, 0x9d, 0x3f, 0x5a, 0xb0, 0x5f, 0xc8, 0x8d,
0x1c, 0x43, 0xf3, 0x1a, 0x7f, 0xad, 0x3c, 0xc9, 0x42, 0xac, 0x0d, 0x16, 0x02, 0x5a, 0x3d, 0xf1,
0xe5, 0x9e, 0xdf, 0x5f, 0x86, 0x79, 0xf8, 0x26, 0x69, 0x69, 0x19, 0x80, 0x34, 0x38, 0x86, 0x26,
0xbf, 0xbc, 0x9c, 0x07, 0x21, 0x43, 0x78, 0x79, 0xd3, 0xbb, 0x56, 0x4b, 0xb0, 0x0d, 0x7b, 0xba,
0x16, 0x9d, 0x78, 0xfa, 0xe9, 0xfc, 0xc2, 0x82, 0xd7, 0xd6, 0x5a, 0xaa, 0x4f, 0xf6, 0x3b, 0x50,
0x53, 0xe1, 0xf4, 0x0c, 0xb6, 0xf3, 0x5b, 0xba, 0x60, 0xa1, 0x71, 0xe4, 0xbb, 0x00, 0x31, 0xf3,
0x97, 0xa1, 0x4f, 0xc3, 0xd9, 0x4a, 0x0f, 0xb3, 0x37, 0x72, 0x2f, 0x04, 0xd7, 0x28, 0x2f, 0x66,
0xd7, 0x6c, 0xc1, 0xdc, 0x1c, 0xdc, 0xf9, 0x87, 0x05, 0x07, 0xcf, 0xa6, 0xb2, 0x99, 0xc5, 0xa5,
0xdd, 0x5c, 0x42, 0x6b, 0xdb, 0x12, 0x66, 0x3b, 0xa0, 0x54, 0xd8, 0x01, 0xc5, 0x55, 0x2b, 0xaf,
0xad, 0x9a, 0x7c, 0x7c, 0xe0, 0x70, 0xf2, 0xe8, 0xa5, 0x60, 0xb1, 0x97, 0x6f, 0x52, 0xd9, 0xed,
0xa1, 0xea, 0x54, 0x6a, 0xd2, 0xc7, 0xd1, 0xd7, 0x81, 0xb0, 0xd0, 0xf7, 0xa6, 0xec, 0x92, 0xc7,
0xcc, 0xc0, 0xd5, 0xa5, 0xdb, 0x65, 0xa1, 0x3f, 0x46, 0x45, 0x8a, 0x36, 0x13, 0xaf, 0x96, 0x7b,
0xac, 0x39, 0xbf, 0xb2, 0xe0, 0xb0, 0x58, 0xa9, 0xee, 0xf8, 0x7b, 0x1b, 0x8f, 0x90, 0xdd, 0x3d,
0x37, 0xc8, 0xff, 0xaa, 0xeb, 0xa3, 0x5f, 0x57, 0xa0, 0xf5, 0x3d, 0xea, 0x4f, 0xd2, 0x28, 0x64,
0x02, 0x90, 0xbd, 0x50, 0xc8, 0xbd, 0x5c, 0xfc, 0x8d, 0x87, 0x4b, 0xff, 0xfe, 0x0e, 0xad, 0x2e,
0xe7, 0x0c, 0xea, 0x29, 0x87, 0x24, 0xfd, 0x1c, 0x74, 0x8d, 0xa9, 0xf6, 0xdf, 0xd8, 0xaa, 0xd3,
0x4e, 0x26, 0x00, 0x19, 0x3b, 0x2c, 0xe4, 0xb3, 0xc1, 0x3b, 0x0b, 0xf9, 0x6c, 0xa1, 0x94, 0x67,
0x50, 0x4f, 0x19, 0x5a, 0x21, 0x9f, 0x35, 0x6e, 0x58, 0xc8, 0x67, 0x83, 0xd2, 0x9d, 0x41, 0x3d,
0xa5, 0x2a, 0x05, 0x27, 0x6b, 0x94, 0xa9, 0xe0, 0x64, 0x83, 0xdb, 0x3c, 0x81, 0x86, 0x61, 0x28,
0x24, 0x8f, 0x5c, 0xe7, 0x32, 0xfd, 0x7b, 0xdb, 0x95, 0xda, 0x8f, 0x0b, 0xfb, 0x85, 0x97, 0x1e,
0x19, 0xec, 0x7e, 0x03, 0x2a, 0x7f, 0x47, 0xaf, 0x7a, 0x24, 0x8e, 0xfe, 0x60, 0x41, 0xf7, 0xd9,
0x4b, 0x16, 0xcf, 0xe9, 0xea, 0xff, 0xb2, 0x2b, 0xfe, 0x47, 0xb5, 0x8f, 0x7e, 0x6f, 0xc1, 0x01,
0xfe, 0x83, 0x70, 0x21, 0x78, 0xcc, 0xb2, 0x54, 0xc7, 0x50, 0x45, 0x1a, 0x47, 0xee, 0xae, 0x8d,
0x60, 0xe3, 0xf7, 0x15, 0xb3, 0xd9, 0xb9, 0x43, 0x3e, 0x86, 0x86, 0x61, 0x3a, 0xc5, 0x1c, 0xd7,
0x48, 0x51, 0x31, 0xc7, 0x75, 0x72, 0xe4, 0xdc, 0x19, 0xfd, 0xd2, 0x82, 0xc3, 0xdc, 0xbf, 0x07,
0x59, 0x9a, 0x11, 0xdc, 0xdd, 0xf1, 0x9f, 0x04, 0x79, 0x2b, 0xbf, 0x8d, 0xbf, 0xf0, 0xdf, 0xa0,
0xfe, 0xdb, 0xb7, 0x81, 0xea, 0x86, 0xfd, 0xc9, 0x82, 0x8e, 0xba, 0x3c, 0xb2, 0x2c, 0x3e, 0x85,
0x56, 0xfe, 0x26, 0x22, 0xf9, 0xd6, 0x6c, 0xb9, 0x8c, 0xfb, 0x83, 0x9d, 0x7a, 0xd3, 0xbb, 0xe7,
0xeb, 0x63, 0x70, 0xb0, 0xf3, 0x0e, 0xdb, 0xb2, 0x27, 0xb7, 0x8e, 0x22, 0xe7, 0xce, 0xb8, 0xf2,
0xa3, 0x52, 0x34, 0x9d, 0xd6, 0x90, 0x96, 0xbc, 0xfb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x11,
0x24, 0x22, 0x66, 0xad, 0x13, 0x00, 0x00,
}
type DRPCKadInspectorClient interface {

View File

@ -12,23 +12,6 @@ import "google/protobuf/timestamp.proto";
package inspector;
service KadInspector {
// CountNodes returns the number of nodes in the routing table
rpc CountNodes(CountNodesRequest) returns (CountNodesResponse);
// PingNode sends a PING RPC to a node and returns its availability
rpc PingNode(PingNodeRequest) returns (PingNodeResponse);
// LookupNode triggers a Kademlia FindNode and returns the response
rpc LookupNode(LookupNodeRequest) returns (LookupNodeResponse);
// NodeInfo sends a PING RPC to a node and returns its local info
rpc NodeInfo(NodeInfoRequest) returns (NodeInfoResponse);
// FindNear returns limit number of IDs "near" the Start ID
rpc FindNear(FindNearRequest) returns (FindNearResponse);
// DumpNodes returns all the nodes in the node database
rpc DumpNodes(DumpNodesRequest) returns (DumpNodesResponse);
// GetBucketList returns all the buckets with all their nodes
rpc GetBucketList(GetBucketListRequest) returns (GetBucketListResponse);
}
service OverlayInspector {
// CountNodes returns the number of nodes in the cache
rpc CountNodes(CountNodesRequest) returns (CountNodesResponse);
@ -55,7 +38,6 @@ service HealthInspector {
rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {}
}
// ListSegments
message ListIrreparableSegmentsRequest {
int32 limit = 1;
@ -82,82 +64,6 @@ message CountNodesResponse {
message CountNodesRequest {
}
message GetBucketListRequest {
}
message GetBucketListResponse {
message Bucket {
bytes bucket_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
repeated node.Node routing_nodes = 2;
repeated node.Node cached_nodes = 3;
}
repeated Bucket buckets = 1;
}
// GetBuckets
message GetBucketsRequest {
}
message GetBucketsResponse {
int64 total = 1;
repeated bytes ids = 2 [(gogoproto.customtype) = "NodeID"];
}
// GetBucket
message GetBucketRequest {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message GetBucketResponse {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
repeated node.Node nodes = 2;
}
message BucketList {
repeated node.Node nodes = 1;
}
// PingNode
message PingNodeRequest {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
string address = 2;
}
message PingNodeResponse {
bool ok = 1;
}
message LookupNodeRequest {
string id = 1;
string address = 2;
}
message LookupNodeResponse {
node.Node node = 1;
node.NodeMetadata meta = 2;
}
message NodeInfoRequest {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
node.NodeAddress address = 2;
}
message NodeInfoResponse {
node.NodeType type = 1;
node.NodeOperator operator = 2;
node.NodeCapacity capacity = 3;
node.NodeVersion version = 4;
}
message FindNearRequest {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
bytes start = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 limit = 3;
}
message FindNearResponse {
repeated node.Node nodes = 2;
}
message DumpNodesRequest {}
message DumpNodesResponse {
@ -181,7 +87,7 @@ message DashboardRequest {
message DashboardResponse {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 node_connections = 2;
string bootstrap_address = 3;
string bootstrap_address = 3 [deprecated=true];
string internal_address = 4;
string external_address = 5;
string dashboard_address = 6;

View File

@ -32,7 +32,7 @@ const (
NodeType_SATELLITE NodeType = 1
NodeType_STORAGE NodeType = 2
NodeType_UPLINK NodeType = 3
NodeType_BOOTSTRAP NodeType = 4
NodeType_BOOTSTRAP NodeType = 4 // Deprecated: Do not use.
)
var NodeType_name = map[int32]string{
@ -445,42 +445,42 @@ func init() {
func init() { proto.RegisterFile("node.proto", fileDescriptor_0c843d59d2d938e7) }
var fileDescriptor_0c843d59d2d938e7 = []byte{
// 587 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x6e, 0x1a, 0x3d,
0x14, 0xcd, 0xc0, 0x04, 0x98, 0xcb, 0x8f, 0xe6, 0xf3, 0x17, 0xb5, 0x28, 0x95, 0x0a, 0x45, 0xaa,
0x84, 0x52, 0x89, 0xa8, 0xe9, 0xb6, 0x1b, 0x48, 0xa2, 0x94, 0x96, 0x02, 0x32, 0xd3, 0x2c, 0xb2,
0x19, 0x19, 0xec, 0x80, 0x95, 0x61, 0x6c, 0xd9, 0x9e, 0x46, 0xbc, 0x45, 0x9f, 0xa2, 0xcf, 0xd2,
0x67, 0xe8, 0x22, 0x7d, 0x93, 0xaa, 0xf2, 0xfc, 0xe4, 0x67, 0x59, 0xa9, 0xbb, 0x39, 0xe7, 0x9e,
0x6b, 0x9f, 0x39, 0xf7, 0x1a, 0x20, 0x16, 0x94, 0x0d, 0xa4, 0x12, 0x46, 0x20, 0xd7, 0x7e, 0x1f,
0xc2, 0x5a, 0xac, 0x45, 0xc6, 0x1c, 0x76, 0xd6, 0x42, 0xac, 0x23, 0x76, 0x9c, 0xa2, 0x65, 0x72,
0x7d, 0x6c, 0xf8, 0x96, 0x69, 0x43, 0xb6, 0x32, 0x13, 0xf4, 0x7e, 0x3b, 0xe0, 0x4e, 0x05, 0x65,
0xe8, 0x25, 0x94, 0x38, 0x6d, 0x3b, 0x5d, 0xa7, 0xdf, 0x18, 0xb5, 0x7e, 0xdc, 0x75, 0xf6, 0x7e,
0xde, 0x75, 0x2a, 0xb6, 0x32, 0x3e, 0xc3, 0x25, 0x4e, 0xd1, 0x1b, 0xa8, 0x12, 0x4a, 0x15, 0xd3,
0xba, 0x5d, 0xea, 0x3a, 0xfd, 0xfa, 0xc9, 0x7f, 0x83, 0xf4, 0x66, 0x2b, 0x19, 0x66, 0x05, 0x5c,
0x28, 0xd0, 0x73, 0xa8, 0x46, 0x44, 0x9b, 0x90, 0xcb, 0x76, 0xab, 0xeb, 0xf4, 0x3d, 0x5c, 0xb1,
0x70, 0x2c, 0x3f, 0xba, 0xb5, 0xb2, 0xdf, 0xc2, 0xae, 0xd9, 0x49, 0x86, 0x1b, 0x8a, 0x69, 0xa3,
0xf8, 0xca, 0x70, 0x11, 0x6b, 0x0c, 0x8a, 0xc9, 0xc4, 0x10, 0x0b, 0x70, 0x6d, 0xcb, 0x0c, 0xa1,
0xc4, 0x10, 0xdc, 0x88, 0x88, 0x61, 0xf1, 0x6a, 0x17, 0x46, 0x5c, 0x1b, 0xdc, 0x24, 0x09, 0xe5,
0x26, 0xd4, 0xc9, 0x6a, 0x65, 0xaf, 0xdb, 0xe7, 0x3a, 0x4c, 0x24, 0x6e, 0x25, 0x92, 0x12, 0xc3,
0xc2, 0x5c, 0x8a, 0x0f, 0x72, 0xfc, 0x54, 0xdc, 0xcc, 0xd9, 0x44, 0xda, 0x08, 0x70, 0xf5, 0x2b,
0x53, 0x9a, 0x8b, 0xb8, 0x77, 0x05, 0xf5, 0x47, 0xbf, 0x80, 0xde, 0x82, 0x67, 0x14, 0x89, 0xb5,
0x14, 0xca, 0xa4, 0x69, 0xb4, 0x4e, 0xfe, 0x7f, 0xf8, 0xd1, 0xa0, 0x28, 0xe1, 0x07, 0x15, 0x6a,
0x3f, 0x4d, 0xc6, 0xbb, 0x8f, 0xa1, 0xf7, 0x1e, 0x1a, 0xb6, 0x6b, 0x26, 0x99, 0x22, 0x46, 0x28,
0x74, 0x00, 0xfb, 0x6c, 0x4b, 0x78, 0x94, 0x1e, 0xec, 0xe1, 0x0c, 0xa0, 0x67, 0x50, 0xb9, 0x25,
0x51, 0xc4, 0x4c, 0xde, 0x9e, 0xa3, 0x1e, 0xce, 0xba, 0x4f, 0x89, 0x24, 0x2b, 0x6e, 0x76, 0xe8,
0x35, 0xb4, 0xae, 0x15, 0x63, 0xe1, 0x92, 0xc4, 0xf4, 0x96, 0x53, 0xb3, 0x49, 0x8f, 0x29, 0xe3,
0xa6, 0x65, 0x47, 0x05, 0x89, 0x5e, 0x80, 0x97, 0xca, 0x28, 0xd7, 0x37, 0xe9, 0x89, 0x65, 0x5c,
0xb3, 0xc4, 0x19, 0xd7, 0x37, 0x85, 0xa3, 0xcf, 0x79, 0xbe, 0x7f, 0xe9, 0xe8, 0x12, 0x7c, 0xdb,
0x8d, 0x1f, 0xcd, 0xed, 0x9f, 0xb8, 0xfa, 0xee, 0x64, 0x43, 0xb8, 0xcc, 0x66, 0x62, 0x13, 0xcd,
0xc7, 0x93, 0xfb, 0x2a, 0x20, 0xea, 0x40, 0x7d, 0x25, 0xb6, 0x5b, 0x6e, 0xc2, 0x0d, 0xd1, 0x9b,
0xdc, 0x1e, 0x64, 0xd4, 0x07, 0xa2, 0x37, 0x68, 0x04, 0xde, 0xfd, 0x8a, 0xb7, 0xcb, 0xe9, 0xa2,
0x1e, 0x0e, 0xb2, 0x47, 0x30, 0x28, 0x1e, 0xc1, 0x20, 0x28, 0x14, 0xa3, 0x9a, 0xdd, 0xf4, 0x6f,
0xbf, 0x3a, 0x0e, 0x7e, 0x68, 0xb3, 0xd7, 0x2b, 0x16, 0x31, 0xa2, 0x59, 0xdb, 0xed, 0x3a, 0xfd,
0x1a, 0x2e, 0xe0, 0xd1, 0x14, 0x6a, 0xe9, 0x1a, 0xec, 0x24, 0x43, 0x75, 0xa8, 0x8e, 0xa7, 0x97,
0xc3, 0xc9, 0xf8, 0xcc, 0xdf, 0x43, 0x4d, 0xf0, 0x16, 0xc3, 0xe0, 0x7c, 0x32, 0x19, 0x07, 0xe7,
0xbe, 0x63, 0x6b, 0x8b, 0x60, 0x86, 0x87, 0x17, 0xe7, 0x7e, 0x09, 0x01, 0x54, 0xbe, 0xcc, 0x27,
0xe3, 0xe9, 0x27, 0xbf, 0x6c, 0x75, 0xa3, 0xd9, 0x2c, 0x58, 0x04, 0x78, 0x38, 0xf7, 0xdd, 0xa3,
0x57, 0xd0, 0x7c, 0xb2, 0x56, 0xc8, 0x87, 0x46, 0x70, 0x3a, 0x0f, 0x83, 0xc9, 0x22, 0xbc, 0xc0,
0xf3, 0x53, 0x7f, 0x6f, 0xe4, 0x5e, 0x95, 0xe4, 0x72, 0x59, 0x49, 0xbd, 0xbf, 0xfb, 0x13, 0x00,
0x00, 0xff, 0xff, 0x44, 0x0c, 0xb7, 0x9a, 0xee, 0x03, 0x00, 0x00,
// 591 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xdd, 0x6e, 0xd3, 0x30,
0x14, 0x5e, 0xda, 0xac, 0x4d, 0x4e, 0x7f, 0x94, 0x99, 0x09, 0xaa, 0x22, 0xd1, 0x52, 0x09, 0xa9,
0x1a, 0x52, 0x27, 0xc6, 0x2d, 0x37, 0xed, 0x36, 0x8d, 0x42, 0x59, 0x2b, 0x37, 0xec, 0x62, 0x37,
0x91, 0xdb, 0x78, 0xad, 0xb5, 0x34, 0xb6, 0x6c, 0x87, 0xa9, 0x6f, 0xc1, 0x53, 0xf0, 0x2c, 0x3c,
0x03, 0x17, 0xe3, 0x4d, 0x10, 0x72, 0x7e, 0xb6, 0xf5, 0x12, 0x89, 0xbb, 0x7c, 0xdf, 0xf9, 0x8e,
0xfd, 0xe5, 0x3b, 0xc7, 0x00, 0x31, 0x0f, 0xe9, 0x40, 0x48, 0xae, 0x39, 0xb2, 0xcd, 0x77, 0x1b,
0x56, 0x7c, 0xc5, 0x33, 0xa6, 0xdd, 0x59, 0x71, 0xbe, 0x8a, 0xe8, 0x71, 0x8a, 0x16, 0xc9, 0xcd,
0xb1, 0x66, 0x1b, 0xaa, 0x34, 0xd9, 0x88, 0x4c, 0xd0, 0xfb, 0x63, 0x81, 0x7d, 0xc9, 0x43, 0x8a,
0x5e, 0x41, 0x89, 0x85, 0x2d, 0xab, 0x6b, 0xf5, 0xeb, 0xa3, 0xe6, 0xcf, 0xfb, 0xce, 0xde, 0xaf,
0xfb, 0x4e, 0xc5, 0x54, 0xc6, 0x67, 0xb8, 0xc4, 0x42, 0xf4, 0x16, 0xaa, 0x24, 0x0c, 0x25, 0x55,
0xaa, 0x55, 0xea, 0x5a, 0xfd, 0xda, 0xc9, 0xc1, 0x20, 0xbd, 0xd9, 0x48, 0x86, 0x59, 0x01, 0x17,
0x0a, 0xf4, 0x02, 0xaa, 0x11, 0x51, 0x3a, 0x60, 0xa2, 0xd5, 0xec, 0x5a, 0x7d, 0x17, 0x57, 0x0c,
0x1c, 0x8b, 0x4f, 0xb6, 0x53, 0xf6, 0x9a, 0xd8, 0xd6, 0x5b, 0x41, 0x71, 0x5d, 0x52, 0xa5, 0x25,
0x5b, 0x6a, 0xc6, 0x63, 0x85, 0x41, 0x52, 0x91, 0x68, 0x62, 0x00, 0x76, 0x36, 0x54, 0x93, 0x90,
0x68, 0x82, 0xeb, 0x11, 0xd1, 0x34, 0x5e, 0x6e, 0x83, 0x88, 0x29, 0x8d, 0x1b, 0x24, 0x09, 0x99,
0x0e, 0x54, 0xb2, 0x5c, 0x9a, 0xeb, 0xf6, 0x99, 0x0a, 0x12, 0x81, 0x9b, 0x89, 0x08, 0x89, 0xa6,
0x41, 0x2e, 0xc5, 0x87, 0x39, 0xde, 0x15, 0x37, 0x72, 0x36, 0x11, 0x26, 0x02, 0x5c, 0xfd, 0x46,
0xa5, 0x62, 0x3c, 0xee, 0x5d, 0x43, 0xed, 0xc9, 0x2f, 0xa0, 0x77, 0xe0, 0x6a, 0x49, 0x62, 0x25,
0xb8, 0xd4, 0x69, 0x1a, 0xcd, 0x93, 0x67, 0x8f, 0x3f, 0xea, 0x17, 0x25, 0xfc, 0xa8, 0x42, 0xad,
0xdd, 0x64, 0xdc, 0x87, 0x18, 0x7a, 0x1f, 0xa0, 0x6e, 0xba, 0xa6, 0x82, 0x4a, 0xa2, 0xb9, 0x44,
0x87, 0xb0, 0x4f, 0x37, 0x84, 0x45, 0xe9, 0xc1, 0x2e, 0xce, 0x00, 0x7a, 0x0e, 0x95, 0x3b, 0x12,
0x45, 0x54, 0xe7, 0xed, 0x39, 0xea, 0xe1, 0xac, 0xfb, 0x94, 0x08, 0xb2, 0x64, 0x7a, 0x8b, 0xde,
0x40, 0xf3, 0x46, 0x52, 0x1a, 0x2c, 0x48, 0x1c, 0xde, 0xb1, 0x50, 0xaf, 0xd3, 0x63, 0xca, 0xb8,
0x61, 0xd8, 0x51, 0x41, 0xa2, 0x97, 0xe0, 0xa6, 0xb2, 0x90, 0xa9, 0xdb, 0xf4, 0xc4, 0x32, 0x76,
0x0c, 0x71, 0xc6, 0xd4, 0x6d, 0xe1, 0xe8, 0x4b, 0x9e, 0xef, 0x3f, 0x3a, 0xba, 0x02, 0xcf, 0x74,
0xe3, 0x27, 0x73, 0xfb, 0x2f, 0xae, 0x7e, 0x58, 0xd9, 0x10, 0xae, 0xb2, 0x99, 0x98, 0x44, 0xf3,
0xf1, 0xe4, 0xbe, 0x0a, 0x88, 0x3a, 0x50, 0x5b, 0xf2, 0xcd, 0x86, 0xe9, 0x60, 0x4d, 0xd4, 0x3a,
0xb7, 0x07, 0x19, 0xf5, 0x91, 0xa8, 0x35, 0x1a, 0x81, 0xfb, 0xb0, 0xe2, 0xad, 0x72, 0xba, 0xa8,
0xed, 0x41, 0xf6, 0x08, 0x06, 0xc5, 0x23, 0x18, 0xf8, 0x85, 0x62, 0xe4, 0x98, 0x4d, 0xff, 0xfe,
0xbb, 0x63, 0xe1, 0xc7, 0x36, 0x73, 0xbd, 0xa4, 0x11, 0x25, 0x8a, 0xb6, 0xec, 0xae, 0xd5, 0x77,
0x70, 0x01, 0x8f, 0x30, 0x38, 0xe9, 0x1a, 0x6c, 0x05, 0x45, 0x35, 0xa8, 0x8e, 0x2f, 0xaf, 0x86,
0x93, 0xf1, 0x99, 0xb7, 0x87, 0x1a, 0xe0, 0xce, 0x87, 0xfe, 0xf9, 0x64, 0x32, 0xf6, 0xcf, 0x3d,
0xcb, 0xd4, 0xe6, 0xfe, 0x14, 0x0f, 0x2f, 0xce, 0xbd, 0x12, 0x02, 0xa8, 0x7c, 0x9d, 0x4d, 0xc6,
0x97, 0x9f, 0xbd, 0x32, 0x3a, 0x00, 0x77, 0x34, 0x9d, 0xfa, 0x73, 0x1f, 0x0f, 0x67, 0x9e, 0xdd,
0x2e, 0x39, 0xd6, 0xd1, 0x6b, 0x68, 0xec, 0xac, 0x16, 0xf2, 0xa0, 0xee, 0x9f, 0xce, 0x02, 0x7f,
0x32, 0x0f, 0x2e, 0xf0, 0xec, 0xd4, 0xdb, 0x1b, 0xd9, 0xd7, 0x25, 0xb1, 0x58, 0x54, 0x52, 0xff,
0xef, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xab, 0x88, 0x34, 0xf2, 0x03, 0x00, 0x00,
}

View File

@ -26,7 +26,7 @@ enum NodeType {
SATELLITE = 1;
STORAGE = 2;
UPLINK = 3;
BOOTSTRAP = 4;
BOOTSTRAP = 4 [deprecated=true];
}
// NodeAddress contains the information needed to communicate with a node on the network

View File

@ -224,8 +224,7 @@ type OrderLimitSigning struct {
OrderExpiration *time.Time `protobuf:"bytes,9,opt,name=order_expiration,json=orderExpiration,proto3,stdtime" json:"order_expiration,omitempty"`
OrderCreation *time.Time `protobuf:"bytes,12,opt,name=order_creation,json=orderCreation,proto3,stdtime" json:"order_creation,omitempty"`
SatelliteSignature []byte `protobuf:"bytes,10,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"`
// satellites aren't necessarily discoverable in kademlia. this allows
// a storage node to find a satellite and handshake with it to get its key.
// this allows a storage node to find a satellite and handshake with it to get its key.
SatelliteAddress *NodeAddress `protobuf:"bytes,11,opt,name=satellite_address,json=satelliteAddress,proto3" json:"satellite_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`

View File

@ -80,8 +80,7 @@ message OrderLimitSigning {
bytes satellite_signature = 10;
// satellites aren't necessarily discoverable in kademlia. this allows
// a storage node to find a satellite and handshake with it to get its key.
// this allows a storage node to find a satellite and handshake with it to get its key.
node.NodeAddress satellite_address = 11;
}

View File

@ -10,7 +10,6 @@ import (
"github.com/zeebo/errs"
)
// Error is bootstrap web error type
var scanError = errs.Class("Protobuf Scanner")
var valueError = errs.Class("Protobuf Valuer")

View File

@ -21,7 +21,7 @@ import (
)
// Service represents a specific gRPC method collection to be registered
// on a shared gRPC server. Metainfo, OverlayCache, PieceStore, Kademlia,
// on a shared gRPC server. Metainfo, OverlayCache, PieceStore,
// etc. are all examples of services.
type Service interface {
Run(ctx context.Context, server *Server) error

View File

@ -920,302 +920,6 @@
{
"name": "CountNodesRequest"
},
{
"name": "GetBucketListRequest"
},
{
"name": "GetBucketListResponse",
"fields": [
{
"id": 1,
"name": "buckets",
"type": "Bucket",
"is_repeated": true
}
],
"messages": [
{
"name": "Bucket",
"fields": [
{
"id": 1,
"name": "bucket_id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 2,
"name": "routing_nodes",
"type": "node.Node",
"is_repeated": true
},
{
"id": 3,
"name": "cached_nodes",
"type": "node.Node",
"is_repeated": true
}
]
}
]
},
{
"name": "GetBucketsRequest"
},
{
"name": "GetBucketsResponse",
"fields": [
{
"id": 1,
"name": "total",
"type": "int64"
},
{
"id": 2,
"name": "ids",
"type": "bytes",
"is_repeated": true,
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
}
]
}
]
},
{
"name": "GetBucketRequest",
"fields": [
{
"id": 1,
"name": "id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
}
]
},
{
"name": "GetBucketResponse",
"fields": [
{
"id": 1,
"name": "id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 2,
"name": "nodes",
"type": "node.Node",
"is_repeated": true
}
]
},
{
"name": "BucketList",
"fields": [
{
"id": 1,
"name": "nodes",
"type": "node.Node",
"is_repeated": true
}
]
},
{
"name": "PingNodeRequest",
"fields": [
{
"id": 1,
"name": "id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 2,
"name": "address",
"type": "string"
}
]
},
{
"name": "PingNodeResponse",
"fields": [
{
"id": 1,
"name": "ok",
"type": "bool"
}
]
},
{
"name": "LookupNodeRequest",
"fields": [
{
"id": 1,
"name": "id",
"type": "string"
},
{
"id": 2,
"name": "address",
"type": "string"
}
]
},
{
"name": "LookupNodeResponse",
"fields": [
{
"id": 1,
"name": "node",
"type": "node.Node"
},
{
"id": 2,
"name": "meta",
"type": "node.NodeMetadata"
}
]
},
{
"name": "NodeInfoRequest",
"fields": [
{
"id": 1,
"name": "id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 2,
"name": "address",
"type": "node.NodeAddress"
}
]
},
{
"name": "NodeInfoResponse",
"fields": [
{
"id": 1,
"name": "type",
"type": "node.NodeType"
},
{
"id": 2,
"name": "operator",
"type": "node.NodeOperator"
},
{
"id": 3,
"name": "capacity",
"type": "node.NodeCapacity"
},
{
"id": 4,
"name": "version",
"type": "node.NodeVersion"
}
]
},
{
"name": "FindNearRequest",
"fields": [
{
"id": 1,
"name": "id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 2,
"name": "start",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
},
{
"id": 3,
"name": "limit",
"type": "int64"
}
]
},
{
"name": "FindNearResponse",
"fields": [
{
"id": 2,
"name": "nodes",
"type": "node.Node",
"is_repeated": true
}
]
},
{
"name": "DumpNodesRequest"
},
@ -1297,7 +1001,13 @@
{
"id": 3,
"name": "bootstrap_address",
"type": "string"
"type": "string",
"options": [
{
"name": "deprecated",
"value": "true"
}
]
},
{
"id": 4,
@ -1511,46 +1221,6 @@
}
],
"services": [
{
"name": "KadInspector",
"rpcs": [
{
"name": "CountNodes",
"in_type": "CountNodesRequest",
"out_type": "CountNodesResponse"
},
{
"name": "PingNode",
"in_type": "PingNodeRequest",
"out_type": "PingNodeResponse"
},
{
"name": "LookupNode",
"in_type": "LookupNodeRequest",
"out_type": "LookupNodeResponse"
},
{
"name": "NodeInfo",
"in_type": "NodeInfoRequest",
"out_type": "NodeInfoResponse"
},
{
"name": "FindNear",
"in_type": "FindNearRequest",
"out_type": "FindNearResponse"
},
{
"name": "DumpNodes",
"in_type": "DumpNodesRequest",
"out_type": "DumpNodesResponse"
},
{
"name": "GetBucketList",
"in_type": "GetBucketListRequest",
"out_type": "GetBucketListResponse"
}
]
},
{
"name": "OverlayInspector",
"rpcs": [
@ -4049,7 +3719,13 @@
},
{
"name": "BOOTSTRAP",
"integer": 4
"integer": 4,
"options": [
{
"name": "deprecated",
"value": "true"
}
]
}
]
},

View File

@ -29,9 +29,6 @@ var ErrNodeOffline = errs.Class("node is offline")
// ErrNodeDisqualified is returned if a nodes is disqualified
var ErrNodeDisqualified = errs.Class("node is disqualified")
// ErrBucketNotFound is returned if a bucket is unable to be found in the routing table
var ErrBucketNotFound = errs.New("bucket not found")
// ErrNotEnoughNodes is when selecting nodes failed with the given parameters
var ErrNotEnoughNodes = errs.Class("not enough nodes")
@ -332,8 +329,8 @@ func (service *Service) Reliable(ctx context.Context) (nodes storj.NodeIDList, e
func (service *Service) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node) (err error) {
defer mon.Task()(&ctx)(&err)
// If we get a Node without an ID (i.e. bootstrap node)
// we don't want to add to the routing tbale
// If we get a Node without an ID
// we don't want to add to the database
if nodeID.IsZero() {
return nil
}

View File

@ -37,7 +37,7 @@ case $1 in
done
echo "done"
echo -n "generating alpha identities"
for dir in ${basepath}/{bootstrap/*,satellite/*,storagenode/*,gateway/*}; do
for dir in ${basepath}/{satellite/*,storagenode/*,gateway/*}; do
echo -n "."
_ca_basepath=$(rand_ca_basepath)
_ca_cert=${dir}/ca-alpha.cert

View File

@ -33,7 +33,6 @@ var Libraries = []string{
var Peers = []string{
"storj.io/storj/satellite/...",
"storj.io/storj/storagenode/...",
"storj.io/storj/bootstrap/...",
"storj.io/storj/versioncontrol/...",
"storj.io/storj/linksharing/...",
"storj.io/storj/certificate/...",

View File

@ -33,7 +33,7 @@ type Config struct {
MinimumBandwidth memory.Size `help:"how much bandwidth a node at minimum has to advertise" default:"500GB"`
}
// Service which monitors disk usage and updates kademlia network as necessary.
// Service which monitors disk usage
//
// architecture: Service
type Service struct {

View File

@ -27,7 +27,6 @@ type Config struct {
// ServiceVersions provides a list of allowed Versions per Service
type ServiceVersions struct {
Bootstrap string `user:"true" help:"Allowed Bootstrap Versions" default:"v0.0.1"`
Satellite string `user:"true" help:"Allowed Satellite Versions" default:"v0.0.1"`
Storagenode string `user:"true" help:"Allowed Storagenode Versions" default:"v0.0.1"`
Uplink string `user:"true" help:"Allowed Uplink Versions" default:"v0.0.1"`
@ -37,7 +36,6 @@ type ServiceVersions struct {
// Versions represents versions for all binaries
type Versions struct {
Bootstrap Binary
Satellite Binary
Storagenode Binary
Uplink Binary
@ -101,11 +99,6 @@ func New(log *zap.Logger, config *Config) (peer *Peer, err error) {
}
// Convert each Service's Version String to SemVer
peer.Versions.Bootstrap, err = version.NewSemVer(config.Versions.Bootstrap)
if err != nil {
return &Peer{}, err
}
peer.Versions.Satellite, err = version.NewSemVer(config.Versions.Satellite)
if err != nil {
return &Peer{}, err
@ -132,7 +125,6 @@ func New(log *zap.Logger, config *Config) (peer *Peer, err error) {
}
peer.Versions.Processes = version.Processes{}
peer.Versions.Processes.Bootstrap = configToProcess(config.Binary.Bootstrap)
peer.Versions.Processes.Satellite = configToProcess(config.Binary.Satellite)
peer.Versions.Processes.Storagenode = configToProcess(config.Binary.Storagenode)
peer.Versions.Processes.Uplink = configToProcess(config.Binary.Uplink)

View File

@ -1,26 +0,0 @@
.DS_Store
node_modules
dist
package-lock.json
coverage
# local env files
.env.local
.env.*.local
# Log files
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Allow images
!*.svg
# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw*

View File

@ -1,8 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
module.exports = {
presets: [
'@vue/app'
]
}

View File

@ -1,11 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no">
<title>Storj Node Bootstrap</title>
</head>
<body>
<div id="app"></div>
</body>
</html>

View File

@ -1,65 +0,0 @@
{
"name": "storj-bootstrap",
"version": "0.1.0",
"private": true,
"scripts": {
"serve": "rm -rf dist/ && npm run build && vue-cli-service serve -s dist",
"lint": "vue-cli-service lint",
"build": "webpack --config webpack.config.dev.js"
},
"dependencies": {
"@types/graphql": "^14.0.3",
"apollo-cache-inmemory": "^1.3.9",
"apollo-client": "^2.4.5",
"apollo-link": "^1.2.4",
"apollo-link-context": "^1.0.10",
"apollo-link-http": "^1.5.5",
"graphql": "^14.0.2",
"graphql-tag": "^2.10.0",
"vue": "^2.5.17",
"vue-apollo": "^3.0.0-beta.25",
"vue-class-component": "^6.0.0",
"vue-clipboards": "^1.2.4",
"vue-property-decorator": "^7.0.0",
"vue-router": "^3.0.1",
"vuex": "^3.0.1"
},
"devDependencies": {
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-object-rest-spread": "^7.0.0",
"@types/sinon": "^5.0.5",
"@vue/cli-plugin-babel": "^3.0.5",
"@vue/cli-plugin-typescript": "^3.0.5",
"@vue/cli-plugin-unit-jest": "^3.0.5",
"@vue/cli-service": "^3.0.5",
"babel-core": "7.0.0-bridge.0",
"css-loader": "^1.0.0",
"eslint": "^5.9.0",
"eslint-plugin-vue": "^5.0.0",
"node-sass": "^4.9.0",
"sass-loader": "^7.0.1",
"tslint": "^5.11.0",
"tslint-consistent-codestyle": "^1.14.1",
"tslint-loader": "^3.5.4",
"typescript": "^3.0.0",
"vue-html-webpack-plugin": "^3.2.2",
"vue-loader": "^15.4.2",
"vue-style-loader": "^4.1.2",
"vue-template-compiler": "^2.5.17",
"vue-tslint": "^0.3.2",
"vue-tslint-loader": "^3.5.6",
"webpack": "^4.21.0",
"webpack-cli": "^3.1.2",
"webpack-node-externals": "^1.7.2"
},
"postcss": {
"plugins": {
"autoprefixer": {}
}
},
"browserslist": [
"> 1%",
"last 2 versions",
"not ie <= 8"
]
}

View File

@ -1,39 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div id="app">
<router-view/>
</div>
</template>
<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';
@Component({
})
export default class App extends Vue {
}
</script>
<style lang="scss">
body {
margin: 0 !important;
}
@font-face {
font-family: "font_regular";
src: url("../../satellite/static/fonts/font_regular.ttf");
}
@font-face {
font-family: "font_medium";
src: url("../../satellite/static/fonts/font_medium.ttf");
}
@font-face {
font-family: "font_bold";
src: url("../../satellite/static/fonts/font_bold.ttf");
}
</style>

View File

@ -1,32 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
import apolloManager from '@/utils/apolloManager';
import gql from 'graphql-tag';
import { NodeStatus } from '@/types/nodeStatus';
export async function checkAvailability(nodeId: string): Promise<number> {
try {
let response: any = await apolloManager.query(
{
query: gql(`
query {
isNodeUp (
nodeID: "${nodeId}"
)
}`
),
fetchPolicy: 'no-cache',
}
);
if (response.errors) {
return NodeStatus.Error;
}
return response.data.isNodeUp ? NodeStatus.Active : NodeStatus.Error;
} catch (e) {
return NodeStatus.Error;
}
}

View File

@ -1,15 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
import Vue from 'vue';
import App from './App.vue';
import router from './router';
import store from './store';
Vue.config.productionTip = false;
new Vue({
router,
render: (h) => h(App),
store,
}).$mount('#app');

View File

@ -1,21 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
import Vue from 'vue';
import Router from 'vue-router';
import Search from '@/views/Search.vue';
Vue.use(Router);
let router = new Router({
mode: 'history',
routes: [
{
path: '',
name: '',
component: Search
},
]
});
export default router;

View File

@ -1,38 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
import Vue from 'vue';
import Vuex from 'vuex';
import { ACTIONS, MUTATIONS } from '@/utils/constants';
import { checkAvailability } from '@/api/bootstrap';
import { NodeStatus } from '@/types/nodeStatus';
Vue.use(Vuex);
// Bootstrap store (vuex)
const store = new Vuex.Store({
state: {
isLoading: false,
nodeStatus: 1,
},
mutations: {
[MUTATIONS.SET_NODE_STATUS](state: any, status: NodeStatus): void {
state.nodeStatus = status;
},
[MUTATIONS.SET_LOADING](state:any): void {
state.isLoading = true;
}
},
actions: {
async [ACTIONS.CHECK_NODE_STATUS]({commit}: any, nodeId: string): Promise<any> {
let nodeStatus = await checkAvailability(nodeId);
commit(MUTATIONS.SET_NODE_STATUS, nodeStatus);
},
[ACTIONS.SET_LOADING]({commit}: any): void {
commit(MUTATIONS.SET_LOADING);
}
},
});
export default store;

View File

@ -1,8 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
export enum NodeStatus {
None = 1,
Active,
Error,
}

View File

@ -1,19 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
import { HttpLink } from 'apollo-link-http';
import ApolloClient from 'apollo-client/ApolloClient';
import { InMemoryCache } from 'apollo-cache-inmemory';
// Bootstrap server url
const bootstrapUrl = new HttpLink({
uri: '/api/graphql/v0',
});
// Creating apollo client
export default new ApolloClient({
link: bootstrapUrl,
cache: new InMemoryCache(),
connectToDevTools: true,
});

View File

@ -1,12 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
export const MUTATIONS = {
SET_NODE_STATUS: 'SET_NODE_STATUS',
SET_LOADING: 'SET_LOADING',
};
export const ACTIONS = {
CHECK_NODE_STATUS: 'CHECK_NODE_STATUS',
SET_LOADING: 'SET_LOADING',
};

View File

@ -1,162 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="failure-container">
<svg id="loading-svg" width="122px" height="120px" viewBox="0 0 122 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient x1="62.8427859%" y1="49.4456705%" x2="-91.4339241%" y2="51.3777925%" id="linearGradient-1">
<stop stop-color="#FF0000" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-35.1%" y="-1516.0%" width="170.2%" height="3132.0%" filterUnits="objectBoundingBox" id="filter-2">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="32.6019784%" x2="-91.4339241%" y2="93.2429871%" id="linearGradient-3">
<stop stop-color="#FF0000" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-213.9%" y="-1516.0%" width="527.8%" height="3132.0%" filterUnits="objectBoundingBox" id="filter-4">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="-400.886306%" x2="-91.4339241%" y2="1170.68321%" id="linearGradient-5">
<stop stop-color="#FF0000" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-38.2%" y="-54.9%" width="176.4%" height="209.8%" filterUnits="objectBoundingBox" id="filter-6">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="-424.051899%" x2="-91.4339241%" y2="1228.26156%" id="linearGradient-7">
<stop stop-color="#FF0000" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<linearGradient x1="62.8427859%" y1="-11.1496077%" x2="-91.4339241%" y2="201.988068%" id="linearGradient-8">
<stop stop-color="#FF0000" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Icon/node/2-Copy-2" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Group-2" transform="translate(23.000000, 20.000000)">
<g id="Group" opacity="0.870117188" transform="translate(3.000000, 13.000000)" stroke-width="4.2">
<path d="M5.68434189e-14,25.0148333 L68.3666667,25.0148333" id="Stroke-3" stroke="url(#linearGradient-1)" opacity="0.605352493" filter="url(#filter-2)"></path>
<path d="M58.553,35.6936111 L46.3496667,35.6936111" id="Stroke-5" stroke="url(#linearGradient-3)" opacity="0.605352493" stroke-linecap="round" filter="url(#filter-4)"></path>
<path d="M68.3666667,25.1483333 L68.3666667,34.2016667 C68.3666667,41.565 62.3983333,47.5333333 55.035,47.5333333 L13.3316667,47.5333333 C5.96833333,47.5333333 5.68434189e-14,41.565 5.68434189e-14,34.2016667 L5.68434189e-14,25.015 C5.68434189e-14,24.4783333 0.128333333,23.9516667 0.376666667,23.4766667 L8.855,7.17833333 C11.1516667,2.76666667 15.7116667,-2.34479103e-13 20.685,-2.34479103e-13 L45.945,-2.34479103e-13 C50.5583333,-2.34479103e-13 54.8466667,2.385 57.2766667,6.30833333 L67.8666667,23.3916667 C68.1933333,23.92 68.3666667,24.5283333 68.3666667,25.1483333 Z" id="Stroke-1" stroke="url(#linearGradient-5)" opacity="0.605352493" filter="url(#filter-6)"></path>
</g>
<g id="Group" fill-rule="nonzero">
<g id="Group-4" transform="translate(0.000000, 13.000000)">
<g id="Group-3">
<g id="Group-7">
<g id="Group-5" transform="translate(0.250333, 0.250833)" fill="url(#linearGradient-7)">
<path d="M6.03606851,25.9148333 L68.1003134,25.9148333 L58.4913685,10.4140957 C56.4451258,7.11035405 52.8329636,5.1 48.945,5.1 L23.685,5.1 C19.4945489,5.1 15.6526847,7.43113725 13.7180055,11.147462 L6.03606851,25.9148333 Z M69.2666667,30.1148333 L5.1,30.1148333 L5.1,37.2016667 C5.1,43.405202 10.1281313,48.4333333 16.3316667,48.4333333 L58.035,48.4333333 C64.2385354,48.4333333 69.2666667,43.405202 69.2666667,37.2016667 L69.2666667,30.1148333 Z M73.4666667,28.1483333 L73.4666667,37.2016667 C73.4666667,45.724798 66.5581313,52.6333333 58.035,52.6333333 L16.3316667,52.6333333 C7.80853536,52.6333333 0.9,45.724798 0.9,37.2016667 L0.9,28.015 C0.9,27.1392671 1.11042376,26.2788214 1.51366113,25.507538 L9.99229426,9.20862857 C12.6504846,4.1025112 17.9284291,0.9 23.685,0.9 L48.945,0.9 C54.2867182,0.9 59.2497936,3.66220673 62.0615403,8.20188581 L72.6528236,25.287292 C73.1845977,26.1473551 73.4666667,27.1380876 73.4666667,28.1483333 Z" id="Combined-Shape"></path>
</g>
<path d="M61.8033333,36.8444444 C62.9631313,36.8444444 63.9033333,37.7846465 63.9033333,38.9444444 C63.9033333,40.1042424 62.9631313,41.0444444 61.8033333,41.0444444 L49.6,41.0444444 C48.440202,41.0444444 47.5,40.1042424 47.5,38.9444444 C47.5,37.7846465 48.440202,36.8444444 49.6,36.8444444 L61.8033333,36.8444444 Z" id="Stroke-5" fill="url(#linearGradient-8)"></path>
</g>
</g>
</g>
<path d="M88.2734342,15.145147 C89.1803944,16.0521073 89.1803944,17.4586211 88.2734342,18.3655813 C87.8199541,18.8190614 87.2300672,19.0458015 86.6862538,19.0458015 C86.1424405,19.0458015 85.50648,18.8190614 85.0990735,18.3655813 L79.4768272,12.743335 L73.8545809,18.3197798 C73.4011008,18.7732599 72.8112138,19 72.2674005,19 C71.7235872,19 71.0876267,18.7732599 70.6802202,18.3197798 C69.7732599,17.4128196 69.7732599,16.0063058 70.6802202,15.0993456 L76.256665,9.52290075 L70.6802202,3.90065444 C69.7732599,2.99369424 69.7732599,1.58718036 70.6802202,0.680220153 C71.5871804,-0.226740051 72.9936942,-0.226740051 73.9006544,0.680220153 L79.4770993,6.30246645 L85.0993456,0.680220153 C86.0063058,-0.226740051 87.4128196,-0.226740051 88.3197798,0.680220153 C89.2267401,1.58718036 89.2267401,2.99369424 88.3197798,3.90065444 L82.6975335,9.52290075 L88.2734342,15.145147 Z" id="Path" fill="#FF0000"></path>
</g>
</g>
</g>
</svg>
<div class="failure-container__animation-container">
<p class="failure-container__title">Your Node Not Found</p>
<p class="failure-container__info">Please check your Node ID or restart your Storj Node and try again later</p>
<div class="overlay__main-container__button" @click="onTryAgainClick">
<p class="overlay__main-container__button__text">Try Again</p>
</div>
<p class="overlay__main-container__support">Or Contact our <a>Support</a></p>
</div>
</div>
</template>
<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';
@Component({
mounted() {
setTimeout(() => {
(document as any).querySelector('.failure-container').classList.add('active');
}, 500);
},
methods: {
onTryAgainClick: function () {
location.reload();
},
},
})
export default class Failure extends Vue {}
</script>
<style lang="scss">
.failure-container {
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
&__title {
width: 558px;
font-family: 'font_bold';
font-size: 48px;
font-style: normal;
font-stretch: normal;
line-height: 57px;
letter-spacing: normal;
text-align: center;
color: #fefeff;
margin-block-start: 0em;
margin-block-end: 0em;
}
&__info {
margin-top: 24px;
width: 446px;
font-family: 'font_regular';
font-size: 16px;
font-style: normal;
font-stretch: normal;
line-height: 23px;
letter-spacing: normal;
text-align: center;
color: #696c77;
margin-block-start: 1.5em;
margin-block-end: 0em;
}
&__animation-container {
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
visibility: hidden;
position: relative;
bottom: -200px;
opacity: 0;
-webkit-transition: all 0.5s linear;
-moz-transition: all 0.5s linear;
-o-transition: all 0.5s linear;
transition: all 0.5s linear;
}
svg {
display: block;
position: relative;
top: 192px;
transition: all 0.5s linear;
}
}
.failure-container.active {
svg {
top: 0;
}
.failure-container__animation-container {
margin-bottom: 0;
bottom: 0;
visibility: visible;
opacity: 1;
}
}
</style>

View File

@ -1,203 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="overlay">
<div class="overlay__main-container">
<div class="overlay__main-container__svg" v-if="nodeStatus.isNone">
<div class="loading-line"></div>
<svg width="120px" height="120px" viewBox="0 0 120 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient x1="65.4452463%" y1="208.17803%" x2="167.766742%" y2="150.69682%" id="linearGradient-1">
<stop stop-color="#4381F7" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Icon/node/1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Group" transform="translate(23.000000, 33.000000)" fill-rule="nonzero">
<g id="Group-2" transform="translate(0.900000, 0.900000)" fill="#505460">
<path d="M5.24781681,25 L66.7017485,25 L57.2767486,9.5639393 C55.2407064,6.22649392 51.6542832,4.2 47.7985861,4.2 L22.6740614,4.2 C18.517928,4.2 14.702343,6.55052798 12.7764624,10.3064151 L5.24781681,25 Z M68,29.2 L4.2,29.2 L4.2,36.6374474 C4.2,42.9203342 9.20330161,48 15.3601658,48 L56.8398342,48 C62.9966984,48 68,42.9203342 68,36.6374474 L68,29.2 Z M72.2,27.4952314 L72.2,36.6374474 C72.2,45.2258089 65.3306541,52.2 56.8398342,52.2 L15.3601658,52.2 C6.86934588,52.2 -1.70530257e-13,45.2258089 -1.70530257e-13,36.6374474 L-1.70530257e-13,27.3605891 C-1.70530257e-13,26.4842425 0.207498838,25.6228069 0.605692679,24.8495457 L9.03884835,8.39062775 C11.6812091,3.23744801 16.9365211,0 22.6740614,0 L47.7985861,0 C53.1230697,0 58.0658931,2.7929223 60.861791,7.37591038 L71.3962549,24.6290296 C71.9217493,25.4919082 72.2,26.4841461 72.2,27.4952314 Z" id="Combined-Shape"></path>
</g>
<path d="M61,36.9 C62.159798,36.9 63.1,37.840202 63.1,39 C63.1,40.159798 62.159798,41.1 61,41.1 L49,41.1 C47.840202,41.1 46.9,40.159798 46.9,39 C46.9,37.840202 47.840202,36.9 49,36.9 L61,36.9 Z" id="Stroke-5" fill="url(#linearGradient-1)"></path>
</g>
</g>
</svg>
</div>
<Success v-if="nodeStatus.isActive"/>
<Failure v-if="nodeStatus.isError"/>
</div>
</div>
</template>
<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';
import Success from './Success.vue';
import Failure from './Failure.vue';
import { NodeStatus } from '../types/nodeStatus';
@Component({
mounted() {
(document as any).querySelector('.overlay').classList.add('active');
},
data: function () {
return {
isSuccessCheck: false,
isFailureCheck: false,
};
},
computed: {
nodeStatus: function () {
const currentNodeStatus = this.$store.state.nodeStatus;
const isNone = currentNodeStatus === NodeStatus.None;
const isActive = currentNodeStatus === NodeStatus.Active;
const isError = currentNodeStatus === NodeStatus.Error;
return {
isNone,
isActive,
isError,
};
},
},
components: {
Success,
Failure,
},
})
export default class Loading extends Vue {}
</script>
<style lang="scss">
.overlay {
position: absolute;
top: 0;
left: 0;
height: 100%;
width: 0px;
display: flex;
justify-content: center;
align-items: center;
background-color: transparent;
-webkit-transition: all 0.5s linear;
-moz-transition: all 0.5s linear;
-o-transition: all 0.5s linear;
transition: all 0.5s linear;
&__main-container {
width: auto;
height: auto;
visibility: hidden;
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
opacity: 0;
-webkit-transition: all 1s linear;
-moz-transition: all 1s linear;
-o-transition: all 1s linear;
transition: all 1s linear;
transition-delay: 1s;
&__svg {
position: relative;
}
.loading-line {
height: 4px;
position: absolute;
top: 59px;
left: 28px;
width: 64px;
-webkit-transition: all 1s linear;
-moz-transition: all 1s linear;
-o-transition: all 1s linear;
transition: all 1s linear;
animation-delay: 5s;
background-color: #1494ff;
border-radius: 12px;
}
h1 {
margin-top: 33px;
}
&__button {
width: 176px;
height: 52px;
border-radius: 8px;
background-color: #1494ff;
margin-top: 46px;
&__text {
font-family: 'font_bold';
font-size: 16px;
font-style: normal;
font-stretch: normal;
line-height: normal;
letter-spacing: normal;
text-align: center;
color: #f3f4f9;
}
&:hover {
cursor: pointer;
background-color: #2039df;
}
}
&__support {
margin-top: 128px;
font-family: 'font_regular';
font-size: 12px;
font-style: normal;
font-stretch: normal;
line-height: 23px;
letter-spacing: normal;
text-align: center;
color: #696c77;
a {
font-family: 'font_medium';
font-size: 12px;
color: #1494ff;
text-decoration: none;
cursor: pointer;
}
}
.loading {
-webkit-transition: all 1s linear;
-moz-transition: all 1s linear;
-o-transition: all 1s linear;
transition: all 5s linear;
animation-delay: 2s;
animation-duration: 2s;
}
}
}
.overlay.active {
background-color: #191919;
width: 100%;
z-index: 9999;
.overlay__main-container {
opacity: 1;
visibility: visible;
}
.loading-line {
animation: pathWidth 3s linear;
}
}
@keyframes pathWidth {
from {
width: 0px;
}
to {
width: 64px;
}
}
</style>

File diff suppressed because one or more lines are too long

View File

@ -1,154 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="success-container" >
<svg width="122px" height="120px" viewBox="0 0 122 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient x1="62.8427859%" y1="49.4456705%" x2="-91.4339241%" y2="51.3777925%" id="linearGradient-1">
<stop stop-color="#0058FF" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-35.1%" y="-1516.0%" width="170.2%" height="3132.0%" filterUnits="objectBoundingBox" id="filter-2">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="32.6019784%" x2="-91.4339241%" y2="93.2429871%" id="linearGradient-3">
<stop stop-color="#0058FF" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-213.9%" y="-1516.0%" width="527.8%" height="3132.0%" filterUnits="objectBoundingBox" id="filter-4">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="-400.886306%" x2="-91.4339241%" y2="1170.68321%" id="linearGradient-5">
<stop stop-color="#0058FF" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
<filter x="-38.2%" y="-54.9%" width="176.4%" height="209.8%" filterUnits="objectBoundingBox" id="filter-6">
<feGaussianBlur stdDeviation="8" in="SourceGraphic"></feGaussianBlur>
</filter>
<linearGradient x1="62.8427859%" y1="46.4797858%" x2="-91.4339241%" y2="58.7495336%" id="linearGradient-7">
<stop stop-color="#4381F7" offset="0%"></stop>
<stop stop-color="#044CFF" offset="100%"></stop>
</linearGradient>
<linearGradient x1="62.8427859%" y1="-424.051899%" x2="-91.4339241%" y2="1228.26156%" id="linearGradient-8">
<stop stop-color="#4381F7" offset="0%"></stop>
<stop stop-color="#044CFF" offset="100%"></stop>
</linearGradient>
<linearGradient x1="62.8427859%" y1="-11.1496077%" x2="-91.4339241%" y2="201.988068%" id="linearGradient-9">
<stop stop-color="#4381F7" offset="0%"></stop>
<stop stop-color="#505460" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Icon/node/2-Copy" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Group-2" transform="translate(23.000000, 21.000000)">
<g id="Group" transform="translate(3.000000, 18.000000)" stroke-width="4.2">
<path d="M5.68434189e-14,25.0148333 L68.3666667,25.0148333" id="Stroke-3" stroke="url(#linearGradient-1)" filter="url(#filter-2)"></path>
<path d="M58.553,35.6936111 L46.3496667,35.6936111" id="Stroke-5" stroke="url(#linearGradient-3)" stroke-linecap="round" filter="url(#filter-4)"></path>
<path d="M68.3666667,25.1483333 L68.3666667,34.2016667 C68.3666667,41.565 62.3983333,47.5333333 55.035,47.5333333 L13.3316667,47.5333333 C5.96833333,47.5333333 5.68434189e-14,41.565 5.68434189e-14,34.2016667 L5.68434189e-14,25.015 C5.68434189e-14,24.4783333 0.128333333,23.9516667 0.376666667,23.4766667 L8.855,7.17833333 C11.1516667,2.76666667 15.7116667,-2.34479103e-13 20.685,-2.34479103e-13 L45.945,-2.34479103e-13 C50.5583333,-2.34479103e-13 54.8466667,2.385 57.2766667,6.30833333 L67.8666667,23.3916667 C68.1933333,23.92 68.3666667,24.5283333 68.3666667,25.1483333 Z" id="Stroke-1" stroke="url(#linearGradient-5)" filter="url(#filter-6)"></path>
</g>
<g id="Group-4" transform="translate(0.250333, 12.250833)" fill-rule="nonzero">
<g id="Group-3">
<g id="Group-5" transform="translate(0.900000, 0.900000)">
<polygon id="Stroke-3" fill="url(#linearGradient-7)" points="2.1 29.2148333 2.1 25.0148333 70.4666667 25.0148333 70.4666667 29.2148333"></polygon>
<path d="M68.3666667,27.2483333 C68.3666667,27.0190576 68.3023215,26.7930533 68.181793,26.5981142 L57.5913685,9.51409574 C55.5451258,6.21035405 51.9329636,4.2 48.045,4.2 L22.785,4.2 C18.5945489,4.2 14.7526847,6.53113725 12.8180055,10.247462 L4.33767917,26.5496171 C4.24711487,26.7228441 4.2,26.9155019 4.2,27.115 L4.2,36.3016667 C4.2,42.505202 9.22813131,47.5333333 15.4316667,47.5333333 L57.135,47.5333333 C63.3385354,47.5333333 68.3666667,42.505202 68.3666667,36.3016667 L68.3666667,27.2483333 Z M72.5666667,27.2483333 L72.5666667,36.3016667 C72.5666667,44.824798 65.6581313,51.7333333 57.135,51.7333333 L15.4316667,51.7333333 C6.90853536,51.7333333 -2.84217094e-14,44.824798 -2.84217094e-14,36.3016667 L-2.84217094e-14,27.115 C-2.84217094e-14,26.2392671 0.210423755,25.3788214 0.613661128,24.607538 L9.09229426,8.30862857 C11.7504846,3.2025112 17.0284291,0 22.785,0 L48.045,0 C53.3867182,0 58.3497936,2.76220673 61.1615403,7.30188581 L71.7528236,24.387292 C72.2845977,25.2473551 72.5666667,26.2380876 72.5666667,27.2483333 Z" id="Stroke-1" fill="url(#linearGradient-8)"></path>
</g>
</g>
<path d="M61.553,36.5936111 C62.712798,36.5936111 63.653,37.5338131 63.653,38.6936111 C63.653,39.8534091 62.712798,40.7936111 61.553,40.7936111 L49.3496667,40.7936111 C48.1898687,40.7936111 47.2496667,39.8534091 47.2496667,38.6936111 C47.2496667,37.5338131 48.1898687,36.5936111 49.3496667,36.5936111 L61.553,36.5936111 Z" id="Stroke-5" fill="url(#linearGradient-9)"></path>
</g>
<path d="M69.44417,8.21779453 L69.44417,8.21779453 C70.2538459,7.26705512 71.6659377,7.11616 72.6582279,7.87434261 L78.4,12.2614787 L88.9509915,1.59935829 C89.8468741,0.694039749 91.2857828,0.622683594 92.26684,1.43492382 L92.26684,1.43492382 C93.1329574,2.1520027 93.2537769,3.43543684 92.536698,4.30155419 C92.4934859,4.35374756 92.4476951,4.40375115 92.3994967,4.45137826 L78.68834,18 L69.7763018,11.2574595 C68.850355,10.5569201 68.6676262,9.23839261 69.3681655,8.31244584 C69.392584,8.28017041 69.4179294,8.24860682 69.44417,8.21779453 Z" id="Path" fill="#5FC000" fill-rule="nonzero"></path>
</g>
</g>
</svg>
<div class="success-container__animation-container">
<h1 class="success-container__title">Connected</h1>
<h3 class="success-container__info">Your Node found and it is connected to the network</h3>
</div>
</div>
</template>
<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';

/**
 * Success is the final onboarding screen shown once the node is connected.
 * After a short delay it adds the 'active' class to the root element, which
 * triggers the CSS slide/fade-in transition defined in this component's styles.
 */
@Component({
    mounted() {
        // Delay so the transition runs after the initial render has painted.
        setTimeout(() => {
            // document.querySelector is already typed; the previous 'as any'
            // cast was unnecessary and hid the possible-null result. Guard it:
            // the element may be gone if the component was destroyed before
            // the timer fired.
            const container = document.querySelector('.success-container');
            if (container) {
                container.classList.add('active');
            }
        }, 500);
    }
})
export default class Success extends Vue {}
</script>
<style scoped lang="scss">
// Success screen layout: centers the checkmark illustration (svg) and the
// text block below it. The text starts hidden and offset; when the root
// element receives the 'active' class (added from the component's mounted()
// hook after a 500ms delay) the svg slides up and the text fades in.
.success-container {
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
// Headline ("Connected").
&__title {
width: 278px;
font-family: 'font_bold';
font-size: 48px;
font-style: normal;
font-stretch: normal;
line-height: 57px;
letter-spacing: normal;
text-align: center;
color: #fefeff;
// Zero out the UA's default h1 block margins.
margin-block-start: 0em;
margin-block-end: 0em;
}
// Secondary line describing the connection status.
&__info {
margin-top: 24px;
width: 446px;
font-family: 'font_regular';
font-size: 16px;
font-style: normal;
font-stretch: normal;
line-height: 23px;
letter-spacing: normal;
text-align: center;
color: #696c77;
margin-block-start: 1.5em;
margin-block-end: 0em;
}
// Text block starts invisible and 50px below its final position so the
// transition below can animate it into place.
&__animation-container {
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
visibility: hidden;
position: relative;
bottom: -50px;
opacity: 0;
// Vendor-prefixed copies for older WebKit/Gecko/Presto engines.
-webkit-transition: all 0.5s linear;
-moz-transition: all 0.5s linear;
-o-transition: all 0.5s linear;
transition: all 0.5s linear;
}
// Illustration starts 50px low and animates up to 0 when active.
svg {
display: block;
position: relative;
top: 50px;
transition: all 0.5s linear;
}
}
// End state once JS adds the 'active' class: svg settles, text becomes
// visible and fades in at its resting position.
.success-container.active {
svg {
top: 0;
}
.success-container__animation-container {
margin-bottom: 0;
bottom: 0;
visibility: visible;
opacity: 1;
}
}
</style>

View File

@ -1,38 +0,0 @@
// TypeScript compiler configuration for this Vue frontend.
// tsconfig.json is parsed as JSONC by the TypeScript compiler, so comments are allowed here.
{
"compilerOptions": {
// Emit the newest ECMAScript syntax; the bundler handles downleveling.
"target": "esnext",
// Emit ES modules so webpack can tree-shake.
"module": "esnext",
// Enable all strict type-checking options...
"strict": true,
// ...except implicit-any errors, which are relaxed for this codebase.
"noImplicitAny": false,
// Keep JSX as-is (Vue's JSX support processes it downstream).
"jsx": "preserve",
// Import emit helpers from tslib instead of inlining them per file.
"importHelpers": true,
// Resolve modules the way Node/webpack does.
"moduleResolution": "node",
// Required for vue-property-decorator's @Component decorators.
"experimentalDecorators": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
// Emit source maps for debugging the bundled output.
"sourceMap": true,
"baseUrl": ".",
"types": [
"webpack-env"
],
// Mirror webpack's '@' -> src/ alias for the type checker.
"paths": {
"@/*": [
"src/*"
]
},
"lib": [
"esnext",
"dom",
"dom.iterable",
"scripthost"
]
},
"include": [
"src/**/*.ts",
"src/**/*.tsx",
"src/**/*.vue"
],
"exclude": [
"node_modules"
]
}

View File

@ -1,69 +0,0 @@
// TSLint configuration (tslint parses its config as JSON-with-comments).
{
// Report violations as warnings rather than errors by default.
"defaultSeverity": "warning",
"rulesDirectory": [
"tslint-consistent-codestyle"
],
"linterOptions": {
"exclude": [
"node_modules/**"
]
},
"rules": {
// Enforces vertical alignment.
"align": [true, "parameters", "statements"],
// Enforces use of T[] if T is a simple type.
"array-type": [true, "array-simple"],
// Enforces PascalCased class and interface names.
"class-name": true,
// Enforces formatting rules for single-line comments.
"comment-format": [true, "check-space"],
// Enforces single quotes; double quotes allowed when they avoid escaping.
"quotemark": [true, "single", "avoid-escape"],
// Ensures the file ends with a newline.
"eofline": true,
// Enforces 4-space indentation.
"indent": [true, "spaces", 4],
// Ensures proper spacing between import statement keywords.
"import-spacing": true,
// No naming restriction (e.g. I-prefix) on interfaces.
"interface-name": false,
// Import ordering is not enforced.
"ordered-imports": false,
// Enforces consistent semicolon usage at the end of every statement.
"semicolon": [true, "always"],
// Enforces braces for if/for/do/while statements.
"curly": [true, "ignore-same-line"],
// Enforces blank line before return when not the only line in the block.
"newline-before-return": true,
// Disallows multiple variable definitions in the same declaration statement.(Exception for loops)
"one-variable-per-declaration": [true, "ignore-for-loop"],
// Object keys need not be alphabetized.
"object-literal-sort-keys": false,
"whitespace": [
true,
"check-branch", // checks branching statements (if/else/for/while) are followed by whitespace.
"check-decl", // checks that variable declarations have whitespace around the equals token.
"check-operator", // checks for whitespace around operator tokens.
"check-module", // checks for whitespace in import & export statements.
"check-separator", // checks for whitespace after separator tokens (,/;).
"check-type-operator", // checks for whitespace between type operators | and &.
"check-preblock" // checks for whitespace before the opening brace of a block.
],
// Recommends to use an early exit instead of a long if block.
"early-exit": true,
// Bans the use of specified console methods.
"no-console": [true, "log"],
// Default exports are allowed (Vue SFCs rely on them).
"no-default-export": false,
// Ban the use of this in static methods.
"no-static-this": true,
// Warns if super() appears twice in a constructor.
"no-duplicate-super": true,
// Disallows any type of assignment in conditionals.
"no-conditional-assignment": true,
// Prevents duplicate cases in switch statements.
"no-duplicate-switch-case": true,
// Disallows empty blocks.
"no-empty": true,
// Disallows two or more blank lines in a row.
"no-consecutive-blank-lines": [true, 2],
// Warns on use of ${ in non-template strings.
"no-invalid-template-strings": true,
// Disallows using the this keyword outside of classes.
"no-invalid-this": true
}
}

View File

@ -1,13 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
module.exports = {
chainWebpack: config => {
config
.plugin('html')
.tap(args => {
args[0].template = './index.html'
return args
})
}
};

View File

@ -1,86 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// Webpack configuration for the Vue + TypeScript frontend: bundles
// ./src/main.ts into ./dist/build.js and generates dist/public/index.html.
var path = require('path');
var webpack = require('webpack');
const VueLoaderPlugin = require('vue-loader/lib/plugin');
const HtmlWebpackPlugin = require('html-webpack-plugin');
// Bundle output directory, also served by webpack-dev-server below.
const clientBundleOutputDir = './dist';
module.exports = {
// NOTE(review): mode is hard-coded to development; confirm a separate
// production build path exists before shipping this bundle.
mode: 'development',
entry: './src/main.ts',
output: {
path: path.resolve(__dirname, clientBundleOutputDir),
// URL prefix the server uses to expose the bundle at runtime.
publicPath: '/static/dist/',
filename: 'build.js'
},
devServer: {
contentBase: clientBundleOutputDir
},
plugins: [
// Required companion plugin for the vue-loader rule below.
new VueLoaderPlugin(),
// Generates index.html from the project-root template with the bundle injected.
new HtmlWebpackPlugin({
vue: true,
template: './index.html',
filename: path.resolve(__dirname, './dist/public', 'index.html')
}),
],
module: {
rules: [
// .vue single-file components; per-lang loader chains are mapped below.
{
test: /\.vue$/,
loader: 'vue-loader',
options: {
loaders: {
// Since sass-loader (weirdly) has SCSS as its default parse mode, we map
// the "scss" and "sass" values for the lang attribute to the right configs here.
// other preprocessors should work out of the box, no loader config like this necessary.
ts:'ts-loader!tslint-loader',
'scss': 'vue-style-loader!css-loader!sass-loader',
'sass': 'vue-style-loader!css-loader!sass-loader?indentedSyntax',
}
// other vue-loader options go here
}
},
// Standalone .ts/.tsx files; appendTsSuffixTo lets ts-loader handle
// the <script lang="ts"> blocks extracted from .vue files.
{
test: /\.tsx?$/,
loader: 'ts-loader',
exclude: /node_modules/,
options: {
appendTsSuffixTo: [/\.vue$/],
}
},
// Copy image assets into images/ keeping their original names.
{
test: /\.(png|jpg|gif|svg)$/,
loader: 'file-loader',
options: {
name: 'images/[name].[ext]'
}
},
// Standalone .scss files imported from JS/TS.
{
test: /\.scss$/,
use: [
'vue-style-loader',
'css-loader',
'sass-loader'
]
},
// Fonts: inline as data URIs under 10KB, otherwise emit to fonts/.
{
test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/,
loader: 'url-loader',
options: {
limit: 10000,
name: 'fonts/[name].[ext]'
}
}
]
},
resolve: {
extensions: ['.ts', '.tsx', '.js', '.vue', '.json'],
alias: {
// Use the runtime+compiler Vue build (templates compiled in-browser).
'vue$': 'vue/dist/vue.esm.js',
// '@' maps to src/, mirrored in tsconfig's "paths" for the type checker.
'@': path.resolve('src')
}
}
}