// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package kademlia

import (
	"bytes"
	"context"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"go.uber.org/zap/zaptest"
	"google.golang.org/grpc"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/teststorj"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/node"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/provider"
	"storj.io/storj/pkg/storj"
)

const (
	defaultAlpha = 5
)
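// TestNewKademlia constructs Kademlia instances from a table of identities and
// bootstrap nodes, then verifies the bootstrap nodes, node client, and routing
// table are wired up and that the instance disconnects cleanly.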
func TestNewKademlia(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	rootdir, cleanup := mktempdir(t, "kademlia")
	defer cleanup()
	cases := []struct {
		id          *identity.FullIdentity
		bn          []pb.Node
		addr        string
		expectedErr error
	}{
		{
			id: func() *identity.FullIdentity {
				id, err := testidentity.NewTestIdentity(ctx)
				assert.NoError(t, err)
				return id
			}(),
			bn:   []pb.Node{{Id: teststorj.NodeIDFromString("foo")}},
			addr: "127.0.0.1:8080",
		},
		{
			id: func() *identity.FullIdentity {
				id, err := testidentity.NewTestIdentity(ctx)
				assert.NoError(t, err)
				return id
			}(),
			bn:   []pb.Node{{Id: teststorj.NodeIDFromString("foo")}},
			addr: "127.0.0.1:8080",
		},
	}

	for i, v := range cases {
		dir := filepath.Join(rootdir, strconv.Itoa(i))

		kad, err := NewKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, v.bn, v.addr, nil, v.id, dir, defaultAlpha)
		assert.NoError(t, err)
		assert.Equal(t, v.expectedErr, err)
		assert.Equal(t, v.bn, kad.bootstrapNodes)
		assert.NotNil(t, kad.nodeClient)
		assert.NotNil(t, kad.routingTable)
		assert.NoError(t, kad.Disconnect())
	}
}
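// TestPeerDiscovery starts a bootstrap node, a test node, and a lookup-target
// node, then checks that lookup reaches the target through the bootstrap node
// and that the local node's metadata survives the round trip.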
func TestPeerDiscovery(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	dir, cleanup := mktempdir(t, "kademlia")
	defer cleanup()
	// start the bootstrap, test, and lookup-target node servers
	bootServer, mockBootServer, bootID, bootAddress := startTestNodeServer(ctx)
	defer bootServer.Stop()
	testServer, _, testID, testAddress := startTestNodeServer(ctx)
	defer testServer.Stop()
	targetServer, _, targetID, targetAddress := startTestNodeServer(ctx)
	defer targetServer.Stop()

	bootstrapNodes := []pb.Node{{Id: bootID.ID, Address: &pb.NodeAddress{Address: bootAddress}, Type: pb.NodeType_STORAGE}}
	metadata := &pb.NodeMetadata{
		Email:  "foo@bar.com",
		Wallet: "OperatorWallet",
	}
	k, err := NewKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrapNodes, testAddress, metadata, testID, dir, defaultAlpha)
	assert.NoError(t, err)
	rt, err := k.GetRoutingTable(ctx)
	assert.NoError(t, err)
	assert.Equal(t, "foo@bar.com", rt.Local().Metadata.Email)
	assert.Equal(t, "OperatorWallet", rt.Local().Metadata.Wallet)

	defer func() {
		assert.NoError(t, k.Disconnect())
	}()

	cases := []struct {
		target      storj.NodeID
		expected    *pb.Node
		expectedErr error
	}{
		{target: func() storj.NodeID {
			mockBootServer.returnValue = []*pb.Node{{Id: targetID.ID, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: targetAddress}}}
			return targetID.ID
		}(),
			expected:    &pb.Node{},
			expectedErr: nil,
		},
		{target: bootID.ID,
			expected:    nil,
			expectedErr: nil,
		},
	}
	for _, v := range cases {
		_, err := k.lookup(ctx, v.target, true)
		assert.Equal(t, v.expectedErr, err)
	}
}
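// TestBootstrap bootstraps two nodes against a single bootstrap node and
// expects all three nodes to end up in the last node's routing table.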
func TestBootstrap(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	bn, s, clean := testNode(t, []pb.Node{})
	defer clean()
	defer s.Stop()

	n1, s1, clean1 := testNode(t, []pb.Node{bn.routingTable.self})
	defer clean1()
	defer s1.Stop()

	err := n1.Bootstrap(ctx)
	assert.NoError(t, err)

	n2, s2, clean2 := testNode(t, []pb.Node{bn.routingTable.self})
	defer clean2()
	defer s2.Stop()

	err = n2.Bootstrap(ctx)
	assert.NoError(t, err)

	nodeIDs, err := n2.routingTable.nodeBucketDB.List(nil, 0)
	assert.NoError(t, err)
	assert.Len(t, nodeIDs, 3)
}
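// testNode creates a Kademlia instance listening on a random local port and
// serves it over gRPC; it returns the instance, the gRPC server, and a
// cleanup func that disconnects the instance and removes its directory.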
func testNode(t *testing.T, bn []pb.Node) (*Kademlia, *grpc.Server, func()) {
	ctx := testcontext.New(t)
	// new address
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	// new identity
	fid, err := testidentity.NewTestIdentity(ctx)
	assert.NoError(t, err)
	// new kademlia
	dir, cleanup := mktempdir(t, "kademlia")

	logger := zaptest.NewLogger(t)
	k, err := NewKademlia(logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), nil, fid, dir, defaultAlpha)
	assert.NoError(t, err)
	s := node.NewServer(logger, k)
	// new ident opts
	identOpt, err := fid.ServerOption()
	assert.NoError(t, err)

	grpcServer := grpc.NewServer(identOpt)

	pb.RegisterNodesServer(grpcServer, s)
	go func() { assert.NoError(t, grpcServer.Serve(lis)) }()

	return k, grpcServer, func() {
		defer cleanup()
		assert.NoError(t, k.Disconnect())
	}
}
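// TestRefresh ages the only bucket's timestamp, then verifies that refresh
// updates the timestamp once and that an immediate second refresh leaves it
// unchanged.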
func TestRefresh(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	k, s, clean := testNode(t, []pb.Node{})
	defer clean()
	defer s.Stop()
	// turn back time for the only bucket
	rt := k.routingTable
	now := time.Now().UTC()
	bID := firstBucketID // always exists
	err := rt.SetBucketTimestamp(bID[:], now.Add(-2*time.Hour))
	assert.NoError(t, err)
	// refresh should call FindNode, updating the timestamp
	err = k.refresh(ctx)
	assert.NoError(t, err)
	ts1, err := rt.GetBucketTimestamp(bID[:])
	assert.NoError(t, err)
	assert.True(t, now.Add(-5*time.Minute).Before(ts1))
	// a second refresh should not call FindNode, leaving the timestamp unchanged
	err = k.refresh(ctx)
	assert.NoError(t, err)
	ts2, err := rt.GetBucketTimestamp(bID[:])
	assert.NoError(t, err)
	assert.True(t, ts1.Equal(ts2))
}
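// TestGetNodes fills the routing table with nodes carrying bandwidth and disk
// restrictions and verifies that GetNodes honors the start ID, the limit, and
// the restriction filters.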
func TestGetNodes(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	var (
		nodeIDA = teststorj.NodeIDFromString("AAAAA")
		nodeIDB = teststorj.NodeIDFromString("BBBBB")
		nodeIDC = teststorj.NodeIDFromString("CCCCC")
		nodeIDD = teststorj.NodeIDFromString("DDDDD")
	)

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)

	srv, _ := newTestServer(ctx, []*pb.Node{{Id: teststorj.NodeIDFromString("foo")}})
	go func() { assert.NoError(t, srv.Serve(lis)) }()
	defer srv.Stop()

	// make two new identities with fixed IDs
	fid, err := testidentity.NewTestIdentity(ctx)
	assert.NoError(t, err)
	fid2, err := testidentity.NewTestIdentity(ctx)
	assert.NoError(t, err)
	fid.ID = nodeIDA
	fid2.ID = nodeIDB
	// the two identities must be unique
	assert.NotEqual(t, fid.ID, fid2.ID)
	dir, cleanup := mktempdir(t, "kademlia")
	defer cleanup()
	k, err := NewKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, []pb.Node{{Id: fid2.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}}}, lis.Addr().String(), nil, fid, dir, defaultAlpha)
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, k.Disconnect())
	}()

	// add nodes
	ids := storj.NodeIDList{nodeIDA, nodeIDB, nodeIDC, nodeIDD}
	bw := []int64{1, 2, 3, 4}
	disk := []int64{4, 3, 2, 1}
	nodes := []*pb.Node{}
	for i, v := range ids {
		n := &pb.Node{
			Id: v,
			Restrictions: &pb.NodeRestrictions{
				FreeBandwidth: bw[i],
				FreeDisk:      disk[i],
			},
			Type: pb.NodeType_STORAGE,
		}
		nodes = append(nodes, n)
		err = k.routingTable.ConnectionSuccess(n)
		assert.NoError(t, err)
	}

	cases := []struct {
		testID       string
		start        storj.NodeID
		limit        int
		restrictions []pb.Restriction
		expected     []*pb.Node
	}{
		{testID: "one",
			start: nodeIDB,
			limit: 2,
			restrictions: []pb.Restriction{
				{
					Operator: pb.Restriction_GT,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(2),
				},
			},
			expected: nodes[2:],
		},
		{testID: "two",
			start: nodeIDA,
			limit: 3,
			restrictions: []pb.Restriction{
				{
					Operator: pb.Restriction_GT,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(2),
				},
				{
					Operator: pb.Restriction_LT,
					Operand:  pb.Restriction_FREE_DISK,
					Value:    int64(2),
				},
			},
			expected: nodes[3:],
		},
		{testID: "three",
			start:        nodeIDA,
			limit:        4,
			restrictions: []pb.Restriction{},
			expected:     nodes,
		},
	}
	for _, c := range cases {
		t.Run(c.testID, func(t *testing.T) {
			ns, err := k.GetNodes(ctx, c.start, c.limit, c.restrictions...)
			assert.NoError(t, err)
			assert.Equal(t, len(c.expected), len(ns))
			for i, n := range ns {
				assert.True(t, bytes.Equal(c.expected[i].Id.Bytes(), n.Id.Bytes()))
			}
		})
	}
}
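// TestMeetsRestrictions exercises meetsRestrictions with nodes that satisfy
// or violate one or more restrictions.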
func TestMeetsRestrictions(t *testing.T) {
	cases := []struct {
		testID string
		r      []pb.Restriction
		n      pb.Node
		expect bool
	}{
		{testID: "pass one",
			r: []pb.Restriction{
				{
					Operator: pb.Restriction_EQ,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(1),
				},
			},
			n: pb.Node{
				Restrictions: &pb.NodeRestrictions{
					FreeBandwidth: int64(1),
				},
			},
			expect: true,
		},
		{testID: "pass multiple",
			r: []pb.Restriction{
				{
					Operator: pb.Restriction_LTE,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(2),
				},
				{
					Operator: pb.Restriction_GTE,
					Operand:  pb.Restriction_FREE_DISK,
					Value:    int64(2),
				},
			},
			n: pb.Node{
				Restrictions: &pb.NodeRestrictions{
					FreeBandwidth: int64(1),
					FreeDisk:      int64(3),
				},
			},
			expect: true,
		},
		{testID: "fail one",
			r: []pb.Restriction{
				{
					Operator: pb.Restriction_LT,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(2),
				},
				{
					Operator: pb.Restriction_GT,
					Operand:  pb.Restriction_FREE_DISK,
					Value:    int64(2),
				},
			},
			n: pb.Node{
				Restrictions: &pb.NodeRestrictions{
					FreeBandwidth: int64(2),
					FreeDisk:      int64(3),
				},
			},
			expect: false,
		},
		{testID: "fail multiple",
			r: []pb.Restriction{
				{
					Operator: pb.Restriction_LT,
					Operand:  pb.Restriction_FREE_BANDWIDTH,
					Value:    int64(2),
				},
				{
					Operator: pb.Restriction_GT,
					Operand:  pb.Restriction_FREE_DISK,
					Value:    int64(2),
				},
			},
			n: pb.Node{
				Restrictions: &pb.NodeRestrictions{
					FreeBandwidth: int64(2),
					FreeDisk:      int64(2),
				},
			},
			expect: false,
		},
	}
	for _, c := range cases {
		t.Run(c.testID, func(t *testing.T) {
			result := meetsRestrictions(c.r, c.n)
			assert.Equal(t, c.expect, result)
		})
	}
}
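// mktempdir creates a temporary directory and returns it along with a
// cleanup func that removes it.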
func mktempdir(t *testing.T, dir string) (string, func()) {
	rootdir, err := ioutil.TempDir("", dir)
	assert.NoError(t, err)
	cleanup := func() {
		assert.NoError(t, os.RemoveAll(rootdir))
	}
	return rootdir, cleanup
}
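// startTestNodeServer serves a mockNodesServer over gRPC on a random local
// port and returns the server, the mock, the server's identity, and its
// address.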
func startTestNodeServer(ctx context.Context) (*grpc.Server, *mockNodesServer, *provider.FullIdentity, string) {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, nil, ""
	}

	ca, err := testidentity.NewTestCA(ctx)
	if err != nil {
		return nil, nil, nil, ""
	}
	identity, err := ca.NewIdentity()
	if err != nil {
		return nil, nil, nil, ""
	}
	identOpt, err := identity.ServerOption()
	if err != nil {
		return nil, nil, nil, ""
	}
	grpcServer := grpc.NewServer(identOpt)
	mn := &mockNodesServer{queryCalled: 0}

	pb.RegisterNodesServer(grpcServer, mn)
	go func() {
		if err := grpcServer.Serve(lis); err != nil {
			return
		}
	}()

	return grpcServer, mn, identity, lis.Addr().String()
}
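// newTestServer returns an unstarted gRPC server with a mockNodesServer
// registered on it; the caller is responsible for serving it.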
func newTestServer(ctx context.Context, nn []*pb.Node) (*grpc.Server, *mockNodesServer) {
	ca, err := testidentity.NewTestCA(ctx)
	if err != nil {
		return nil, nil
	}
	identity, err := ca.NewIdentity()
	if err != nil {
		return nil, nil
	}
	identOpt, err := identity.ServerOption()
	if err != nil {
		return nil, nil
	}
	grpcServer := grpc.NewServer(identOpt)
	mn := &mockNodesServer{queryCalled: 0}

	pb.RegisterNodesServer(grpcServer, mn)

	return grpcServer, mn
}
// TestRandomIds makes sure randomIDInRange returns a random node ID within the range (start..end]
func TestRandomIds(t *testing.T) {
	for x := 0; x < 1000; x++ {
		var start, end bucketID
		// many valid options
		rand.Read(start[:])
		rand.Read(end[:])
		if bytes.Compare(start[:], end[:]) > 0 {
			start, end = end, start
		}
		id, err := randomIDInRange(start, end)
		assert.NoError(t, err, "Unexpected err in randomIDInRange")
		assert.True(t, bytes.Compare(id[:], start[:]) > 0, "Random id was less than starting id")
		assert.True(t, bytes.Compare(id[:], end[:]) <= 0, "Random id was greater than end id")

		// invalid range
		_, err = randomIDInRange(end, start)
		assert.Error(t, err, "Missing expected err in invalid randomIDInRange")

		// no valid options
		end = start
		_, err = randomIDInRange(start, end)
		assert.Error(t, err, "Missing expected err in empty randomIDInRange")

		// one valid option
		if start[31] == 255 {
			start[31] = 254
		} else {
			end[31] = start[31] + 1
		}
		id, err = randomIDInRange(start, end)
		assert.NoError(t, err, "Unexpected err in randomIDInRange")
		assert.True(t, bytes.Equal(id[:], end[:]), "Not-so-random id was incorrect")
	}
}
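// mockNodesServer is a Nodes server double that counts Query and Ping calls
// and returns a canned Query response.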
type mockNodesServer struct {
	queryCalled int32
	pingCalled  int32
	returnValue []*pb.Node
}

func (mn *mockNodesServer) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
	atomic.AddInt32(&mn.queryCalled, 1)
	return &pb.QueryResponse{Response: mn.returnValue}, nil
}

func (mn *mockNodesServer) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
	atomic.AddInt32(&mn.pingCalled, 1)
	return &pb.PingResponse{}, nil
}