// storj/pkg/overlay/server_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
)
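
// TestServer exercises the overlay endpoint's Lookup and BulkLookup RPCs
// against a small testplanet network.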
func TestServer(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 4, 1)
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	// wait a bit for all the nodes to complete bootstrapping off the satellite
	time.Sleep(2 * time.Second)

	satellite := planet.Satellites[0]

	server := satellite.Overlay.Endpoint
	// TODO: handle cleanup

	{ // Lookup
		result, err := server.Lookup(ctx, &pb.LookupRequest{
			NodeId: planet.StorageNodes[0].ID(),
		})
		require.NoError(t, err)
		require.NotNil(t, result)
		assert.Equal(t, result.Node.Address.Address, planet.StorageNodes[0].Addr())
	}
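
	// The bulk endpoint should return responses in request order; the loop
	// below matches each response to its storage node by index.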
	{ // BulkLookup
		result, err := server.BulkLookup(ctx, &pb.LookupRequests{
			LookupRequest: []*pb.LookupRequest{
				{NodeId: planet.StorageNodes[0].ID()},
				{NodeId: planet.StorageNodes[1].ID()},
				{NodeId: planet.StorageNodes[2].ID()},
			},
		})
		require.NoError(t, err)
		require.NotNil(t, result)
		require.Len(t, result.LookupResponse, 3)

		for i, resp := range result.LookupResponse {
			if assert.NotNil(t, resp.Node) {
				assert.Equal(t, resp.Node.Address.Address, planet.StorageNodes[i].Addr())
			}
		}
	}
}
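
// TestNodeSelection covers FindStorageNodesWithPreferences across a matrix of
// audit thresholds, new-node percentages, and node exclusion lists.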
func TestNodeSelection(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 10, 0)
	require.NoError(t, err)

	planet.Start(ctx)
	defer ctx.Check(planet.Shutdown)

	satellite := planet.Satellites[0]

	// wait a bit for all the nodes to complete bootstrapping off the satellite
	time.Sleep(2 * time.Second)

	// This sets a reputable audit count for a certain number of nodes.
	for i, node := range planet.StorageNodes {
		for k := 0; k < i; k++ {
			_, err := satellite.DB.StatDB().UpdateAuditSuccess(ctx, node.ID(), true)
			assert.NoError(t, err)
		}
	}
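	// After the loop above, node i has exactly i successful audits, so a
	// NewNodeAuditThreshold of k leaves nodes 0..k-1 "new" and the remaining
	// 10-k nodes reputable.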

	// ensure all storagenodes are in overlay service
	for _, storageNode := range planet.StorageNodes {
		err = satellite.Overlay.Service.Put(ctx, storageNode.ID(), storageNode.Local())
		assert.NoError(t, err)
	}

	type test struct {
		Preferences    overlay.NodeSelectionConfig
		ExcludeCount   int
		RequestCount   int64
		ExpectedCount  int
		ShouldFailWith *errs.Class
	}
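
	// ExpectedCount follows the selection math: RequestCount reputable nodes
	// plus up to RequestCount*NewNodePercentage new nodes, capped by how many
	// nodes of each class actually exist.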
	for i, tt := range []test{
		{ // all reputable nodes, only reputable nodes requested
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 0,
				NewNodePercentage:     0,
			},
			RequestCount:  5,
			ExpectedCount: 5,
		},
		{ // all reputable nodes, reputable and new nodes requested
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 0,
				NewNodePercentage:     1,
			},
			RequestCount:  5,
			ExpectedCount: 5,
		},
		{ // all reputable nodes except one, reputable and new nodes requested (5 reputable + the only new node)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 1,
				NewNodePercentage:     1,
			},
			RequestCount:  5,
			ExpectedCount: 6,
		},
		{ // 50-50 reputable and new nodes, reputable and new nodes requested (new node ratio 1.0: 2 reputable + 2 new)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 5,
				NewNodePercentage:     1,
			},
			RequestCount:  2,
			ExpectedCount: 4,
		},
		{ // 50-50 reputable and new nodes, reputable and new nodes requested (new node ratio 0.5: 4 reputable + 2 new)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 5,
				NewNodePercentage:     0.5,
			},
			RequestCount:  4,
			ExpectedCount: 6,
		},
		{ // all new nodes except one, reputable and new nodes requested (happy path)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 8,
				NewNodePercentage:     1,
			},
			RequestCount:  1,
			ExpectedCount: 2,
		},
		{ // all new nodes except one, reputable and new nodes requested (not happy path: only one reputable node exists)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 9,
				NewNodePercentage:     1,
			},
			RequestCount:   2,
			ExpectedCount:  3,
			ShouldFailWith: &overlay.ErrNotEnoughNodes,
		},
		{ // all new nodes, reputable and new nodes requested (no reputable nodes exist)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 50,
				NewNodePercentage:     1,
			},
			RequestCount:   2,
			ExpectedCount:  2,
			ShouldFailWith: &overlay.ErrNotEnoughNodes,
		},
		{ // audit threshold edge case (1)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 9,
				NewNodePercentage:     0,
			},
			RequestCount:  1,
			ExpectedCount: 1,
		},
		{ // audit threshold edge case (2)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 0,
				NewNodePercentage:     1,
			},
			RequestCount:  1,
			ExpectedCount: 1,
		},
		{ // excluded node ids being excluded (only 3 of the 5 reputable nodes remain after exclusion)
			Preferences: overlay.NodeSelectionConfig{
				NewNodeAuditThreshold: 5,
				NewNodePercentage:     0,
			},
			ExcludeCount:   7,
			RequestCount:   5,
			ExpectedCount:  3,
			ShouldFailWith: &overlay.ErrNotEnoughNodes,
		},
	} {
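		// Run selection with this case's preferences and verify both the
		// returned node count and the error class.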
		t.Logf("#%2d. %+v", i, tt)
		endpoint := planet.Satellites[0].Overlay.Endpoint

		var excludedNodes []storj.NodeID
		for _, storageNode := range planet.StorageNodes[:tt.ExcludeCount] {
			excludedNodes = append(excludedNodes, storageNode.ID())
		}

		response, err := endpoint.FindStorageNodesWithPreferences(ctx,
			&pb.FindStorageNodesRequest{
				Opts: &pb.OverlayOptions{
					Restrictions: &pb.NodeRestrictions{
						FreeBandwidth: 0,
						FreeDisk:      0,
					},
					Amount:        tt.RequestCount,
					ExcludedNodes: excludedNodes,
				},
			}, &tt.Preferences)

		t.Log(len(response.Nodes), err)

		if tt.ShouldFailWith != nil {
			assert.Error(t, err)
			assert.True(t, tt.ShouldFailWith.Has(err))
		} else {
			assert.NoError(t, err)
		}

		assert.Equal(t, tt.ExpectedCount, len(response.Nodes))
	}
}