724bb44723
What:

• cmd/inspector/main.go: removes kad commands
• internal/testplanet/planet.go: waits for the contact chore to finish
• satellite/contact/nodesservice.go: creates an empty nodes service implementation
• satellite/contact/service.go: implements the Local and FetchInfo methods and adds an external address config value
• satellite/discovery/service.go: replaces kad.FetchInfo with contact.FetchInfo in Refresh() and removes Discover()
• satellite/peer.go: sets up the contact service and endpoints
• storagenode/console/service.go: replaces nodeID with contact.Local()
• storagenode/contact/chore.go: replaces the routing table with the contact service
• storagenode/contact/nodesservice.go: creates an empty implementation for the ping and request info nodes service and implements the RequestInfo method
• storagenode/contact/service.go: creates a service to return the local node and update its own capacity
• storagenode/monitor/monitor.go: uses the contact service in place of the routing table
• storagenode/operator.go: moves the operator config out of kad into its own setup
• storagenode/peer.go: sets up the contact service, chore, pingstats, and endpoints
• satellite/overlay/config.go: changes the NodeSelectionConfig.OnlineWindow default to 4 hr to allow for accurate repair selection (a sketch of this check follows below)

Removes kademlia setups in:

• cmd/storagenode/main.go
• cmd/storj-sim/network.go
• internal/testplanet/planet.go
• internal/testplanet/satellite.go
• internal/testplanet/storagenode.go
• satellite/peer.go
• scripts/test-sim-backwards.sh
• scripts/testdata/satellite-config.yaml.lock
• storagenode/inspector/inspector.go
• storagenode/peer.go
• storagenode/storagenodedb/database.go

Why: Replacing Kademlia

Please describe the tests:

• internal/testplanet/planet_test.go:
  TestBasic: asserts that the storage node can check in with the satellite without any errors
  TestContact: tests that all nodes get inserted into both satellites' overlay cache during testplanet setup
• satellite/contact/contact_test.go:
  TestFetchInfo: tests that the FetchInfo method returns the correct info
• storagenode/contact/contact_test.go:
  TestNodeInfoUpdated: tests that the contact chore updates the node information
  TestRequestInfoEndpoint: tests that the RequestInfo endpoint returns the correct info

Please describe the performance impact: Node discovery should be at least slightly more performant, since each node connects directly to each satellite and no longer needs to wait for bootstrapping. It probably won't be faster in wall-clock time at startup, since each node waits a random amount of time (less than 1 hr) before initializing its first connection (jitter); a sketch of this pattern follows below.
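To make the jitter behavior above concrete, here is a minimal sketch of the pattern: an initial random delay under a maximum, then periodic direct check-ins with every satellite. All names, types, and intervals here are illustrative assumptions, not the actual storagenode/contact/chore.go implementation.

// Illustrative sketch only; names and intervals are assumptions, not the
// real storagenode/contact/chore.go code.
package contactsketch

import (
	"context"
	"math/rand"
	"time"
)

// Satellite stands in for a configured satellite the node announces itself to.
type Satellite struct{ Address string }

// pingSatellite represents the direct check-in: the node sends its address
// and capacity straight to the satellite, with no kademlia bootstrapping.
func pingSatellite(ctx context.Context, sat Satellite) error {
	// ... dial sat.Address and send local node info ...
	return nil
}

// RunChore sleeps a random duration under maxJitter before the first contact
// (which is why startup is not faster in wall-clock time), then pings every
// satellite on each interval tick until the context is canceled.
func RunChore(ctx context.Context, sats []Satellite, maxJitter, interval time.Duration) error {
	jitter := time.Duration(rand.Int63n(int64(maxJitter)))
	select {
	case <-time.After(jitter):
	case <-ctx.Done():
		return ctx.Err()
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		for _, sat := range sats {
			if err := pingSatellite(ctx, sat); err != nil {
				return err
			}
		}
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}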
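The OnlineWindow default matters because node selection (e.g. for repair) only considers nodes seen recently. A minimal sketch of that check, assuming a per-node last-contact timestamp; the helper name is hypothetical, and the real logic lives in the overlay package:

// Hypothetical illustration of the OnlineWindow check described above.
package overlaysketch

import "time"

// IsOnline reports whether a node counts as online for selection: its last
// successful contact must fall within onlineWindow (default now 4 hr).
func IsOnline(lastContactSuccess time.Time, onlineWindow time.Duration) bool {
	return time.Since(lastContactSuccess) < onlineWindow
}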
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package inspector_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/sync2"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/uplink"
)

func TestInspectorStats(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 5, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	// Refresh now pulls node info through the contact service (contact.FetchInfo)
	// rather than kademlia.
	planet.Satellites[0].Discovery.Service.Refresh.TriggerWait()

	var availableBandwidth int64
	var availableSpace int64
	for _, storageNode := range planet.StorageNodes {
		response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
		require.NoError(t, err)

		assert.Zero(t, response.UsedBandwidth)
		assert.Zero(t, response.UsedSpace)
		assert.Zero(t, response.UsedEgress)
		assert.Zero(t, response.UsedIngress)
		assert.True(t, response.AvailableBandwidth > 0)
		assert.True(t, response.AvailableSpace > 0)

		// assume that all storage nodes have the same initial values
		availableBandwidth = response.AvailableBandwidth
		availableSpace = response.AvailableSpace
	}

	expectedData := testrand.Bytes(100 * memory.KiB)

	rs := &uplink.RSConfig{
		MinThreshold:     2,
		RepairThreshold:  3,
		SuccessThreshold: 4,
		MaxThreshold:     5,
	}

	err = planet.Uplinks[0].UploadWithConfig(ctx, planet.Satellites[0], rs, "testbucket", "test/path", expectedData)
	require.NoError(t, err)

	_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
	assert.NoError(t, err)

	// wait until all requests have been handled
	for {
		total := int32(0)
		for _, storageNode := range planet.StorageNodes {
			total += storageNode.Storage2.Endpoint.TestLiveRequestCount()
		}
		if total == 0 {
			break
		}

		sync2.Sleep(ctx, 100*time.Millisecond)
	}

	var downloaded int
	for _, storageNode := range planet.StorageNodes {
		response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
		require.NoError(t, err)

		// TODO set more accurate assertions
		if response.UsedSpace > 0 {
			assert.NotZero(t, response.UsedBandwidth)
			assert.Equal(t, response.UsedBandwidth, response.UsedIngress+response.UsedEgress)
			assert.Equal(t, availableBandwidth-response.UsedBandwidth, response.AvailableBandwidth)
			assert.Equal(t, availableSpace-response.UsedSpace, response.AvailableSpace)

			// used space should equal the stored (ingress) bytes,
			// i.e. total bandwidth minus egress
			assert.Equal(t, response.UsedSpace, response.UsedBandwidth-response.UsedEgress)
			if response.UsedEgress > 0 {
				downloaded++
				assert.Equal(t, response.UsedBandwidth-response.UsedIngress, response.UsedEgress)
			}
		} else {
			assert.Zero(t, response.UsedSpace)
			// TODO track why this is failing
			//assert.Equal(t, availableBandwidth, response.AvailableBandwidth)
			assert.Equal(t, availableSpace, response.AvailableSpace)
		}
	}
	assert.True(t, downloaded >= rs.MinThreshold, "downloaded=%v, rs.MinThreshold=%v", downloaded, rs.MinThreshold)
}

func TestInspectorDashboard(t *testing.T) {
	testStartedTime := time.Now()

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		for _, storageNode := range planet.StorageNodes {
			response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
			require.NoError(t, err)

			assert.True(t, response.Uptime.Nanos > 0)
			assert.Equal(t, storageNode.ID(), response.NodeId)
			assert.Equal(t, storageNode.Addr(), response.ExternalAddress)
			assert.NotNil(t, response.Stats)
		}

		expectedData := testrand.Bytes(100 * memory.KiB)

		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
		require.NoError(t, err)

		for _, storageNode := range planet.StorageNodes {
			response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
			require.NoError(t, err)

			// ping times are now tracked by the contact service's pingstats
			assert.True(t, response.LastPinged.After(testStartedTime))
			assert.NotEmpty(t, response.LastPingFromAddress)

			assert.True(t, response.Uptime.Nanos > 0)
			assert.Equal(t, storageNode.ID(), response.NodeId)
			assert.Equal(t, storageNode.Addr(), response.ExternalAddress)
			assert.NotNil(t, response.Stats)
		}
	})
}