Remove Kademlia dependencies from Satellite and Storagenode (#2966)
What:

- cmd/inspector/main.go: removes the kad commands
- internal/testplanet/planet.go: waits for the contact chore to finish
- satellite/contact/nodesservice.go: creates an empty nodes service implementation
- satellite/contact/service.go: implements the Local and FetchInfo methods and adds an external address config value
- satellite/discovery/service.go: replaces kad.FetchInfo with contact.FetchInfo in Refresh() and removes Discover()
- satellite/peer.go: sets up the contact service and endpoints
- storagenode/console/service.go: replaces nodeID with contact.Local()
- storagenode/contact/chore.go: replaces the routing table with the contact service
- storagenode/contact/nodesservice.go: creates an empty implementation for the ping and request-info nodes service and implements the RequestInfo method
- storagenode/contact/service.go: creates a service to return the local node and update its own capacity
- storagenode/monitor/monitor.go: uses the contact service in place of the routing table
- storagenode/operator.go: moves OperatorConfig out of the kademlia config into its own setup
- storagenode/peer.go: sets up the contact service, chore, pingstats, and endpoints
- satellite/overlay/config.go: changes the NodeSelectionConfig.OnlineWindow default to 4h to allow for accurate repair selection

Removes kademlia setups in:

- cmd/storagenode/main.go
- cmd/storj-sim/network.go
- internal/testplanet/planet.go
- internal/testplanet/satellite.go
- internal/testplanet/storagenode.go
- satellite/peer.go
- scripts/test-sim-backwards.sh
- scripts/testdata/satellite-config.yaml.lock
- storagenode/inspector/inspector.go
- storagenode/peer.go
- storagenode/storagenodedb/database.go

Why: Replacing Kademlia.

Please describe the tests:

- internal/testplanet/planet_test.go:
  - TestBasic: asserts that the storage node can check in with the satellite without any errors
  - TestContact: tests that all nodes get inserted into both satellites' overlay cache during testplanet setup
- satellite/contact/contact_test.go:
  - TestFetchInfo: tests that the FetchInfo method returns the correct info
- storagenode/contact/contact_test.go:
  - TestNodeInfoUpdated: tests that the contact chore updates the node information
  - TestRequestInfoEndpoint: tests that the RequestInfo endpoint returns the correct info

Please describe the performance impact: Node discovery should be at least slightly more performant, since each node connects directly to each satellite and no longer needs to wait for bootstrapping. It probably won't be faster in wall-clock time on startup, since each node waits a random amount of time (less than 1 hr) before initializing its first connection (jitter). A sketch of this check-in flow follows the commit metadata below.
parent 93788e5218
commit 724bb44723
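For readers skimming the diff, here is a minimal, illustrative sketch of the check-in flow this change moves to on the storage node side: wait a random jitter (under an hour), dial each satellite directly, and report the node's own address, version, capacity, and operator info via the CheckIn RPC. The request fields mirror the pb.CheckInRequest used in the updated TestBasic below; the dial callback, package name, and function signature are simplified stand-ins, not the actual storagenode/contact chore.

```go
package contactsketch

import (
	"context"
	"math/rand"
	"time"

	"storj.io/storj/pkg/pb"
)

// CheckInWithJitter is a sketch of the direct check-in that replaces
// Kademlia discovery/bootstrapping. The dial callback stands in for the
// node's transport; the real contact chore also loops and retries.
func CheckInWithJitter(
	ctx context.Context,
	dial func(ctx context.Context, addr string) (pb.NodeClient, error),
	satelliteAddr string,
	selfAddr string,
	version pb.NodeVersion,
	capacity pb.NodeCapacity,
	operator pb.NodeOperator,
) error {
	// Each node waits a random amount of time (less than 1 hr) before its
	// first connection, so satellites are not hit all at once on startup.
	jitter := time.Duration(rand.Int63n(int64(time.Hour)))
	select {
	case <-time.After(jitter):
	case <-ctx.Done():
		return ctx.Err()
	}

	client, err := dial(ctx, satelliteAddr)
	if err != nil {
		return err
	}

	// Tell the satellite who and where we are; the satellite inserts or
	// refreshes this node in its overlay cache.
	_, err = client.CheckIn(ctx, &pb.CheckInRequest{
		Address:  selfAddr,
		Version:  &version,
		Capacity: &capacity,
		Operator: &operator,
	})
	return err
}
```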
@@ -81,7 +81,7 @@ go test -v ./...
 ```
 
 You can also execute only a single test package if you like. For example:
-`go test ./pkg/kademlia`. Add `-v` for more informations about the executed unit
+`go test ./pkg/identity`. Add `-v` for more informations about the executed unit
 tests.
 
 ### Push up a pull request
@@ -13,14 +13,11 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/gogo/protobuf/jsonpb"
-	"github.com/gogo/protobuf/proto"
 	prompt "github.com/segmentio/go-prompt"
 	"github.com/spf13/cobra"
 	"github.com/zeebo/errs"
 
 	"storj.io/storj/pkg/identity"
-	"storj.io/storj/pkg/kademlia/routinggraph"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/process"
 	"storj.io/storj/pkg/storj"
@@ -55,11 +52,7 @@
 	// Commander CLI
 	rootCmd = &cobra.Command{
 		Use:   "inspector",
-		Short: "CLI for interacting with Storj Kademlia network",
+		Short: "CLI for interacting with Storj network",
 	}
-	kadCmd = &cobra.Command{
-		Use:   "kad",
-		Short: "commands for kademlia/overlay",
-	}
 	statsCmd = &cobra.Command{
 		Use:   "statdb",
@@ -74,39 +67,6 @@
 		Short: "list segments in irreparable database",
 		RunE:  getSegments,
 	}
-	countNodeCmd = &cobra.Command{
-		Use:   "count",
-		Short: "count nodes in kademlia and overlay",
-		RunE:  CountNodes,
-	}
-	pingNodeCmd = &cobra.Command{
-		Use:   "ping <node_id> <ip:port>",
-		Short: "ping node at provided ID",
-		Args:  cobra.MinimumNArgs(2),
-		RunE:  PingNode,
-	}
-	lookupNodeCmd = &cobra.Command{
-		Use:   "lookup <node_id>",
-		Short: "lookup a node by ID only",
-		Args:  cobra.MinimumNArgs(1),
-		RunE:  LookupNode,
-	}
-	nodeInfoCmd = &cobra.Command{
-		Use:   "node-info <node_id>",
-		Short: "get node info directly from node",
-		Args:  cobra.MinimumNArgs(1),
-		RunE:  NodeInfo,
-	}
-	dumpNodesCmd = &cobra.Command{
-		Use:   "dump-nodes",
-		Short: "dump all nodes in the routing table",
-		RunE:  DumpNodes,
-	}
-	drawTableCmd = &cobra.Command{
-		Use:   "routing-graph",
-		Short: "Dumps a graph of the routing table in the dot format",
-		RunE:  DrawTableAsGraph,
-	}
 	objectHealthCmd = &cobra.Command{
 		Use:   "object <project-id> <bucket> <encrypted-path>",
 		Short: "Get stats about an object's health",
@@ -121,16 +81,15 @@
 	}
 )
 
-// Inspector gives access to kademlia and overlay.
+// Inspector gives access to overlay.
 type Inspector struct {
 	identity      *identity.FullIdentity
-	kadclient     pb.KadInspectorClient
 	overlayclient pb.OverlayInspectorClient
 	irrdbclient   pb.IrreparableInspectorClient
 	healthclient  pb.HealthInspectorClient
 }
 
-// NewInspector creates a new gRPC inspector client for access to kademlia and overlay.
+// NewInspector creates a new gRPC inspector client for access to overlay.
 func NewInspector(address, path string) (*Inspector, error) {
 	ctx := context.Background()
 
@@ -149,160 +108,12 @@ func NewInspector(address, path string) (*Inspector, error) {
 
 	return &Inspector{
 		identity:      id,
-		kadclient:     pb.NewKadInspectorClient(conn),
 		overlayclient: pb.NewOverlayInspectorClient(conn),
 		irrdbclient:   pb.NewIrreparableInspectorClient(conn),
 		healthclient:  pb.NewHealthInspectorClient(conn),
 	}, nil
 }
 
[Further removals in this hunk: the Kademlia-only inspector helpers CountNodes, LookupNode, NodeInfo, DrawTableAsGraph, DumpNodes, prettyPrint, and PingNode are deleted from cmd/inspector/main.go.]
 
 // ObjectHealth gets information about the health of an object on the network
 func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
 	ctx := context.Background()
@@ -604,18 +415,10 @@ func sortSegments(segments []*pb.IrreparableSegment) map[string][]*pb.IrreparableSegment {
 }
 
 func init() {
-	rootCmd.AddCommand(kadCmd)
 	rootCmd.AddCommand(statsCmd)
 	rootCmd.AddCommand(irreparableCmd)
 	rootCmd.AddCommand(healthCmd)
 
-	kadCmd.AddCommand(countNodeCmd)
-	kadCmd.AddCommand(pingNodeCmd)
-	kadCmd.AddCommand(lookupNodeCmd)
-	kadCmd.AddCommand(nodeInfoCmd)
-	kadCmd.AddCommand(dumpNodesCmd)
-	kadCmd.AddCommand(drawTableCmd)
-
 	healthCmd.AddCommand(objectHealthCmd)
 	healthCmd.AddCommand(segmentHealthCmd)
 
@@ -10,9 +10,9 @@ RUN_PARAMS="${RUN_PARAMS:-} --identity-dir identity"
 
 RUN_PARAMS="${RUN_PARAMS:-} --metrics.app-suffix=-alpha"
 RUN_PARAMS="${RUN_PARAMS:-} --metrics.interval=30m"
-RUN_PARAMS="${RUN_PARAMS:-} --kademlia.external-address=${ADDRESS}"
-RUN_PARAMS="${RUN_PARAMS:-} --kademlia.operator.email=${EMAIL}"
-RUN_PARAMS="${RUN_PARAMS:-} --kademlia.operator.wallet=${WALLET}"
+RUN_PARAMS="${RUN_PARAMS:-} --contact.external-address=${ADDRESS}"
+RUN_PARAMS="${RUN_PARAMS:-} --operator.email=${EMAIL}"
+RUN_PARAMS="${RUN_PARAMS:-} --operator.wallet=${WALLET}"
 RUN_PARAMS="${RUN_PARAMS:-} --console.address=:14002"
 RUN_PARAMS="${RUN_PARAMS:-} --console.static-dir=/app"
 RUN_PARAMS="${RUN_PARAMS:-} --storage.allocated-bandwidth=${BANDWIDTH}"
@@ -107,11 +107,10 @@ func init() {
 
 func databaseConfig(config storagenode.Config) storagenodedb.Config {
 	return storagenodedb.Config{
 		Storage: config.Storage.Path,
 		Info:    filepath.Join(config.Storage.Path, "piecestore.db"),
 		Info2:   filepath.Join(config.Storage.Path, "info.db"),
 		Pieces:  config.Storage.Path,
-		Kademlia: config.Kademlia.DBPath,
 	}
 }
 
@@ -282,8 +282,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
 			"--server.address", process.Address,
 			"--server.private-address", net.JoinHostPort(host, port(satellitePeer, i, privateGRPC)),
 
-			"--kademlia.bootstrap-addr", bootstrap.Address,
-
 			"--server.extensions.revocation=false",
 			"--server.use-peer-ca-whitelist=false",
 
@@ -450,9 +448,8 @@ func newNetwork(flags *Flags) (*Processes, error) {
 			"--server.address", process.Address,
 			"--server.private-address", net.JoinHostPort(host, port(storagenodePeer, i, privateGRPC)),
 
-			"--kademlia.bootstrap-addr", bootstrap.Address,
-			"--kademlia.operator.email", fmt.Sprintf("storage%d@mail.test", i),
-			"--kademlia.operator.wallet", "0x0123456789012345678901234567890123456789",
+			"--operator.email", fmt.Sprintf("storage%d@mail.test", i),
+			"--operator.wallet", "0x0123456789012345678901234567890123456789",
 
 			"--storage2.monitor.minimum-disk-space", "0",
 			"--storage2.monitor.minimum-bandwidth", "0",
@@ -23,7 +23,6 @@ import (
 	"storj.io/storj/bootstrap"
 	"storj.io/storj/internal/testidentity"
 	"storj.io/storj/pkg/identity"
-	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
 	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/overlay"
@@ -199,19 +198,6 @@ func NewCustom(log *zap.Logger, config Config) (*Planet, error) {
 		return nil, errs.Combine(err, planet.Shutdown())
 	}
 
-	// init Satellites
-	for _, satellite := range planet.Satellites {
-		if len(satellite.Kademlia.Service.GetBootstrapNodes()) == 0 {
-			satellite.Kademlia.Service.SetBootstrapNodes([]pb.Node{planet.Bootstrap.Local().Node})
-		}
-	}
-	// init storage nodes
-	for _, storageNode := range planet.StorageNodes {
-		if len(storageNode.Kademlia.Service.GetBootstrapNodes()) == 0 {
-			storageNode.Kademlia.Service.SetBootstrapNodes([]pb.Node{planet.Bootstrap.Local().Node})
-		}
-	}
-
 	return planet, nil
 }
 
@@ -231,56 +217,11 @@ func (planet *Planet) Start(ctx context.Context) {
 			return peer.peer.Run(peer.ctx)
 		})
 	}
+	for _, peer := range planet.StorageNodes {
+		peer.Contact.Chore.Loop.TriggerWait()
+	}
 
 	planet.started = true
-
-	planet.Bootstrap.Kademlia.Service.WaitForBootstrap()
-
-	for _, peer := range planet.StorageNodes {
-		peer.Kademlia.Service.WaitForBootstrap()
-	}
-
-	for _, peer := range planet.Satellites {
-		peer.Kademlia.Service.WaitForBootstrap()
-	}
-
-	planet.Reconnect(ctx)
-}
-
-// Reconnect reconnects all nodes with each other.
-func (planet *Planet) Reconnect(ctx context.Context) {
-	log := planet.log.Named("reconnect")
-
-	var group errgroup.Group
-
-	// TODO: instead of pinging try to use Lookups or natural discovery to ensure
-	// everyone finds everyone else
-
-	for _, storageNode := range planet.StorageNodes {
-		storageNode := storageNode
-		group.Go(func() error {
-			_, err := storageNode.Kademlia.Service.Ping(ctx, planet.Bootstrap.Local().Node)
-			if err != nil {
-				log.Error("storage node did not find bootstrap", zap.Error(err))
-			}
-			return nil
-		})
-	}
-
-	for _, satellite := range planet.Satellites {
-		satellite := satellite
-		group.Go(func() error {
-			for _, storageNode := range planet.StorageNodes {
-				_, err := satellite.Kademlia.Service.Ping(ctx, storageNode.Local().Node)
-				if err != nil {
-					log.Error("satellite did not find storage node", zap.Error(err))
-				}
-			}
-			return nil
-		})
-	}
-
-	_ = group.Wait() // none of the goroutines return an error
 }
 
 // StopPeer stops a single peer in the planet
@@ -13,6 +13,7 @@ import (
 
 	"storj.io/storj/internal/testcontext"
 	"storj.io/storj/internal/testplanet"
+	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
 )
 
@@ -37,21 +38,18 @@ func TestBasic(t *testing.T) {
 			t.Log("UPLINK", uplink.ID(), uplink.Addr())
 		}
 
-		// ping a satellite
-		_, err = planet.StorageNodes[0].Kademlia.Service.Ping(ctx, planet.Satellites[0].Local().Node)
+		sat := planet.Satellites[0].Local().Node
+		node := planet.StorageNodes[0].Local()
+		conn, err := planet.StorageNodes[0].Transport.DialNode(ctx, &sat)
 		require.NoError(t, err)
-
-		// ping a storage node
-		_, err = planet.StorageNodes[0].Kademlia.Service.Ping(ctx, planet.StorageNodes[1].Local().Node)
+		_, err = pb.NewNodeClient(conn).CheckIn(ctx, &pb.CheckInRequest{
+			Address:  node.GetAddress().GetAddress(),
+			Version:  &node.Version,
+			Capacity: &node.Capacity,
+			Operator: &node.Operator,
+		})
 		require.NoError(t, err)
-
-		err = planet.StopPeer(planet.StorageNodes[1])
-		require.NoError(t, err)
-
-		// ping a stopped storage node
-		_, err = planet.StorageNodes[0].Kademlia.Service.Ping(ctx, planet.StorageNodes[1].Local().Node)
-		require.Error(t, err)
 
 		// wait a bit to see whether some failures occur
 		time.Sleep(time.Second)
 	}
@@ -61,6 +59,23 @@ func TestBasic(t *testing.T) {
 	}
 }
 
+// test that nodes get put into each satellite's overlay cache
+func TestContact(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 2, StorageNodeCount: 5, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		satellite0 := planet.Satellites[0]
+		satellite1 := planet.Satellites[1]
+
+		for _, n := range planet.StorageNodes {
+			_, err := satellite0.Overlay.Service.Get(ctx, n.ID())
+			require.NoError(t, err)
+			_, err = satellite1.Overlay.Service.Get(ctx, n.ID())
+			require.NoError(t, err)
+		}
+	})
+}
+
 func BenchmarkCreate(b *testing.B) {
 	storageNodes := []int{4, 10, 100}
 	for _, count := range storageNodes {
@@ -7,13 +7,11 @@ import (
 	"os"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"github.com/zeebo/errs"
 
 	"storj.io/storj/internal/memory"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/peertls/extensions"
 	"storj.io/storj/pkg/peertls/tlsopts"
 	"storj.io/storj/pkg/revocation"
@@ -86,16 +84,6 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 					},
 				},
 			},
-			Kademlia: kademlia.Config{
-				Alpha:                5,
-				BootstrapBackoffBase: 500 * time.Millisecond,
-				BootstrapBackoffMax:  2 * time.Second,
-				DBPath:               storageDir, // TODO: replace with master db
-				Operator: kademlia.OperatorConfig{
-					Email:  prefix + "@mail.test",
-					Wallet: "0x" + strings.Repeat("00", 20),
-				},
-			},
 			Overlay: overlay.Config{
 				Node: overlay.NodeSelectionConfig{
 					UptimeCount: 0,
@@ -122,7 +110,6 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 					UpdateStatsBatchSize: 100,
 				},
 			},
 			Discovery: discovery.Config{
-				DiscoveryInterval:  1 * time.Second,
 				RefreshInterval:    1 * time.Second,
 				RefreshLimit:       100,
 				RefreshConcurrency: 2,
@@ -14,7 +14,6 @@ import (
 	"github.com/zeebo/errs"
 
 	"storj.io/storj/internal/memory"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/peertls/extensions"
 	"storj.io/storj/pkg/peertls/tlsopts"
 	"storj.io/storj/pkg/revocation"
@@ -72,15 +71,9 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatellites storj.Nod
 				},
 			},
 		},
-		Kademlia: kademlia.Config{
-			BootstrapBackoffBase: 500 * time.Millisecond,
-			BootstrapBackoffMax:  2 * time.Second,
-			Alpha:                5,
-			DBPath:               filepath.Join(storageDir, "kademlia/"),
-			Operator: kademlia.OperatorConfig{
-				Email:  prefix + "@mail.test",
-				Wallet: "0x" + strings.Repeat("00", 20),
-			},
+		Operator: storagenode.OperatorConfig{
+			Email:  prefix + "@mail.test",
+			Wallet: "0x" + strings.Repeat("00", 20),
 		},
 		Storage: piecestore.OldConfig{
 			Path: filepath.Join(storageDir, "pieces/"),
@@ -145,11 +138,10 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatellites storj.Nod
 		verisonInfo := planet.NewVersionInfo()
 
 		storageConfig := storagenodedb.Config{
 			Storage: config.Storage.Path,
 			Info:    filepath.Join(config.Storage.Path, "piecestore.db"),
 			Info2:   filepath.Join(config.Storage.Path, "info.db"),
 			Pieces:  config.Storage.Path,
-			Kademlia: config.Kademlia.DBPath,
 		}
 
 		var db storagenode.DB
@@ -81,9 +81,6 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
 		ul := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 
-		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
-
 		testData := testrand.Bytes(memory.MiB)
 
 		err := ul.UploadWithConfig(ctx, satellite, &uplink.RSConfig{
@@ -1,258 +0,0 @@
[Deleted file (package kademlia_test, kademlia_planet_test.go): removes TestFetchPeerIdentity, TestRequestInfo, TestRequestInfoUntrusted, TestPingTimeout, the skipped TestBootstrapBackoffReconnect, and the badProxy helper (newBadProxy, close, run) that simulated dropped connections to the bootstrap node.]
@@ -3,297 +3,276 @@
 package kademliaclient_test
 
[The kademliaclient test file is disabled rather than deleted: the import block is removed, and the entire bodies of TestDialer, TestSlowDialerHasTimeout, and the containsResult helper are replaced with a commented-out copy of the same code.]
@@ -1,41 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package kademlia_test
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"storj.io/storj/internal/testcontext"
-	"storj.io/storj/internal/testplanet"
-)
-
-func TestLookupNodes(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	planet, err := testplanet.New(t, 1, 8, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer ctx.Check(planet.Shutdown)
-
-	planet.Start(ctx)
-
-	k := planet.Satellites[0].Kademlia.Service
-	k.WaitForBootstrap() // redundant, but leaving here to be clear
-
-	seen, err := k.DumpNodes(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	assert.NotEqual(t, len(seen), 0)
-	assert.NotNil(t, seen)
-
-	target := seen[0]
-	found, err := k.FindNode(ctx, target.Id)
-	assert.NoError(t, err)
-	assert.Equal(t, target.Id, found.Id)
-}
@@ -1,98 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package kademlia_test
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/zap/zaptest"
-	"golang.org/x/sync/errgroup"
-
-	"storj.io/storj/bootstrap"
-	"storj.io/storj/internal/testcontext"
-	"storj.io/storj/internal/testplanet"
-	"storj.io/storj/storagenode"
-)
-
-func TestMergePlanets(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	log := zaptest.NewLogger(t)
-
-	alpha, err := testplanet.NewCustom(log.Named("A"), testplanet.Config{
-		SatelliteCount:   2,
-		StorageNodeCount: 5,
-	})
-	require.NoError(t, err)
-
-	beta, err := testplanet.NewCustom(log.Named("B"), testplanet.Config{
-		SatelliteCount:   2,
-		StorageNodeCount: 5,
-		Identities:       alpha.Identities(), // avoid using the same pregenerated identities
-		Reconfigure: testplanet.Reconfigure{
-			Bootstrap: func(index int, config *bootstrap.Config) {
-				config.Kademlia.BootstrapAddr = alpha.Bootstrap.Addr()
-			},
-		},
-	})
-	require.NoError(t, err)
-
-	defer ctx.Check(alpha.Shutdown)
-	defer ctx.Check(beta.Shutdown)
-
-	// during planet.Start
-	//   every satellite & storage node looks itself up from bootstrap
-	//   every storage node pings bootstrap
-	//   every satellite pings every storage node
-	alpha.Start(ctx)
-	beta.Start(ctx)
-
-	allSatellites := []*testplanet.SatelliteSystem{}
-	allSatellites = append(allSatellites, alpha.Satellites...)
-	allSatellites = append(allSatellites, beta.Satellites...)
-
-	// make satellites refresh buckets 10 times
-	var group errgroup.Group
-	for _, satellite := range allSatellites {
-		satellite := satellite
-		group.Go(func() error {
-			satellite.Kademlia.Service.SetBucketRefreshThreshold(0)
-			for i := 0; i < 2; i++ {
-				satellite.Kademlia.Service.RefreshBuckets.TriggerWait()
-			}
-			return nil
-		})
-	}
-	_ = group.Wait()
-
-	test := func(tag string, satellites []*testplanet.SatelliteSystem, storageNodes []*storagenode.Peer) string {
-		found, missing := 0, 0
-		for _, satellite := range satellites {
-			for _, storageNode := range storageNodes {
-				node, err := satellite.Overlay.Service.Get(ctx, storageNode.ID())
-				if assert.NoError(t, err, tag) {
-					found++
-					assert.Equal(t, storageNode.Addr(), node.Address.Address, tag)
-				} else {
-					missing++
-				}
-			}
-		}
-		return fmt.Sprintf("%s: Found %v out of %v (missing %v)", tag, found, found+missing, missing)
-	}
-
-	sumAA := test("A-A", alpha.Satellites, alpha.StorageNodes)
-	sumAB := test("A-B", alpha.Satellites, beta.StorageNodes)
-	sumBB := test("B-B", beta.Satellites, beta.StorageNodes)
-	sumBA := test("B-A", beta.Satellites, alpha.StorageNodes)
-
-	t.Log(sumAA)
-	t.Log(sumAB)
-	t.Log(sumBB)
-	t.Log(sumBA)
-}
@@ -28,8 +28,6 @@ message CheckInResponse {
 	string ping_error_message = 2;
 }
 
-message ContactPingRequest {
-}
+message ContactPingRequest {}
 
-message ContactPingResponse {
-}
+message ContactPingResponse {}
@@ -398,7 +398,6 @@ func TestVerifierOfflineNode(t *testing.T) {
 		queue := audits.Queue
 
 		audits.Worker.Loop.Pause()
-		satellite.Discovery.Service.Discovery.Pause()
 
 		ul := planet.Uplinks[0]
 		testData := testrand.Bytes(8 * memory.KiB)
@@ -51,3 +51,20 @@ func TestSatelliteContactEndpoint(t *testing.T) {
 		require.Equal(t, ident.PeerIdentity(), peerID)
 	})
 }
+
+func TestFetchInfo(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		nodeDossier := planet.StorageNodes[0].Local()
+		node := pb.Node{Id: nodeDossier.Id, Address: nodeDossier.Address}
+
+		resp, err := planet.Satellites[0].Contact.Service.FetchInfo(ctx, node)
+		require.NotNil(t, resp)
+		require.NoError(t, err)
+		require.Equal(t, nodeDossier.Type, resp.Type)
+		require.Equal(t, &nodeDossier.Operator, resp.Operator)
+		require.Equal(t, &nodeDossier.Capacity, resp.Capacity)
+		require.Equal(t, nodeDossier.Version.GetVersion(), resp.Version.GetVersion())
+	})
+}
satellite/contact/kademlia.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package contact
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"storj.io/storj/pkg/pb"
+)
+
+// KademliaEndpoint implements the NodesServer Interface for backwards compatibility
+type KademliaEndpoint struct {
+	log *zap.Logger
+}
+
+// NewKademliaEndpoint returns a new endpoint
+func NewKademliaEndpoint(log *zap.Logger) *KademliaEndpoint {
+	return &KademliaEndpoint{
+		log: log,
+	}
+}
+
+// Query is a node to node communication query
+func (endpoint *KademliaEndpoint) Query(ctx context.Context, req *pb.QueryRequest) (_ *pb.QueryResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	return &pb.QueryResponse{}, nil
+}
+
+// Ping provides an easy way to verify a node is online and accepting requests
+func (endpoint *KademliaEndpoint) Ping(ctx context.Context, req *pb.PingRequest) (_ *pb.PingResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	return &pb.PingResponse{}, nil
+}
+
+// RequestInfo returns the node info
+func (endpoint *KademliaEndpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest) (_ *pb.InfoResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	return &pb.InfoResponse{}, nil
+}
|
|||||||
package contact
|
package contact
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/zeebo/errs"
|
"github.com/zeebo/errs"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
"google.golang.org/grpc"
|
||||||
"gopkg.in/spacemonkeygo/monkit.v2"
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
||||||
|
|
||||||
|
"storj.io/storj/pkg/pb"
|
||||||
"storj.io/storj/pkg/transport"
|
"storj.io/storj/pkg/transport"
|
||||||
"storj.io/storj/satellite/overlay"
|
"storj.io/storj/satellite/overlay"
|
||||||
)
|
)
|
||||||
@@ -17,24 +22,77 @@ var Error = errs.Class("contact")
 
 var mon = monkit.Package()
 
+// Config contains configurable values for contact service
+type Config struct {
+	ExternalAddress string `user:"true" help:"the public address of the node, useful for nodes behind NAT" default:""`
+}
+
+// Conn represents a connection
+type Conn struct {
+	conn   *grpc.ClientConn
+	client pb.NodesClient
+}
+
 // Service is the contact service between storage nodes and satellites.
 // It is responsible for updating general node information like address, capacity, and uptime.
 // It is also responsible for updating peer identity information for verifying signatures from that node.
 //
 // architecture: Service
 type Service struct {
 	log *zap.Logger
 
+	mutex sync.Mutex
+	self  *overlay.NodeDossier
+
 	overlay   *overlay.Service
 	peerIDs   overlay.PeerIdentities
 	transport transport.Client
 }
 
 // NewService creates a new contact service.
-func NewService(log *zap.Logger, overlay *overlay.Service, peerIDs overlay.PeerIdentities, transport transport.Client) *Service {
+func NewService(log *zap.Logger, self *overlay.NodeDossier, overlay *overlay.Service, peerIDs overlay.PeerIdentities, transport transport.Client) *Service {
 	return &Service{
 		log:       log,
+		self:      self,
 		overlay:   overlay,
 		peerIDs:   peerIDs,
 		transport: transport,
 	}
 }
+
+// Local returns the satellite node dossier
+func (service *Service) Local() overlay.NodeDossier {
+	service.mutex.Lock()
+	defer service.mutex.Unlock()
+	return *service.self
+}
+
+// FetchInfo connects to a node and returns its node info.
+func (service *Service) FetchInfo(ctx context.Context, target pb.Node) (_ *pb.InfoResponse, err error) {
+	conn, err := service.dialNode(ctx, target)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := conn.client.RequestInfo(ctx, &pb.InfoRequest{})
+
+	return resp, errs.Combine(err, conn.close())
+}
+
+// dialNode dials the specified node.
+func (service *Service) dialNode(ctx context.Context, target pb.Node) (_ *Conn, err error) {
+	defer mon.Task()(&ctx)(&err)
+	grpcconn, err := service.transport.DialNode(ctx, &target)
+	return &Conn{
+		conn:   grpcconn,
+		client: pb.NewNodesClient(grpcconn),
+	}, err
+}
+
+// close disconnects this connection.
+func (conn *Conn) close() error {
+	return conn.conn.Close()
+}
+
+// Close closes resources
+func (service *Service) Close() error { return nil }
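Reviewer note: the sketch below shows how a satellite-side caller is expected to use the new FetchInfo method, mirroring the flow the discovery refresh loop follows after this change. The refreshNode helper is illustrative only, and it assumes overlay.Service.UpdateNodeInfo keeps its current signature.

```go
package discoverysketch

import (
	"context"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/satellite/contact"
	"storj.io/storj/satellite/overlay"
)

// refreshNode dials a known node through the contact service, asks it for
// its current info, and stores the response in the overlay cache. This is
// the same flow the discovery chore uses; only the helper name is made up.
func refreshNode(ctx context.Context, contactService *contact.Service, cache *overlay.Service, node pb.Node) error {
	info, err := contactService.FetchInfo(ctx, node) // RequestInfo over the node's public address
	if err != nil {
		return err
	}
	// Persist the reported type, operator, capacity, and version.
	_, err = cache.UpdateNodeInfo(ctx, node.Id, info)
	return err
}
```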
@@ -5,7 +5,6 @@ package discovery
 
 import (
 	"context"
-	"crypto/rand"
 	"time"
 
 	"github.com/zeebo/errs"
@@ -14,8 +13,7 @@ import (
 	monkit "gopkg.in/spacemonkeygo/monkit.v2"
 
 	"storj.io/storj/internal/sync2"
-	"storj.io/storj/pkg/kademlia"
-	"storj.io/storj/pkg/storj"
+	"storj.io/storj/satellite/contact"
 	"storj.io/storj/satellite/overlay"
 )
 
@@ -29,7 +27,6 @@ var (
 // Config loads on the configuration values for the cache
 type Config struct {
 	RefreshInterval time.Duration `help:"the interval at which the cache refreshes itself in seconds" default:"1s"`
-	DiscoveryInterval time.Duration `help:"the interval at which the satellite attempts to find new nodes via random node ID lookups" default:"1s"`
 	RefreshLimit       int `help:"the amount of nodes read from the overlay in a single pagination call" default:"100"`
 	RefreshConcurrency int `help:"the amount of nodes refreshed in parallel" default:"8"`
 }
@@ -38,38 +35,33 @@ type Config struct {
 //
 // architecture: Chore
 type Discovery struct {
 	log   *zap.Logger
 	cache *overlay.Service
-	kad   *kademlia.Kademlia
+	contact *contact.Service
 
 	refreshLimit       int
 	refreshConcurrency int
 
 	Refresh   sync2.Cycle
-	Discovery sync2.Cycle
 }
 
 // New returns a new discovery service.
-func New(logger *zap.Logger, ol *overlay.Service, kad *kademlia.Kademlia, config Config) *Discovery {
+func New(logger *zap.Logger, ol *overlay.Service, contact *contact.Service, config Config) *Discovery {
 	discovery := &Discovery{
 		log:   logger,
 		cache: ol,
-		kad:   kad,
+		contact: contact,
 
 		refreshLimit:       config.RefreshLimit,
 		refreshConcurrency: config.RefreshConcurrency,
 	}
 
 	discovery.Refresh.SetInterval(config.RefreshInterval)
-	discovery.Discovery.SetInterval(config.DiscoveryInterval)
 
 	return discovery
 }
 
 // Close closes resources
 func (discovery *Discovery) Close() error {
 	discovery.Refresh.Close()
-	discovery.Discovery.Close()
 	return nil
 }
 
@@ -85,13 +77,6 @@ func (discovery *Discovery) Run(ctx context.Context) (err error) {
 		}
 		return nil
 	})
-	discovery.Discovery.Start(ctx, &group, func(ctx context.Context) error {
-		err := discovery.discover(ctx)
-		if err != nil {
-			discovery.log.Error("error with cache discovery: ", zap.Error(err))
-		}
-		return nil
-	})
 	return group.Wait()
 }
 
@@ -119,8 +104,7 @@ func (discovery *Discovery) refresh(ctx context.Context) (err error) {
 		node := node
 
 		limiter.Go(ctx, func() {
-			// NB: FetchInfo updates node uptime already
-			info, err := discovery.kad.FetchInfo(ctx, *node)
+			info, err := discovery.contact.FetchInfo(ctx, *node)
 			if ctx.Err() != nil {
 				return
 			}
@@ -144,27 +128,3 @@ func (discovery *Discovery) refresh(ctx context.Context) (err error) {
 	limiter.Wait()
 	return nil
 }
-
-// Discovery runs lookups for random node ID's to find new nodes in the network
-func (discovery *Discovery) discover(ctx context.Context) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	r, err := randomID()
-	if err != nil {
-		return Error.Wrap(err)
-	}
-	_, err = discovery.kad.FindNode(ctx, r)
-	if err != nil && !kademlia.NodeNotFound.Has(err) {
-		return Error.Wrap(err)
-	}
-	return nil
-}
-
-func randomID() (storj.NodeID, error) {
-	b := make([]byte, 32)
-	_, err := rand.Read(b)
-	if err != nil {
-		return storj.NodeID{}, Error.Wrap(err)
-	}
-	return storj.NodeIDFromBytes(b)
-}
@@ -7,7 +7,6 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 
 	"storj.io/storj/internal/testcontext"
 	"storj.io/storj/internal/testplanet"
@@ -26,35 +25,3 @@ func TestCache_Refresh(t *testing.T) {
 		}
 	})
 }
-
-func TestCache_Discovery(t *testing.T) {
-	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
-	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		satellite := planet.Satellites[0]
-		testnode := planet.StorageNodes[0]
-		offlineID := testnode.ID()
-
-		satellite.Kademlia.Service.RefreshBuckets.Pause()
-
-		satellite.Discovery.Service.Refresh.Pause()
-		satellite.Discovery.Service.Discovery.Pause()
-
-		overlay := satellite.Overlay.Service
-
-		// mark node as offline in overlay
-		_, err := overlay.UpdateUptime(ctx, offlineID, false)
-		require.NoError(t, err)
-
-		node, err := overlay.Get(ctx, offlineID)
-		assert.NoError(t, err)
-		assert.False(t, overlay.IsOnline(node))
-
-		satellite.Discovery.Service.Discovery.TriggerWait()
-
-		found, err := overlay.Get(ctx, offlineID)
-		assert.NoError(t, err)
-		assert.Equal(t, offlineID, found.Id)
-		assert.True(t, overlay.IsOnline(found))
-	})
-}
@@ -5,12 +5,9 @@ package satellite
 
 import (
 	"context"
-	"fmt"
 	"net"
 	"net/mail"
 	"net/smtp"
-	"os"
-	"path/filepath"
 
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
@@ -23,7 +20,6 @@ import (
 	"storj.io/storj/internal/version"
 	"storj.io/storj/pkg/auth/grpcauth"
 	"storj.io/storj/pkg/identity"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/peertls/extensions"
 	"storj.io/storj/pkg/peertls/tlsopts"
@@ -61,8 +57,6 @@ import (
 	"storj.io/storj/satellite/repair/repairer"
 	"storj.io/storj/satellite/rewards"
 	"storj.io/storj/satellite/vouchers"
-	"storj.io/storj/storage"
-	"storj.io/storj/storage/boltdb"
 )
 
 var mon = monkit.Package()
@@ -112,7 +106,7 @@ type Config struct {
 	Identity identity.Config
 	Server   server.Config
 
-	Kademlia  kademlia.Config
+	Contact   contact.Config
 	Overlay   overlay.Config
 	Discovery discovery.Config
 
@@ -155,18 +149,10 @@ type Peer struct {
 	Version *version.Service
 
 	// services and endpoints
-	Kademlia struct {
-		kdb, ndb, adb storage.KeyValueStore // TODO: move these into DB
-
-		RoutingTable *kademlia.RoutingTable
-		Service      *kademlia.Kademlia
-		Endpoint     *kademlia.Endpoint
-		Inspector    *kademlia.Inspector
-	}
-
 	Contact struct {
 		Service   *contact.Service
 		Endpoint  *contact.Endpoint
+		KEndpoint *contact.KademliaEndpoint
 	}
 
 	Overlay struct {
@@ -295,19 +281,17 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 
 		peer.Overlay.DB = overlay.NewCombinedCache(peer.DB.OverlayCache())
 		peer.Overlay.Service = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
-		peer.Transport = peer.Transport.WithObservers(peer.Overlay.Service)
 
 		peer.Overlay.Inspector = overlay.NewInspector(peer.Overlay.Service)
 		pb.RegisterOverlayInspectorServer(peer.Server.PrivateGRPC(), peer.Overlay.Inspector)
 		pb.DRPCRegisterOverlayInspector(peer.Server.PrivateDRPC(), peer.Overlay.Inspector)
 	}
 
-	{ // setup kademlia
-		log.Debug("Setting up Kademlia")
-		config := config.Kademlia
-		// TODO: move this setup logic into kademlia package
-		if config.ExternalAddress == "" {
-			config.ExternalAddress = peer.Addr()
+	{ // setup contact service
+		log.Debug("Setting up contact service")
+		c := config.Contact
+		if c.ExternalAddress == "" {
+			c.ExternalAddress = peer.Addr()
 		}
 
 		pbVersion, err := versionInfo.Proto()
@@ -319,67 +303,25 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 			Node: pb.Node{
 				Id: peer.ID(),
 				Address: &pb.NodeAddress{
-					Address: config.ExternalAddress,
+					Address: c.ExternalAddress,
 				},
 			},
 			Type: pb.NodeType_SATELLITE,
-			Operator: pb.NodeOperator{
-				Email:  config.Operator.Email,
-				Wallet: config.Operator.Wallet,
-			},
 			Version: *pbVersion,
 		}
+		peer.Contact.Service = contact.NewService(peer.Log.Named("contact:service"), self, peer.Overlay.Service, peer.DB.PeerIdentities(), peer.Transport)
 
-		{ // setup routing table
-			// TODO: clean this up, should be part of database
-			log.Debug("Setting up routing table")
-			bucketIdentifier := peer.ID().String()[:5] // need a way to differentiate between nodes if running more than one simultaneously
-			dbpath := filepath.Join(config.DBPath, fmt.Sprintf("kademlia_%s.db", bucketIdentifier))
-
-			if err := os.MkdirAll(config.DBPath, 0777); err != nil && !os.IsExist(err) {
-				return nil, err
-			}
-
-			dbs, err := boltdb.NewShared(dbpath, kademlia.KademliaBucket, kademlia.NodeBucket, kademlia.AntechamberBucket)
-			if err != nil {
-				return nil, errs.Combine(err, peer.Close())
-			}
-			peer.Kademlia.kdb, peer.Kademlia.ndb, peer.Kademlia.adb = dbs[0], dbs[1], dbs[2]
-
-			peer.Kademlia.RoutingTable, err = kademlia.NewRoutingTable(peer.Log.Named("routing"), self, peer.Kademlia.kdb, peer.Kademlia.ndb, peer.Kademlia.adb, &config.RoutingTableConfig)
-			if err != nil {
-				return nil, errs.Combine(err, peer.Close())
-			}
-
-			peer.Transport = peer.Transport.WithObservers(peer.Kademlia.RoutingTable)
-		}
-
-		peer.Kademlia.Service, err = kademlia.NewService(peer.Log.Named("kademlia"), peer.Transport, peer.Kademlia.RoutingTable, config)
-		if err != nil {
-			return nil, errs.Combine(err, peer.Close())
-		}
-
-		peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, nil, peer.Kademlia.RoutingTable, nil)
-		pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
-		pb.DRPCRegisterNodes(peer.Server.DRPC(), peer.Kademlia.Endpoint)
-
-		peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)
-		pb.RegisterKadInspectorServer(peer.Server.PrivateGRPC(), peer.Kademlia.Inspector)
-		pb.DRPCRegisterKadInspector(peer.Server.PrivateDRPC(), peer.Kademlia.Inspector)
-	}
-
-	{ // setup contact service
-		log.Debug("Setting up contact service")
-		peer.Contact.Service = contact.NewService(peer.Log.Named("contact"), peer.Overlay.Service, peer.DB.PeerIdentities(), peer.Transport)
 		peer.Contact.Endpoint = contact.NewEndpoint(peer.Log.Named("contact:endpoint"), peer.Contact.Service)
+		peer.Contact.KEndpoint = contact.NewKademliaEndpoint(peer.Log.Named("contact:nodes_service_endpoint"))
 		pb.RegisterNodeServer(peer.Server.GRPC(), peer.Contact.Endpoint)
+		pb.RegisterNodesServer(peer.Server.GRPC(), peer.Contact.KEndpoint)
 		pb.DRPCRegisterNode(peer.Server.DRPC(), peer.Contact.Endpoint)
+		pb.DRPCRegisterNodes(peer.Server.DRPC(), peer.Contact.KEndpoint)
 	}
 
 	{ // setup discovery
 		log.Debug("Setting up discovery")
 		config := config.Discovery
-		peer.Discovery.Service = discovery.New(peer.Log.Named("discovery"), peer.Overlay.Service, peer.Kademlia.Service, config)
+		peer.Discovery.Service = discovery.New(peer.Log.Named("discovery"), peer.Overlay.Service, peer.Contact.Service, config)
 	}
 
 	{ // setup vouchers
@@ -424,7 +366,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 			config.Orders.Expiration,
 			&pb.NodeAddress{
 				Transport: pb.NodeTransport_TCP_TLS_GRPC,
-				Address:   config.Kademlia.ExternalAddress,
+				Address:   config.Contact.ExternalAddress,
 			},
 			config.Repairer.MaxExcessRateOptimalThreshold,
 		)
@@ -731,12 +673,6 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(peer.Version.Run(ctx))
 	})
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(peer.Kademlia.Service.Bootstrap(ctx))
-	})
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(peer.Kademlia.Service.Run(ctx))
-	})
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(peer.Discovery.Service.Run(ctx))
 	})
@@ -846,24 +782,13 @@ func (peer *Peer) Close() error {
 		errlist.Add(peer.Discovery.Service.Close())
 	}
 
-	// TODO: add kademlia.Endpoint for consistency
-	if peer.Kademlia.Service != nil {
-		errlist.Add(peer.Kademlia.Service.Close())
-	}
-	if peer.Kademlia.RoutingTable != nil {
-		errlist.Add(peer.Kademlia.RoutingTable.Close())
+	if peer.Contact.Service != nil {
+		errlist.Add(peer.Contact.Service.Close())
 	}
 
 	if peer.Overlay.Service != nil {
 		errlist.Add(peer.Overlay.Service.Close())
 	}
 
-	if peer.Kademlia.ndb != nil || peer.Kademlia.kdb != nil || peer.Kademlia.adb != nil {
-		errlist.Add(peer.Kademlia.kdb.Close())
-		errlist.Add(peer.Kademlia.ndb.Close())
-		errlist.Add(peer.Kademlia.adb.Close())
-	}
-
 	return errlist.Err()
 }
 
@@ -871,7 +796,7 @@ func (peer *Peer) Close() error {
 func (peer *Peer) ID() storj.NodeID { return peer.Identity.ID }
 
 // Local returns the peer local node info.
-func (peer *Peer) Local() overlay.NodeDossier { return peer.Kademlia.RoutingTable.Local() }
+func (peer *Peer) Local() overlay.NodeDossier { return peer.Contact.Service.Local() }
 
 // Addr returns the public address.
 func (peer *Peer) Addr() string { return peer.Server.Addr().String() }
@@ -52,7 +52,6 @@ func TestDataRepair(t *testing.T) {
 		uplinkPeer := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 		// stop audit to prevent possible interactions i.e. repair timeout problems
 		satellite.Audit.Worker.Loop.Pause()
@@ -188,7 +187,6 @@ func TestCorruptDataRepair_Failed(t *testing.T) {
 		uplinkPeer := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 		// stop audit to prevent possible interactions i.e. repair timeout problems
 		satellite.Audit.Worker.Loop.Pause()
@@ -311,7 +309,6 @@ func TestCorruptDataRepair_Succeed(t *testing.T) {
 		uplinkPeer := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 		// stop audit to prevent possible interactions i.e. repair timeout problems
 		satellite.Audit.Worker.Loop.Pause()
@@ -428,7 +425,6 @@ func TestRemoveIrreparableSegmentFromQueue(t *testing.T) {
 		uplinkPeer := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 		// stop audit to prevent possible interactions i.e. repair timeout problems
 		satellite.Audit.Worker.Loop.Stop()
@@ -513,7 +509,6 @@ func TestRepairMultipleDisqualified(t *testing.T) {
 		uplinkPeer := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 
 		satellite.Repair.Checker.Loop.Pause()
@@ -629,7 +624,6 @@ func TestDataRepairUploadLimit(t *testing.T) {
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		satellite := planet.Satellites[0]
 		// stop discovery service so that we do not get a race condition when we delete nodes from overlay
-		satellite.Discovery.Service.Discovery.Stop()
 		satellite.Discovery.Service.Refresh.Stop()
 		// stop audit to prevent possible interactions i.e. repair timeout problems
 		satellite.Audit.Worker.Loop.Pause()
@@ -50,6 +50,13 @@ PATH=$RELEASE_DIR/bin:$PATH storj-sim -x --host $STORJ_NETWORK_HOST4 network tes
 # this replaces anywhere that has "/release/" in the config file, which currently just renames the static dir paths
 sed -i -e 's#/release/#/branch/#g' `storj-sim network env SATELLITE_0_DIR`/config.yaml
 
+# remove kademlia from config
+sed -i -e 's/kademlia.//g' `storj-sim network env STORAGENODE_5_DIR`/config.yaml
+sed -i -e 's/kademlia.//g' `storj-sim network env STORAGENODE_6_DIR`/config.yaml
+sed -i -e 's/kademlia.//g' `storj-sim network env STORAGENODE_7_DIR`/config.yaml
+sed -i -e 's/kademlia.//g' `storj-sim network env STORAGENODE_8_DIR`/config.yaml
+sed -i -e 's/kademlia.//g' `storj-sim network env STORAGENODE_9_DIR`/config.yaml
+
 ## Ensure that partially upgraded network works
 
 # keep half of the storage nodes on the old version
scripts/testdata/satellite-config.yaml.lock (vendored)
@@ -49,6 +49,9 @@
 # stripe api key
 # console.stripe-key: ""
 
+# the public address of the node, useful for nodes behind NAT
+contact.external-address: ""
+
 # satellite database connection string
 # database: postgres://
 
@@ -70,9 +73,6 @@
 # If set, a path to write a process trace SVG to
 # debug.trace-out: ""
 
-# the interval at which the satellite attempts to find new nodes via random node ID lookups
-# discovery.discovery-interval: 1s
-
 # the amount of nodes refreshed in parallel
 # discovery.refresh-concurrency: 8
 
@@ -103,36 +103,6 @@ identity.cert-path: /root/.local/share/storj/identity/satellite/identity.cert
 # path to the private key for this identity
 identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 
-# alpha is a system wide concurrency parameter
-# kademlia.alpha: 5
-
-# the Kademlia node to bootstrap against
-# kademlia.bootstrap-addr: bootstrap.storj.io:8888
-
-# the base interval to wait when retrying bootstrap
-# kademlia.bootstrap-backoff-base: 1s
-
-# the maximum amount of time to wait when retrying bootstrap
-# kademlia.bootstrap-backoff-max: 30s
-
-# size of each Kademlia bucket
-# kademlia.bucket-size: 20
-
-# the path for storage node db services to be created on
-# kademlia.db-path: testdata/kademlia
-
-# the public address of the Kademlia node, useful for nodes behind NAT
-kademlia.external-address: ""
-
-# operator email address
-kademlia.operator.email: ""
-
-# operator wallet address
-kademlia.operator.wallet: ""
-
-# size of Kademlia replacement cache
-# kademlia.replacement-cache-size: 5
-
 # what to use for storing real-time accounting data
 # live-accounting.storage-backend: plainmemory
 
@@ -34,28 +34,29 @@ var (
 //
 // architecture: Service
 type Service struct {
 	log *zap.Logger
 
 	trust          *trust.Pool
 	bandwidthDB    bandwidth.DB
 	reputationDB   reputation.DB
 	storageUsageDB storageusage.DB
 	pieceStore     *pieces.Store
-	version        *version.Service
-	pingStats      *contact.PingStats
+	contact        *contact.Service
+	version        *version.Service
+	pingStats      *contact.PingStats
 
 	allocatedBandwidth memory.Size
 	allocatedDiskSpace memory.Size
-	nodeID             storj.NodeID
 	walletAddress      string
 	startedAt          time.Time
 	versionInfo        version.Info
 }
 
 // NewService returns new instance of Service.
 func NewService(log *zap.Logger, bandwidth bandwidth.DB, pieceStore *pieces.Store, version *version.Service,
 	allocatedBandwidth, allocatedDiskSpace memory.Size, walletAddress string, versionInfo version.Info, trust *trust.Pool,
-	reputationDB reputation.DB, storageUsageDB storageusage.DB, pingStats *contact.PingStats, myNodeID storj.NodeID) (*Service, error) {
+	reputationDB reputation.DB, storageUsageDB storageusage.DB, pingStats *contact.PingStats, contact *contact.Service) (*Service, error) {
 	if log == nil {
 		return nil, errs.New("log can't be nil")
 	}
@@ -76,6 +77,9 @@ func NewService(log *zap.Logger, bandwidth bandwidth.DB, pieceStore *pieces.Stor
 		return nil, errs.New("pingStats can't be nil")
 	}
 
+	if contact == nil {
+		return nil, errs.New("contact service can't be nil")
+	}
 	return &Service{
 		log:   log,
 		trust: trust,
@@ -87,7 +91,7 @@ func NewService(log *zap.Logger, bandwidth bandwidth.DB, pieceStore *pieces.Stor
 		pingStats:          pingStats,
 		allocatedBandwidth: allocatedBandwidth,
 		allocatedDiskSpace: allocatedDiskSpace,
-		nodeID:             myNodeID,
+		contact:            contact,
 		walletAddress:      walletAddress,
 		startedAt:          time.Now(),
 		versionInfo:        versionInfo,
@@ -123,7 +127,7 @@ func (s *Service) GetDashboardData(ctx context.Context) (_ *Dashboard, err error
 	defer mon.Task()(&ctx)(&err)
 	data := new(Dashboard)
 
-	data.NodeID = s.nodeID
+	data.NodeID = s.contact.Local().Id
 	data.Wallet = s.walletAddress
 	data.Version = s.versionInfo.Version
 	data.UpToDate = s.version.IsAllowed()
@@ -11,30 +11,19 @@ import (
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
-	"gopkg.in/spacemonkeygo/monkit.v2"
 
 	"storj.io/storj/internal/sync2"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/transport"
 	"storj.io/storj/storagenode/trust"
 )
 
-var mon = monkit.Package()
-
-// Config contains configurable parameters for contact chore
-type Config struct {
-	Interval time.Duration `help:"how frequently the node contact chore should run" releaseDefault:"1h" devDefault:"30s"`
-	// MaxSleep should remain at default value to decrease traffic congestion to satellite
-	MaxSleep time.Duration `help:"maximum duration to wait before pinging satellites" releaseDefault:"45m" devDefault:"0s" hidden:"true"`
-}
-
 // Chore is the contact chore for nodes announcing themselves to their trusted satellites
 //
 // architecture: Chore
 type Chore struct {
 	log       *zap.Logger
-	rt        *kademlia.RoutingTable
+	service   *Service
 	transport transport.Client
 
 	trust *trust.Pool
@@ -44,10 +33,10 @@ type Chore struct {
 }
 
 // NewChore creates a new contact chore
-func NewChore(log *zap.Logger, interval time.Duration, maxSleep time.Duration, trust *trust.Pool, transport transport.Client, rt *kademlia.RoutingTable) *Chore {
+func NewChore(log *zap.Logger, interval time.Duration, maxSleep time.Duration, trust *trust.Pool, transport transport.Client, service *Service) *Chore {
 	return &Chore{
 		log:       log,
-		rt:        rt,
+		service:   service,
 		transport: transport,
 
 		trust: trust,
@@ -75,9 +64,8 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
 
 func (chore *Chore) pingSatellites(ctx context.Context) (err error) {
 	defer mon.Task()(&ctx)(&err)
-
 	var group errgroup.Group
-	self := chore.rt.Local()
+	self := chore.service.Local()
 	satellites := chore.trust.GetSatellites(ctx)
 	for _, satellite := range satellites {
 		satellite := satellite
@@ -40,7 +40,7 @@ func TestStoragenodeContactEndpoint(t *testing.T) {
 	})
 }
 
-func TestContactChore(t *testing.T) {
+func TestNodeInfoUpdated(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
@@ -60,7 +60,7 @@ func TestContactChore(t *testing.T) {
 		}
 		require.NotEqual(t, oldCapacity, newCapacity)
 
-		node.Kademlia.RoutingTable.UpdateSelf(&newCapacity)
+		node.Contact.Service.UpdateSelf(&newCapacity)
 
 		node.Contact.Chore.Loop.TriggerWait()
 
@@ -74,3 +74,24 @@ func TestContactChore(t *testing.T) {
 		require.Equal(t, newCapacity, newInfo.Capacity)
 	})
 }
+
+func TestRequestInfoEndpoint(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		nodeDossier := planet.StorageNodes[0].Local()
+
+		// Satellite Trusted
+		conn, err := planet.Satellites[0].Transport.DialNode(ctx, &nodeDossier.Node)
+		require.NoError(t, err)
+		defer ctx.Check(conn.Close)
+
+		resp, err := pb.NewNodesClient(conn).RequestInfo(ctx, &pb.InfoRequest{})
+		require.NotNil(t, resp)
+		require.NoError(t, err)
+		require.Equal(t, nodeDossier.Type, resp.Type)
+		require.Equal(t, &nodeDossier.Operator, resp.Operator)
+		require.Equal(t, &nodeDossier.Capacity, resp.Capacity)
+		require.Equal(t, nodeDossier.Version.Version, resp.Version.Version)
+	})
+}
storagenode/contact/kademlia.go (new file, 76 lines)
@@ -0,0 +1,76 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package contact
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"storj.io/storj/pkg/identity"
+	"storj.io/storj/pkg/pb"
+	"storj.io/storj/pkg/storj"
+)
+
+// SatelliteIDVerifier checks if the connection is from a trusted satellite
+type SatelliteIDVerifier interface {
+	VerifySatelliteID(ctx context.Context, id storj.NodeID) error
+}
+
+// KademliaEndpoint implements the NodesServer Interface for backwards compatibility
+type KademliaEndpoint struct {
+	log     *zap.Logger
+	service *Service
+	trust   SatelliteIDVerifier
+}
+
+// NewKademliaEndpoint returns a new endpoint
+func NewKademliaEndpoint(log *zap.Logger, service *Service, trust SatelliteIDVerifier) *KademliaEndpoint {
+	return &KademliaEndpoint{
+		log:     log,
+		service: service,
+		trust:   trust,
+	}
+}
+
+// Query is a node to node communication query
+func (endpoint *KademliaEndpoint) Query(ctx context.Context, req *pb.QueryRequest) (_ *pb.QueryResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	return &pb.QueryResponse{}, nil
+}
+
+// Ping provides an easy way to verify a node is online and accepting requests
+func (endpoint *KademliaEndpoint) Ping(ctx context.Context, req *pb.PingRequest) (_ *pb.PingResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	return &pb.PingResponse{}, nil
+}
+
+// RequestInfo returns the node info
+func (endpoint *KademliaEndpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest) (_ *pb.InfoResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+	self := endpoint.service.Local()
+
+	if endpoint.trust == nil {
+		return nil, status.Error(codes.Internal, "missing trust")
+	}
+
+	peer, err := identity.PeerIdentityFromContext(ctx)
+	if err != nil {
+		return nil, status.Error(codes.Unauthenticated, err.Error())
+	}
+
+	err = endpoint.trust.VerifySatelliteID(ctx, peer.ID)
+	if err != nil {
+		return nil, status.Errorf(codes.PermissionDenied, "untrusted peer %v", peer.ID)
+	}
+
+	return &pb.InfoResponse{
+		Type:     self.Type,
+		Operator: &self.Operator,
+		Capacity: &self.Capacity,
+		Version:  &self.Version,
+	}, nil
+}
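Reviewer note: a minimal sketch of how a client of the endpoint above can interpret its two outcomes. Only the PermissionDenied behavior is grounded in the diff; the helper name and the assumption that the caller already holds a dialed pb.NodesClient are illustrative.

```go
package contactsketch

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"storj.io/storj/pkg/pb"
)

// checkRequestInfo calls RequestInfo on an already-dialed storage node.
// A trusted satellite gets the node's info back; any other peer identity is
// rejected with codes.PermissionDenied, which is reported as trusted=false.
func checkRequestInfo(ctx context.Context, client pb.NodesClient) (trusted bool, info *pb.InfoResponse, err error) {
	info, err = client.RequestInfo(ctx, &pb.InfoRequest{})
	if status.Code(err) == codes.PermissionDenied {
		return false, nil, nil // caller is not a trusted satellite
	}
	return err == nil, info, err
}
```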
storagenode/contact/service.go (new file, 64 lines)
@@ -0,0 +1,64 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package contact
+
+import (
+	"sync"
+	"time"
+
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+	"gopkg.in/spacemonkeygo/monkit.v2"
+
+	"storj.io/storj/pkg/pb"
+	"storj.io/storj/satellite/overlay"
+)
+
+// Error is the default error class for contact package
+var Error = errs.Class("contact")
+
+var mon = monkit.Package()
+
+// Config contains configurable values for contact service
+type Config struct {
+	ExternalAddress string `user:"true" help:"the public address of the node, useful for nodes behind NAT" default:""`
+
+	// Chore config values
+	Interval time.Duration `help:"how frequently the node contact chore should run" releaseDefault:"1h" devDefault:"30s"`
+	// MaxSleep should remain at default value to decrease traffic congestion to satellite
+	MaxSleep time.Duration `help:"maximum duration to wait before pinging satellites" releaseDefault:"45m" devDefault:"0s" hidden:"true"`
+}
+
+// Service is the contact service between storage nodes and satellites
+type Service struct {
+	log *zap.Logger
+
+	mutex *sync.Mutex
+	self  *overlay.NodeDossier
+}
+
+// NewService creates a new contact service
+func NewService(log *zap.Logger, self *overlay.NodeDossier) *Service {
+	return &Service{
+		log:   log,
+		mutex: &sync.Mutex{},
+		self:  self,
+	}
+}
+
+// Local returns the storagenode node-dossier
+func (service *Service) Local() overlay.NodeDossier {
+	service.mutex.Lock()
+	defer service.mutex.Unlock()
+	return *service.self
+}
+
+// UpdateSelf updates the local node with the capacity
+func (service *Service) UpdateSelf(capacity *pb.NodeCapacity) {
+	service.mutex.Lock()
+	defer service.mutex.Unlock()
+	if capacity != nil {
+		service.self.Capacity = *capacity
+	}
+}
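Reviewer note: the intended use of this service elsewhere in the storage node, sketched below. A capacity-tracking component pushes new numbers through UpdateSelf, and the contact chore picks them up on its next ping because pingSatellites reads service.Local(). The helper is illustrative, and the pb.NodeCapacity field names are assumptions based on the existing proto.

```go
package contactsketch

import (
	"storj.io/storj/pkg/pb"
	"storj.io/storj/storagenode/contact"
)

// reportCapacity pushes updated free-space numbers into the contact service;
// the next contact chore iteration will include them in the node dossier it
// announces to the trusted satellites.
func reportCapacity(service *contact.Service, freeBandwidth, freeDisk int64) {
	service.UpdateSelf(&pb.NodeCapacity{
		FreeBandwidth: freeBandwidth,
		FreeDisk:      freeDisk,
	})
}
```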
@ -6,17 +6,14 @@ package inspector
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/protobuf/ptypes"
|
"github.com/golang/protobuf/ptypes"
|
||||||
"github.com/zeebo/errs"
|
"github.com/zeebo/errs"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
||||||
|
|
||||||
"storj.io/storj/pkg/kademlia"
|
|
||||||
"storj.io/storj/pkg/pb"
|
"storj.io/storj/pkg/pb"
|
||||||
"storj.io/storj/pkg/storj"
|
|
||||||
"storj.io/storj/storagenode/bandwidth"
|
"storj.io/storj/storagenode/bandwidth"
|
||||||
"storj.io/storj/storagenode/contact"
|
"storj.io/storj/storagenode/contact"
|
||||||
"storj.io/storj/storagenode/pieces"
|
"storj.io/storj/storagenode/pieces"
|
||||||
@@ -36,34 +33,37 @@ var (
 type Endpoint struct {
 	log        *zap.Logger
 	pieceStore *pieces.Store
-	kademlia   *kademlia.Kademlia
+	contact    *contact.Service
 	pingStats  *contact.PingStats
 	usageDB    bandwidth.DB
 
 	startTime        time.Time
 	pieceStoreConfig piecestore.OldConfig
 	dashboardAddress net.Addr
+	externalAddress  string
 }
 
 // NewEndpoint creates piecestore inspector instance
 func NewEndpoint(
 	log *zap.Logger,
 	pieceStore *pieces.Store,
-	kademlia *kademlia.Kademlia,
+	contact *contact.Service,
 	pingStats *contact.PingStats,
 	usageDB bandwidth.DB,
 	pieceStoreConfig piecestore.OldConfig,
-	dashbaordAddress net.Addr) *Endpoint {
+	dashbaordAddress net.Addr,
+	externalAddress string) *Endpoint {
 
 	return &Endpoint{
 		log:              log,
 		pieceStore:       pieceStore,
-		kademlia:         kademlia,
+		contact:          contact,
 		pingStats:        pingStats,
 		usageDB:          usageDB,
 		pieceStoreConfig: pieceStoreConfig,
 		dashboardAddress: dashbaordAddress,
 		startTime:        time.Now(),
+		externalAddress:  externalAddress,
 	}
 }
 
@@ -114,26 +114,12 @@ func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *pb.Dashboar
 		return &pb.DashboardResponse{}, Error.Wrap(err)
 	}
 
-	// TODO: querying all nodes is slow, find a more performant way to do this.
-	nodes, err := inspector.kademlia.FindNear(ctx, storj.NodeID{}, 10000000)
-	if err != nil {
-		return &pb.DashboardResponse{}, Error.Wrap(err)
-	}
-
-	bootstrapNodes := inspector.kademlia.GetBootstrapNodes()
-	bsNodes := make([]string, len(bootstrapNodes))
-	for i, node := range bootstrapNodes {
-		bsNodes[i] = node.Address.Address
-	}
-
 	lastPingedAt, lastPingFromID, lastPingFromAddress := inspector.pingStats.WhenLastPinged()
 
 	return &pb.DashboardResponse{
-		NodeId:              inspector.kademlia.Local().Id,
-		NodeConnections:     int64(len(nodes)),
-		BootstrapAddress:    strings.Join(bsNodes, ", "),
+		NodeId:              inspector.contact.Local().Id,
 		InternalAddress:     "",
-		ExternalAddress:     inspector.kademlia.Local().Address.Address,
+		ExternalAddress:     inspector.contact.Local().Address.Address,
 		LastPinged:          lastPingedAt,
 		LastPingFromId:      &lastPingFromID,
 		LastPingFromAddress: lastPingFromAddress,
@@ -135,7 +135,6 @@ func TestInspectorDashboard(t *testing.T) {
 			assert.True(t, response.Uptime.Nanos > 0)
 			assert.Equal(t, storageNode.ID(), response.NodeId)
 			assert.Equal(t, storageNode.Addr(), response.ExternalAddress)
-			assert.Equal(t, int64(len(planet.StorageNodes)+len(planet.Satellites)), response.NodeConnections)
 			assert.NotNil(t, response.Stats)
 		}
 	})
@@ -13,9 +13,9 @@ import (
 
 	"storj.io/storj/internal/memory"
 	"storj.io/storj/internal/sync2"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/storagenode/bandwidth"
+	"storj.io/storj/storagenode/contact"
 	"storj.io/storj/storagenode/pieces"
 )
 
@@ -38,8 +38,8 @@ type Config struct {
 // architecture: Service
 type Service struct {
 	log                *zap.Logger
-	routingTable       *kademlia.RoutingTable
 	store              *pieces.Store
+	contact            *contact.Service
 	usageDB            bandwidth.DB
 	allocatedDiskSpace int64
 	allocatedBandwidth int64
@@ -50,11 +50,11 @@ type Service struct {
 // TODO: should it be responsible for monitoring actual bandwidth as well?
 
 // NewService creates a new storage node monitoring service.
-func NewService(log *zap.Logger, routingTable *kademlia.RoutingTable, store *pieces.Store, usageDB bandwidth.DB, allocatedDiskSpace, allocatedBandwidth int64, interval time.Duration, config Config) *Service {
+func NewService(log *zap.Logger, store *pieces.Store, contact *contact.Service, usageDB bandwidth.DB, allocatedDiskSpace, allocatedBandwidth int64, interval time.Duration, config Config) *Service {
 	return &Service{
 		log:                log,
-		routingTable:       routingTable,
 		store:              store,
+		contact:            contact,
 		usageDB:            usageDB,
 		allocatedDiskSpace: allocatedDiskSpace,
 		allocatedBandwidth: allocatedBandwidth,
|
|||||||
return Error.Wrap(err)
|
return Error.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
service.routingTable.UpdateSelf(&pb.NodeCapacity{
|
service.contact.UpdateSelf(&pb.NodeCapacity{
|
||||||
FreeBandwidth: service.allocatedBandwidth - usedBandwidth,
|
FreeBandwidth: service.allocatedBandwidth - usedBandwidth,
|
||||||
FreeDisk: service.allocatedDiskSpace - usedSpace,
|
FreeDisk: service.allocatedDiskSpace - usedSpace,
|
||||||
})
|
})
|
||||||
|
@@ -26,7 +26,7 @@ func TestMonitor(t *testing.T) {
 		for _, storageNode := range planet.StorageNodes {
 			storageNode.Storage2.Monitor.Loop.Pause()
 
-			info, err := satellite.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
+			info, err := satellite.Contact.Service.FetchInfo(ctx, storageNode.Local().Node)
 			require.NoError(t, err)
 
 			// assume that all storage nodes have the same initial values
@@ -42,7 +42,7 @@ func TestMonitor(t *testing.T) {
 		for _, storageNode := range planet.StorageNodes {
 			storageNode.Storage2.Monitor.Loop.TriggerWait()
 
-			info, err := satellite.Kademlia.Service.FetchInfo(ctx, storageNode.Local().Node)
+			info, err := satellite.Contact.Service.FetchInfo(ctx, storageNode.Local().Node)
 			require.NoError(t, err)
 
 			stats, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
storagenode/operator.go (new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenode

import (
	"fmt"
	"regexp"

	"go.uber.org/zap"
)

// OperatorConfig defines properties related to storage node operator metadata
type OperatorConfig struct {
	Email  string `user:"true" help:"operator email address" default:""`
	Wallet string `user:"true" help:"operator wallet address" default:""`
}

// Verify verifies whether operator config is valid.
func (c OperatorConfig) Verify(log *zap.Logger) error {
	if err := isOperatorEmailValid(log, c.Email); err != nil {
		return err
	}
	if err := isOperatorWalletValid(log, c.Wallet); err != nil {
		return err
	}
	return nil
}

func isOperatorEmailValid(log *zap.Logger, email string) error {
	if email == "" {
		log.Sugar().Warn("Operator email address isn't specified.")
	} else {
		log.Sugar().Info("Operator email: ", email)
	}
	return nil
}

func isOperatorWalletValid(log *zap.Logger, wallet string) error {
	if wallet == "" {
		return fmt.Errorf("operator wallet address isn't specified")
	}
	r := regexp.MustCompile("^0x[a-fA-F0-9]{40}$")
	if match := r.MatchString(wallet); !match {
		return fmt.Errorf("operator wallet address isn't valid")
	}

	log.Sugar().Info("operator wallet: ", wallet)
	return nil
}
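A small usage sketch for the new, Kademlia-independent operator config; the addresses below are made-up examples, and the wallet only has to match the `^0x[a-fA-F0-9]{40}$` pattern enforced above.

```go
package main

import (
	"go.uber.org/zap"

	"storj.io/storj/storagenode"
)

func main() {
	log := zap.NewExample()

	// A valid config: email may be empty (warn only), wallet must match the pattern.
	op := storagenode.OperatorConfig{
		Email:  "operator@example.com",
		Wallet: "0x0123456789abcdef0123456789abcdef01234567",
	}
	if err := op.Verify(log); err != nil {
		log.Sugar().Fatal(err)
	}

	// A missing or malformed wallet address fails verification.
	bad := storagenode.OperatorConfig{Wallet: "not-a-wallet"}
	if err := bad.Verify(log); err != nil {
		log.Sugar().Info("expected failure: ", err)
	}
}
```

Verify only warns about a missing email but rejects a missing or malformed wallet, matching the checks in the file above.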
@@ -10,12 +10,11 @@ import (
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
-	monkit "gopkg.in/spacemonkeygo/monkit.v2"
+	"gopkg.in/spacemonkeygo/monkit.v2"
 
 	"storj.io/storj/internal/errs2"
 	"storj.io/storj/internal/version"
 	"storj.io/storj/pkg/identity"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/peertls/extensions"
 	"storj.io/storj/pkg/peertls/tlsopts"
@@ -65,17 +64,16 @@ type DB interface {
 	UsedSerials() piecestore.UsedSerials
 	Reputation() reputation.DB
 	StorageUsage() storageusage.DB
 
-	// TODO: use better interfaces
-	RoutingTable() (kdb, ndb, adb storage.KeyValueStore)
 }
 
 // Config is all the configuration parameters for a Storage Node
 type Config struct {
 	Identity identity.Config
 
 	Server   server.Config
-	Kademlia kademlia.Config
+	Contact  contact.Config
+	Operator OperatorConfig
 
 	// TODO: flatten storage config and only keep the new one
 	Storage piecestore.OldConfig
@@ -91,13 +89,11 @@ type Config struct {
 	Version version.Config
 
 	Bandwidth bandwidth.Config
-
-	Contact contact.Config
 }
 
 // Verify verifies whether configuration is consistent and acceptable.
 func (config *Config) Verify(log *zap.Logger) error {
-	return config.Kademlia.Verify(log)
+	return config.Operator.Verify(log)
 }
 
 // Peer is the representation of a Storage Node.
@@ -117,16 +113,12 @@ type Peer struct {
 
 	// services and endpoints
 	// TODO: similar grouping to satellite.Peer
-	Kademlia struct {
-		RoutingTable *kademlia.RoutingTable
-		Service      *kademlia.Kademlia
-		Endpoint     *kademlia.Endpoint
-		Inspector    *kademlia.Inspector
-	}
 
 	Contact struct {
-		Endpoint  *contact.Endpoint
+		Service   *contact.Service
 		Chore     *contact.Chore
+		Endpoint  *contact.Endpoint
+		KEndpoint *contact.KademliaEndpoint
 		PingStats *contact.PingStats
 	}
 
@@ -195,36 +187,29 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 		}
 	}
 
-	{ // setup trust pool before kademlia
+	{ // setup trust pool
 		peer.Storage2.Trust, err = trust.NewPool(peer.Transport, config.Storage.WhitelistedSatellites)
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
 	}
 
-	// Set up Contact.PingStats before Kademlia (until Kademlia goes away, at which point this can
-	// be folded back in with the Contact setup block). Both services must share pointers to this
-	// PingStats instance for now.
-	peer.Contact.PingStats = &contact.PingStats{}
-
-	{ // setup kademlia
-		config := config.Kademlia
-		// TODO: move this setup logic into kademlia package
-		if config.ExternalAddress == "" {
-			config.ExternalAddress = peer.Addr()
+	{ // setup contact service
+		c := config.Contact
+		if c.ExternalAddress == "" {
+			c.ExternalAddress = peer.Addr()
 		}
 
 		pbVersion, err := versionInfo.Proto()
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
 
 		self := &overlay.NodeDossier{
 			Node: pb.Node{
 				Id: peer.ID(),
 				Address: &pb.NodeAddress{
 					Transport: pb.NodeTransport_TCP_TLS_GRPC,
-					Address:   config.ExternalAddress,
+					Address:   c.ExternalAddress,
 				},
 			},
 			Type: pb.NodeType_STORAGE,
@@ -234,34 +219,15 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 			},
 			Version: *pbVersion,
 		}
-		kdb, ndb, adb := peer.DB.RoutingTable()
-		peer.Kademlia.RoutingTable, err = kademlia.NewRoutingTable(peer.Log.Named("routing"), self, kdb, ndb, adb, &config.RoutingTableConfig)
-		if err != nil {
-			return nil, errs.Combine(err, peer.Close())
-		}
-
-		peer.Transport = peer.Transport.WithObservers(peer.Kademlia.RoutingTable)
-
-		peer.Kademlia.Service, err = kademlia.NewService(peer.Log.Named("kademlia"), peer.Transport, peer.Kademlia.RoutingTable, config)
-		if err != nil {
-			return nil, errs.Combine(err, peer.Close())
-		}
-
-		peer.Kademlia.Endpoint = kademlia.NewEndpoint(peer.Log.Named("kademlia:endpoint"), peer.Kademlia.Service, peer.Contact.PingStats, peer.Kademlia.RoutingTable, peer.Storage2.Trust)
-		pb.RegisterNodesServer(peer.Server.GRPC(), peer.Kademlia.Endpoint)
-		pb.DRPCRegisterNodes(peer.Server.DRPC(), peer.Kademlia.Endpoint)
-
-		peer.Kademlia.Inspector = kademlia.NewInspector(peer.Kademlia.Service, peer.Identity)
-		pb.RegisterKadInspectorServer(peer.Server.PrivateGRPC(), peer.Kademlia.Inspector)
-		pb.DRPCRegisterKadInspector(peer.Server.PrivateDRPC(), peer.Kademlia.Inspector)
-	}
-
-	{ // setup contact service
-		peer.Contact.Chore = contact.NewChore(peer.Log.Named("contact:chore"), config.Contact.Interval, config.Contact.MaxSleep, peer.Storage2.Trust, peer.Transport, peer.Kademlia.RoutingTable)
+		peer.Contact.PingStats = new(contact.PingStats)
+		peer.Contact.Service = contact.NewService(peer.Log.Named("contact:service"), self)
+		peer.Contact.Chore = contact.NewChore(peer.Log.Named("contact:chore"), config.Contact.Interval, config.Contact.MaxSleep, peer.Storage2.Trust, peer.Transport, peer.Contact.Service)
 		peer.Contact.Endpoint = contact.NewEndpoint(peer.Log.Named("contact:endpoint"), peer.Contact.PingStats)
+		peer.Contact.KEndpoint = contact.NewKademliaEndpoint(peer.Log.Named("contact:nodes_service_endpoint"), peer.Contact.Service, peer.Storage2.Trust)
 		pb.RegisterContactServer(peer.Server.GRPC(), peer.Contact.Endpoint)
+		pb.RegisterNodesServer(peer.Server.GRPC(), peer.Contact.KEndpoint)
 		pb.DRPCRegisterContact(peer.Server.DRPC(), peer.Contact.Endpoint)
+		pb.DRPCRegisterNodes(peer.Server.DRPC(), peer.Contact.KEndpoint)
 	}
 
 	{ // setup storage
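The contact chore itself is outside this excerpt, so the diff does not show how MaxSleep is consumed; the commit description presents it as a random start-up jitter capped below the configured maximum. The following is only a self-contained illustration of that idea under that assumption, not the project's actual chore code.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomJitter returns a uniformly random wait in [0, max).
// With max = 0 (the dev default for contact.Config.MaxSleep) it returns 0,
// so tests and storj-sim start pinging immediately.
func randomJitter(max time.Duration) time.Duration {
	if max <= 0 {
		return 0
	}
	return time.Duration(rand.Int63n(int64(max)))
}

func main() {
	fmt.Println("sleeping for", randomJitter(45*time.Minute), "before the first satellite ping")
}
```

With the release default of 45m this spreads initial check-ins out over time, which matches the config comment above about reducing traffic congestion to the satellite.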
@@ -283,8 +249,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 
 		peer.Storage2.Monitor = monitor.NewService(
 			log.Named("piecestore:monitor"),
-			peer.Kademlia.RoutingTable,
 			peer.Storage2.Store,
+			peer.Contact.Service,
 			peer.DB.Bandwidth(),
 			config.Storage.AllocatedDiskSpace.Int64(),
 			config.Storage.AllocatedBandwidth.Int64(),
@@ -363,13 +329,13 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 			peer.Version,
 			config.Storage.AllocatedBandwidth,
 			config.Storage.AllocatedDiskSpace,
-			config.Kademlia.Operator.Wallet,
+			config.Operator.Wallet,
 			versionInfo,
 			peer.Storage2.Trust,
 			peer.DB.Reputation(),
 			peer.DB.StorageUsage(),
 			peer.Contact.PingStats,
-			peer.Local().Id)
+			peer.Contact.Service)
 
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
@@ -392,11 +358,12 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 		peer.Storage2.Inspector = inspector.NewEndpoint(
 			peer.Log.Named("pieces:inspector"),
 			peer.Storage2.Store,
-			peer.Kademlia.Service,
+			peer.Contact.Service,
 			peer.Contact.PingStats,
 			peer.DB.Bandwidth(),
 			config.Storage,
 			peer.Console.Listener.Addr(),
+			config.Contact.ExternalAddress,
 		)
 		pb.RegisterPieceStoreInspectorServer(peer.Server.PrivateGRPC(), peer.Storage2.Inspector)
 		pb.DRPCRegisterPieceStoreInspector(peer.Server.PrivateDRPC(), peer.Storage2.Inspector)
@@ -418,14 +385,9 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(peer.Version.Run(ctx))
 	})
 
 	group.Go(func() error {
-		return errs2.IgnoreCanceled(peer.Kademlia.Service.Bootstrap(ctx))
+		return errs2.IgnoreCanceled(peer.Contact.Chore.Run(ctx))
 	})
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(peer.Kademlia.Service.Run(ctx))
-	})
-
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(peer.Collector.Run(ctx))
 	})
@@ -462,10 +424,6 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
 		return errs2.IgnoreCanceled(peer.Console.Endpoint.Run(ctx))
 	})
-
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(peer.Contact.Chore.Run(ctx))
-	})
 
 	return group.Wait()
 }
 
@@ -481,7 +439,6 @@ func (peer *Peer) Close() error {
 	}
 
 	// close services in reverse initialization order
-
 	if peer.Contact.Chore != nil {
 		errlist.Add(peer.Contact.Chore.Close())
 	}
@@ -504,12 +461,6 @@ func (peer *Peer) Close() error {
 		errlist.Add(peer.Collector.Close())
 	}
 
-	if peer.Kademlia.Service != nil {
-		errlist.Add(peer.Kademlia.Service.Close())
-	}
-	if peer.Kademlia.RoutingTable != nil {
-		errlist.Add(peer.Kademlia.RoutingTable.Close())
-	}
 	if peer.Console.Endpoint != nil {
 		errlist.Add(peer.Console.Endpoint.Close())
 	} else if peer.Console.Listener != nil {
@@ -527,7 +478,7 @@ func (peer *Peer) Close() error {
 func (peer *Peer) ID() storj.NodeID { return peer.Identity.ID }
 
 // Local returns the peer local node info.
-func (peer *Peer) Local() overlay.NodeDossier { return peer.Kademlia.RoutingTable.Local() }
+func (peer *Peer) Local() overlay.NodeDossier { return peer.Contact.Service.Local() }
 
 // Addr returns the public address.
 func (peer *Peer) Addr() string { return peer.Server.Addr().String() }
@@ -15,13 +15,11 @@ import (
 	_ "github.com/mattn/go-sqlite3" // used indirectly.
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
-	monkit "gopkg.in/spacemonkeygo/monkit.v2"
+	"gopkg.in/spacemonkeygo/monkit.v2"
 
 	"storj.io/storj/internal/dbutil"
 	"storj.io/storj/internal/migrate"
-	"storj.io/storj/pkg/kademlia"
 	"storj.io/storj/storage"
-	"storj.io/storj/storage/boltdb"
 	"storj.io/storj/storage/filestore"
 	"storj.io/storj/storagenode"
 	"storj.io/storj/storagenode/bandwidth"
@@ -69,10 +67,9 @@ type SQLDB interface {
 // Config configures storage node database
 type Config struct {
 	// TODO: figure out better names
 	Storage string
 	Info    string
 	Info2   string
-	Kademlia string
 
 	Pieces string
 }
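With the Kademlia path gone, opening the storage node database only needs the SQLite and piece-store locations. A minimal sketch, assuming a writable placeholder directory; the field values mirror the test configs further down in this diff.

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"go.uber.org/zap"

	"storj.io/storj/storagenode/storagenodedb"
)

func main() {
	// Placeholder directory; the tests below use ctx.Dir("storage") instead.
	storageDir := filepath.Join(os.TempDir(), "storagenode-example")
	if err := os.MkdirAll(storageDir, 0700); err != nil {
		log.Fatal(err)
	}

	// Note: no Kademlia field any more, only the SQLite and piece-store paths.
	db, err := storagenodedb.New(zap.NewNop(), storagenodedb.Config{
		Pieces:  storageDir,
		Storage: storageDir,
		Info:    filepath.Join(storageDir, "piecestore.db"),
		Info2:   filepath.Join(storageDir, "info.db"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()
}
```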
@@ -99,8 +96,6 @@ type DB struct {
 	usedSerialsDB *usedSerialsDB
 	satellitesDB  *satellitesDB
 
-	kdb, ndb, adb storage.KeyValueStore
-
 	sqlDatabases map[string]*sql.DB
 }
 
@@ -112,17 +107,9 @@ func New(log *zap.Logger, config Config) (*DB, error) {
 	}
 	pieces := filestore.New(log, piecesDir)
 
-	dbs, err := boltdb.NewShared(config.Kademlia, kademlia.KademliaBucket, kademlia.NodeBucket, kademlia.AntechamberBucket)
-	if err != nil {
-		return nil, err
-	}
-
 	db := &DB{
 		log:    log,
 		pieces: pieces,
-		kdb:    dbs[0],
-		ndb:    dbs[1],
-		adb:    dbs[2],
 
 		dbDirectory: filepath.Dir(config.Info2),
 
@@ -156,6 +143,7 @@ func (db *DB) openDatabases() error {
 	if err != nil {
 		return errs.Combine(err, db.closeDatabases())
 	}
+
 	db.deprecatedInfoDB.Configure(deprecatedInfoDB)
 	db.bandwidthDB.Configure(deprecatedInfoDB)
 	db.ordersDB.Configure(deprecatedInfoDB)
@@ -204,13 +192,7 @@ func (db *DB) CreateTables() error {
 
 // Close closes any resources.
 func (db *DB) Close() error {
-	return errs.Combine(
-		db.kdb.Close(),
-		db.ndb.Close(),
-		db.adb.Close(),
-
-		db.closeDatabases(),
-	)
+	return db.closeDatabases()
 }
 
 // closeDatabases closes all the SQLite database connections and removes them from the associated maps.
@@ -283,11 +265,6 @@ func (db *DB) UsedSerials() piecestore.UsedSerials {
 	return db.usedSerialsDB
 }
 
-// RoutingTable returns kademlia routing table
-func (db *DB) RoutingTable() (kdb, ndb, adb storage.KeyValueStore) {
-	return db.kdb, db.ndb, db.adb
-}
-
 // RawDatabases are required for testing purposes
 func (db *DB) RawDatabases() map[string]SQLDB {
 	return map[string]SQLDB{
@@ -80,11 +80,10 @@ func TestMigrate(t *testing.T) {
 
 	storageDir := ctx.Dir("storage")
 	cfg := storagenodedb.Config{
 		Pieces:   storageDir,
 		Storage:  storageDir,
 		Info:     filepath.Join(storageDir, "piecestore.db"),
 		Info2:    filepath.Join(storageDir, "info.db"),
-		Kademlia: filepath.Join(storageDir, "kademlia"),
 	}
 
 	// create a new satellitedb connection
@@ -28,11 +28,10 @@ func Run(t *testing.T, test func(t *testing.T, db storagenode.DB)) {
 
 		storageDir := ctx.Dir("storage")
 		cfg := storagenodedb.Config{
 			Storage:  storageDir,
 			Info:     filepath.Join(storageDir, "piecestore.db"),
 			Info2:    filepath.Join(storageDir, "info.db"),
 			Pieces:   storageDir,
-			Kademlia: filepath.Join(storageDir, "kad.db"),
 		}
 
 		db, err := storagenodedb.New(log, cfg)
@@ -38,9 +38,8 @@ func TestFileConcurrency(t *testing.T) {
 	log := zaptest.NewLogger(t)
 
 	db, err := storagenodedb.New(log, storagenodedb.Config{
 		Pieces:   ctx.Dir("storage"),
 		Info2:    ctx.Dir("storage") + "/info.db",
-		Kademlia: ctx.Dir("storage") + "/kademlia",
 	})
 	if err != nil {
 		t.Fatal(err)
@@ -58,11 +57,10 @@ func TestInMemoryConcurrency(t *testing.T) {
 
 	storageDir := ctx.Dir("storage")
 	cfg := storagenodedb.Config{
 		Pieces:   storageDir,
 		Storage:  storageDir,
 		Info:     filepath.Join(storageDir, "piecestore.db"),
 		Info2:    filepath.Join(storageDir, "info.db"),
-		Kademlia: filepath.Join(storageDir, "kademlia"),
 	}
 
 	db, err := storagenodedb.New(log, cfg)