724bb44723
What: cmd/inspector/main.go: removes kad commands internal/testplanet/planet.go: Waits for contact chore to finish satellite/contact/nodesservice.go: creates an empty nodes service implementation satellite/contact/service.go: implements Local and FetchInfo methods & adds external address config value satellite/discovery/service.go: replaces kad.FetchInfo with contact.FetchInfo in Refresh() & removes Discover() satellite/peer.go: sets up contact service and endpoints storagenode/console/service.go: replaces nodeID with contact.Local() storagenode/contact/chore.go: replaces routing table with contact service storagenode/contact/nodesservice.go: creates empty implementation for ping and request info nodes service & implements RequestInfo method storagenode/contact/service.go: creates a service to return the local node and update its own capacity storagenode/monitor/monitor.go: uses contact service in place of routing table storagenode/operator.go: moves operatorconfig from kad into its own setup storagenode/peer.go: sets up contact service, chore, pingstats and endpoints satellite/overlay/config.go: changes NodeSelectionConfig.OnlineWindow default to 4hr to allow for accurate repair selection Removes kademlia setups in: cmd/storagenode/main.go cmd/storj-sim/network.go internal/testplanet/planet.go internal/testplanet/satellite.go internal/testplanet/storagenode.go satellite/peer.go scripts/test-sim-backwards.sh scripts/testdata/satellite-config.yaml.lock storagenode/inspector/inspector.go storagenode/peer.go storagenode/storagenodedb/database.go Why: Replacing Kademlia Please describe the tests: • internal/testplanet/planet_test.go: TestBasic: assert that the storagenode can check in with the satellite without any errors TestContact: test that all nodes get inserted into both satellites' overlay cache during testplanet setup • satellite/contact/contact_test.go: TestFetchInfo: Tests that the FetchInfo method returns the correct info • 
storagenode/contact/contact_test.go: TestNodeInfoUpdated: tests that the contact chore updates the node information TestRequestInfoEndpoint: tests that the Request info endpoint returns the correct info Please describe the performance impact: Node discovery should be at least slightly more performant since each node connects directly to each satellite and no longer needs to wait for bootstrapping. It probably won't be faster in real time on start up since each node waits a random amount of time (less than 1 hr) to initialize its first connection (jitter).
143 lines
3.9 KiB
Go
143 lines
3.9 KiB
Go
// Copyright (C) 2019 Storj Labs, Inc.
|
|
// See LICENSE for copying information.
|
|
|
|
package inspector
|
|
|
|
import (
|
|
"context"
|
|
"net"
|
|
"time"
|
|
|
|
"github.com/golang/protobuf/ptypes"
|
|
"github.com/zeebo/errs"
|
|
"go.uber.org/zap"
|
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
|
|
|
"storj.io/storj/pkg/pb"
|
|
"storj.io/storj/storagenode/bandwidth"
|
|
"storj.io/storj/storagenode/contact"
|
|
"storj.io/storj/storagenode/pieces"
|
|
"storj.io/storj/storagenode/piecestore"
|
|
)
|
|
|
|
var (
	// mon collects monkit metrics for this package (see mon.Task() calls below).
	mon = monkit.Package()

	// Error is the default error class for piecestore inspector errors
	Error = errs.Class("piecestore inspector")
)
|
|
|
|
// Endpoint does inspectory things
//
// architecture: Endpoint
type Endpoint struct {
	log        *zap.Logger
	pieceStore *pieces.Store      // queried for total space used by stored pieces
	contact    *contact.Service   // provides the node's local identity and external address
	pingStats  *contact.PingStats // records when, by whom, and from where the node was last pinged
	usageDB    bandwidth.DB       // bandwidth usage records, summarized per month

	startTime        time.Time            // set at construction; used to report uptime
	pieceStoreConfig piecestore.OldConfig // supplies allocated disk/bandwidth limits
	dashboardAddress net.Addr             // address the local dashboard is served on
	externalAddress  string               // configured external address of this node
}
|
|
|
|
// NewEndpoint creates piecestore inspector instance
|
|
func NewEndpoint(
|
|
log *zap.Logger,
|
|
pieceStore *pieces.Store,
|
|
contact *contact.Service,
|
|
pingStats *contact.PingStats,
|
|
usageDB bandwidth.DB,
|
|
pieceStoreConfig piecestore.OldConfig,
|
|
dashbaordAddress net.Addr,
|
|
externalAddress string) *Endpoint {
|
|
|
|
return &Endpoint{
|
|
log: log,
|
|
pieceStore: pieceStore,
|
|
contact: contact,
|
|
pingStats: pingStats,
|
|
usageDB: usageDB,
|
|
pieceStoreConfig: pieceStoreConfig,
|
|
dashboardAddress: dashbaordAddress,
|
|
startTime: time.Now(),
|
|
externalAddress: externalAddress,
|
|
}
|
|
}
|
|
|
|
func (inspector *Endpoint) retrieveStats(ctx context.Context) (_ *pb.StatSummaryResponse, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
// Space Usage
|
|
totalUsedSpace, err := inspector.pieceStore.SpaceUsedForPieces(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
usage, err := bandwidth.TotalMonthlySummary(ctx, inspector.usageDB)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
ingress := usage.Put + usage.PutRepair
|
|
egress := usage.Get + usage.GetAudit + usage.GetRepair
|
|
|
|
totalUsedBandwidth := usage.Total()
|
|
|
|
return &pb.StatSummaryResponse{
|
|
UsedSpace: totalUsedSpace,
|
|
AvailableSpace: inspector.pieceStoreConfig.AllocatedDiskSpace.Int64() - totalUsedSpace,
|
|
UsedIngress: ingress,
|
|
UsedEgress: egress,
|
|
UsedBandwidth: totalUsedBandwidth,
|
|
AvailableBandwidth: inspector.pieceStoreConfig.AllocatedBandwidth.Int64() - totalUsedBandwidth,
|
|
}, nil
|
|
}
|
|
|
|
// Stats returns current statistics about the storage node
|
|
func (inspector *Endpoint) Stats(ctx context.Context, in *pb.StatsRequest) (out *pb.StatSummaryResponse, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
statsSummary, err := inspector.retrieveStats(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return statsSummary, nil
|
|
}
|
|
|
|
func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *pb.DashboardResponse, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
statsSummary, err := inspector.retrieveStats(ctx)
|
|
if err != nil {
|
|
return &pb.DashboardResponse{}, Error.Wrap(err)
|
|
}
|
|
|
|
lastPingedAt, lastPingFromID, lastPingFromAddress := inspector.pingStats.WhenLastPinged()
|
|
|
|
return &pb.DashboardResponse{
|
|
NodeId: inspector.contact.Local().Id,
|
|
InternalAddress: "",
|
|
ExternalAddress: inspector.contact.Local().Address.Address,
|
|
LastPinged: lastPingedAt,
|
|
LastPingFromId: &lastPingFromID,
|
|
LastPingFromAddress: lastPingFromAddress,
|
|
DashboardAddress: inspector.dashboardAddress.String(),
|
|
Uptime: ptypes.DurationProto(time.Since(inspector.startTime)),
|
|
Stats: statsSummary,
|
|
}, nil
|
|
}
|
|
|
|
// Dashboard returns dashboard information
|
|
func (inspector *Endpoint) Dashboard(ctx context.Context, in *pb.DashboardRequest) (out *pb.DashboardResponse, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
data, err := inspector.getDashboardData(ctx)
|
|
if err != nil {
|
|
inspector.log.Warn("unable to get dashboard information")
|
|
return nil, err
|
|
}
|
|
return data, nil
|
|
}
|