storj/storagenode/console/service.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package console

import (
	"context"
	"math"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/private/version"
	"storj.io/storj/private/date"
	"storj.io/storj/private/version/checker"
	"storj.io/storj/storagenode/bandwidth"
	"storj.io/storj/storagenode/contact"
	"storj.io/storj/storagenode/heldamount"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pricing"
	"storj.io/storj/storagenode/reputation"
	"storj.io/storj/storagenode/satellites"
	"storj.io/storj/storagenode/storageusage"
	"storj.io/storj/storagenode/trust"
)

var (
	// SNOServiceErr defines sno service error.
	SNOServiceErr = errs.Class("storage node dashboard service error")

	mon = monkit.Package()
)

// Service handles storage node operator related logic.
//
// architecture: Service
type Service struct {
	log                *zap.Logger
	trust              *trust.Pool
	bandwidthDB        bandwidth.DB
	reputationDB       reputation.DB
	storageUsageDB     storageusage.DB
	pricingDB          pricing.DB
	satelliteDB        satellites.DB
	pieceStore         *pieces.Store
	contact            *contact.Service
	version            *checker.Service
	pingStats          *contact.PingStats
	allocatedDiskSpace memory.Size
	walletAddress      string
	startedAt          time.Time
	versionInfo        version.Info
}

// NewService returns a new instance of Service.
func NewService(log *zap.Logger, bandwidth bandwidth.DB, pieceStore *pieces.Store, version *checker.Service,
	allocatedDiskSpace memory.Size, walletAddress string, versionInfo version.Info, trust *trust.Pool,
	reputationDB reputation.DB, storageUsageDB storageusage.DB, pricingDB pricing.DB, satelliteDB satellites.DB, pingStats *contact.PingStats, contact *contact.Service) (*Service, error) {
	if log == nil {
		return nil, errs.New("log can't be nil")
	}
	if bandwidth == nil {
		return nil, errs.New("bandwidth can't be nil")
	}
	if pieceStore == nil {
		return nil, errs.New("pieceStore can't be nil")
	}
	if version == nil {
		return nil, errs.New("version can't be nil")
	}
	if pingStats == nil {
		return nil, errs.New("pingStats can't be nil")
	}
	if contact == nil {
		return nil, errs.New("contact service can't be nil")
	}

	return &Service{
		log:                log,
		trust:              trust,
		bandwidthDB:        bandwidth,
		reputationDB:       reputationDB,
		storageUsageDB:     storageUsageDB,
		pricingDB:          pricingDB,
		satelliteDB:        satelliteDB,
		pieceStore:         pieceStore,
		version:            version,
		pingStats:          pingStats,
		allocatedDiskSpace: allocatedDiskSpace,
		contact:            contact,
		walletAddress:      walletAddress,
		startedAt:          time.Now(),
		versionInfo:        versionInfo,
	}, nil
}

// SatelliteInfo encapsulates a satellite's ID, URL, and disqualification/suspension status.
type SatelliteInfo struct {
	ID           storj.NodeID `json:"id"`
	URL          string       `json:"url"`
	Disqualified *time.Time   `json:"disqualified"`
	Suspended    *time.Time   `json:"suspended"`
}

// Dashboard encapsulates dashboard stale data.
type Dashboard struct {
	NodeID         storj.NodeID    `json:"nodeID"`
	Wallet         string          `json:"wallet"`
	Satellites     []SatelliteInfo `json:"satellites"`
	DiskSpace      DiskSpaceInfo   `json:"diskSpace"`
	Bandwidth      BandwidthInfo   `json:"bandwidth"`
	LastPinged     time.Time       `json:"lastPinged"`
	Version        version.SemVer  `json:"version"`
	AllowedVersion version.SemVer  `json:"allowedVersion"`
	UpToDate       bool            `json:"upToDate"`
	StartedAt      time.Time       `json:"startedAt"`
}

// GetDashboardData returns stale dashboard data.
func (s *Service) GetDashboardData(ctx context.Context) (_ *Dashboard, err error) {
	defer mon.Task()(&ctx)(&err)

	data := new(Dashboard)

	data.NodeID = s.contact.Local().ID
	data.Wallet = s.walletAddress
	data.Version = s.versionInfo.Version
	data.StartedAt = s.startedAt

	data.LastPinged = s.pingStats.WhenLastPinged()
	data.AllowedVersion, data.UpToDate = s.version.IsAllowed(ctx)

	stats, err := s.reputationDB.All(ctx)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}
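
	// Each reputation record corresponds to a satellite this node works with;
	// its display address is resolved from the trust pool below.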
	for _, rep := range stats {
		url, err := s.trust.GetNodeURL(ctx, rep.SatelliteID)
		if err != nil {
			return nil, SNOServiceErr.Wrap(err)
		}

		data.Satellites = append(data.Satellites,
			SatelliteInfo{
				ID:           rep.SatelliteID,
				Disqualified: rep.Disqualified,
				Suspended:    rep.Suspended,
				URL:          url.Address,
			},
		)
	}

	pieceTotal, _, err := s.pieceStore.SpaceUsedForPieces(ctx)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	trash, err := s.pieceStore.SpaceUsedForTrash(ctx)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	bandwidthUsage, err := s.bandwidthDB.MonthSummary(ctx, time.Now())
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	// Temporary solution: if the computed free space would be negative, recalculate the
	// available disk space for the directory and derive used space from it instead.
	// TODO: find the real reason for the negative space; the garbage collector calculates trash correctly.
	if s.allocatedDiskSpace.Int64()-pieceTotal-trash < 0 {
		status, err := s.pieceStore.StorageStatus(ctx)
		if err != nil {
			return nil, SNOServiceErr.Wrap(err)
		}
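
		// Derive used space from the OS-reported free space: used = allocated - free - trash.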
		data.DiskSpace = DiskSpaceInfo{
			Used:      s.allocatedDiskSpace.Int64() - status.DiskFree - trash,
			Available: s.allocatedDiskSpace.Int64(),
			Trash:     trash,
		}
	} else {
		data.DiskSpace = DiskSpaceInfo{
			Used:      pieceTotal,
			Available: s.allocatedDiskSpace.Int64(),
			Trash:     trash,
		}
	}

	data.Bandwidth = BandwidthInfo{
		Used: bandwidthUsage,
	}

	return data, nil
}

// PriceModel is a satellite's set of prices for storagenode usage, per TB (disk space per TB-month).
type PriceModel struct {
	EgressBandwidth int64
	RepairBandwidth int64
	AuditBandwidth  int64
	DiskSpace       int64
}

// Satellite encapsulates satellite related data.
type Satellite struct {
	ID               storj.NodeID            `json:"id"`
	StorageDaily     []storageusage.Stamp    `json:"storageDaily"`
	BandwidthDaily   []bandwidth.UsageRollup `json:"bandwidthDaily"`
	StorageSummary   float64                 `json:"storageSummary"`
	BandwidthSummary int64                   `json:"bandwidthSummary"`
	EgressSummary    int64                   `json:"egressSummary"`
	IngressSummary   int64                   `json:"ingressSummary"`
	Audit            reputation.Metric       `json:"audit"`
	Uptime           reputation.Metric       `json:"uptime"`
	PriceModel       PriceModel              `json:"priceModel"`
	NodeJoinedAt     time.Time               `json:"nodeJoinedAt"`
}

// GetSatelliteData returns satellite related data.
func (s *Service) GetSatelliteData(ctx context.Context, satelliteID storj.NodeID) (_ *Satellite, err error) {
	defer mon.Task()(&ctx)(&err)

	from, to := date.MonthBoundary(time.Now().UTC())

	bandwidthDaily, err := s.bandwidthDB.GetDailySatelliteRollups(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	storageDaily, err := s.storageUsageDB.GetDaily(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	bandwidthSummary, err := s.bandwidthDB.SatelliteSummary(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	egressSummary, err := s.bandwidthDB.SatelliteEgressSummary(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	ingressSummary, err := s.bandwidthDB.SatelliteIngressSummary(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	storageSummary, err := s.storageUsageDB.SatelliteSummary(ctx, satelliteID, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	rep, err := s.reputationDB.Get(ctx, satelliteID)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	pricingModel, err := s.pricingDB.Get(ctx, satelliteID)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	satellitePricing := PriceModel{
		EgressBandwidth: pricingModel.EgressBandwidth,
		RepairBandwidth: pricingModel.RepairBandwidth,
		AuditBandwidth:  pricingModel.AuditBandwidth,
		DiskSpace:       pricingModel.DiskSpace,
	}

	return &Satellite{
		ID:               satelliteID,
		StorageDaily:     storageDaily,
		BandwidthDaily:   bandwidthDaily,
		StorageSummary:   storageSummary,
		BandwidthSummary: bandwidthSummary.Total(),
		EgressSummary:    egressSummary.Total(),
		IngressSummary:   ingressSummary.Total(),
		Audit:            rep.Audit,
		Uptime:           rep.Uptime,
		PriceModel:       satellitePricing,
		NodeJoinedAt:     rep.JoinedAt,
	}, nil
}

// Satellites represents consolidated data across all satellites.
type Satellites struct {
	StorageDaily     []storageusage.Stamp    `json:"storageDaily"`
	BandwidthDaily   []bandwidth.UsageRollup `json:"bandwidthDaily"`
	StorageSummary   float64                 `json:"storageSummary"`
	BandwidthSummary int64                   `json:"bandwidthSummary"`
	EgressSummary    int64                   `json:"egressSummary"`
	IngressSummary   int64                   `json:"ingressSummary"`
	EarliestJoinedAt time.Time               `json:"earliestJoinedAt"`
	Audits           []Audits                `json:"audits"`
}

// Audits represents audit metrics across all satellites.
type Audits struct {
	Audit         reputation.Metric `json:"audit"`
	SatelliteName string            `json:"satelliteName"`
}

// GetAllSatellitesData returns bandwidth and storage daily usage consolidated
// across all satellites from the node's trust pool.
func (s *Service) GetAllSatellitesData(ctx context.Context) (_ *Satellites, err error) {
	defer mon.Task()(&ctx)(&err)
	from, to := date.MonthBoundary(time.Now().UTC())

	var audits []Audits

	bandwidthDaily, err := s.bandwidthDB.GetDailyRollups(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	storageDaily, err := s.storageUsageDB.GetDailyTotal(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	bandwidthSummary, err := s.bandwidthDB.Summary(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	egressSummary, err := s.bandwidthDB.EgressSummary(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	ingressSummary, err := s.bandwidthDB.IngressSummary(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	storageSummary, err := s.storageUsageDB.Summary(ctx, from, to)
	if err != nil {
		return nil, SNOServiceErr.Wrap(err)
	}

	satellitesIDs := s.trust.GetSatellites(ctx)
	joinedAt := time.Now().UTC()
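
	// Collect per-satellite audit metrics and track the earliest join date:
	// joinedAt starts at "now" and is lowered whenever an older record is found.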
	for i := 0; i < len(satellitesIDs); i++ {
		stats, err := s.reputationDB.Get(ctx, satellitesIDs[i])
		if err != nil {
			return nil, SNOServiceErr.Wrap(err)
		}

		url, err := s.trust.GetNodeURL(ctx, satellitesIDs[i])
		if err != nil {
			return nil, SNOServiceErr.Wrap(err)
		}

		audits = append(audits, Audits{
			Audit:         stats.Audit,
			SatelliteName: url.Address,
		})

		if !stats.JoinedAt.IsZero() && stats.JoinedAt.Before(joinedAt) {
			joinedAt = stats.JoinedAt
		}
	}

	return &Satellites{
		StorageDaily:     storageDaily,
		BandwidthDaily:   bandwidthDaily,
		StorageSummary:   storageSummary,
		BandwidthSummary: bandwidthSummary.Total(),
		EgressSummary:    egressSummary.Total(),
		IngressSummary:   ingressSummary.Total(),
		EarliestJoinedAt: joinedAt,
		Audits:           audits,
	}, nil
}

// VerifySatelliteID verifies if the satellite belongs to the trust pool.
func (s *Service) VerifySatelliteID(ctx context.Context, satelliteID storj.NodeID) (err error) {
	defer mon.Task()(&ctx)(&err)

	err = s.trust.VerifySatelliteID(ctx, satelliteID)
	if err != nil {
		return SNOServiceErr.Wrap(err)
	}

	return nil
}

// GetSatelliteEstimatedPayout returns the estimated payout for the current and previous months from a specific satellite at the current level of load.
func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (payout heldamount.EstimatedPayout, err error) {
	defer mon.Task()(&ctx)(&err)

	currentMonthPayout, err := s.estimatedPayoutMonthly(ctx, satelliteID, time.Now().UTC())
	if err != nil {
		return heldamount.EstimatedPayout{}, SNOServiceErr.Wrap(err)
	}

	previousMonthPayout, err := s.estimatedPayoutMonthly(ctx, satelliteID, time.Now().UTC().AddDate(0, -1, 0).UTC())
	if err != nil {
		return heldamount.EstimatedPayout{}, SNOServiceErr.Wrap(err)
	}

	payout.CurrentMonth = currentMonthPayout
	payout.PreviousMonth = previousMonthPayout

	return payout, nil
}

// GetAllSatellitesEstimatedPayout returns the estimated payout for the current and previous months from all satellites at the current level of load.
func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout heldamount.EstimatedPayout, err error) {
	defer mon.Task()(&ctx)(&err)

	satelliteIDs := s.trust.GetSatellites(ctx)
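
	// Accumulate the per-satellite monthly estimates into a single combined payout.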
	for i := 0; i < len(satelliteIDs); i++ {
		current, err := s.estimatedPayoutMonthly(ctx, satelliteIDs[i], time.Now().UTC())
		if err != nil {
			return heldamount.EstimatedPayout{}, SNOServiceErr.Wrap(err)
		}

		previous, err := s.estimatedPayoutMonthly(ctx, satelliteIDs[i], time.Now().UTC().AddDate(0, -1, 0).UTC())
		if err != nil {
			return heldamount.EstimatedPayout{}, SNOServiceErr.Wrap(err)
		}

		payout.CurrentMonth.Payout += current.Payout
		payout.CurrentMonth.EgressRepairAuditPayout += current.EgressRepairAuditPayout
		payout.CurrentMonth.DiskSpacePayout += current.DiskSpacePayout
		payout.CurrentMonth.DiskSpace += current.DiskSpace
		payout.CurrentMonth.EgressBandwidth += current.EgressBandwidth
		payout.CurrentMonth.EgressBandwidthPayout += current.EgressBandwidthPayout
		payout.CurrentMonth.EgressRepairAudit += current.EgressRepairAudit
		payout.CurrentMonth.Held += current.Held

		payout.PreviousMonth.Payout += previous.Payout
		payout.PreviousMonth.DiskSpacePayout += previous.DiskSpacePayout
		payout.PreviousMonth.DiskSpace += previous.DiskSpace
		payout.PreviousMonth.EgressBandwidth += previous.EgressBandwidth
		payout.PreviousMonth.EgressBandwidthPayout += previous.EgressBandwidthPayout
		payout.PreviousMonth.EgressRepairAuditPayout += previous.EgressRepairAuditPayout
		payout.PreviousMonth.EgressRepairAudit += previous.EgressRepairAudit
		payout.PreviousMonth.Held += previous.Held
	}

	return payout, nil
}

// estimatedPayoutMonthly returns the estimated payout data for the month containing the given time from a specific satellite.
func (s *Service) estimatedPayoutMonthly(ctx context.Context, satelliteID storj.NodeID, month time.Time) (payoutData heldamount.PayoutMonthly, err error) {
	defer mon.Task()(&ctx)(&err)

	priceModel, err := s.pricingDB.Get(ctx, satelliteID)
	if err != nil {
		return heldamount.PayoutMonthly{}, SNOServiceErr.Wrap(err)
	}

	stats, err := s.reputationDB.Get(ctx, satelliteID)
	if err != nil {
		return heldamount.PayoutMonthly{}, SNOServiceErr.Wrap(err)
	}

	heldRate := s.getHeldRate(stats.JoinedAt)
	payoutData.HeldRate = heldRate

	from, to := date.MonthBoundary(month)

	bandwidthDaily, err := s.bandwidthDB.GetDailySatelliteRollups(ctx, satelliteID, from, to)
	if err != nil {
		return heldamount.PayoutMonthly{}, SNOServiceErr.Wrap(err)
	}
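
	// Sum plain egress and repair+audit egress over the month's daily rollups (values are in bytes).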
	for i := 0; i < len(bandwidthDaily); i++ {
		payoutData.EgressBandwidth += bandwidthDaily[i].Egress.Usage
		payoutData.EgressRepairAudit += bandwidthDaily[i].Egress.Audit + bandwidthDaily[i].Egress.Repair
	}
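
	// Prices are per TB while usage is tracked in bytes, hence the division by 10^12.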
	payoutData.EgressBandwidthPayout += int64(float64(payoutData.EgressBandwidth*priceModel.EgressBandwidth) / math.Pow10(12))
	payoutData.EgressRepairAuditPayout += int64(float64(payoutData.EgressRepairAudit*priceModel.AuditBandwidth) / math.Pow10(12))

	storageDaily, err := s.storageUsageDB.GetDaily(ctx, satelliteID, from, to)
	if err != nil {
		return heldamount.PayoutMonthly{}, SNOServiceErr.Wrap(err)
	}

	for j := 0; j < len(storageDaily); j++ {
		payoutData.DiskSpace += storageDaily[j].AtRestTotal
	}

	// Divide by 720 (hours in a month) to report TBm instead of TBh.
	payoutData.DiskSpace /= 720
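
	// Apply the per-TB-month disk price; usage is in bytes, so divide by 10^12 again.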
	payoutData.DiskSpacePayout += int64(payoutData.DiskSpace * float64(priceModel.DiskSpace) / math.Pow10(12))

	payoutData.Held = (payoutData.DiskSpacePayout + payoutData.EgressBandwidthPayout + payoutData.EgressRepairAuditPayout) * heldRate / 100
	payoutData.Payout = payoutData.DiskSpacePayout + payoutData.EgressBandwidthPayout + payoutData.EgressRepairAuditPayout - payoutData.Held

	return payoutData, nil
}
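
// getHeldRate returns the percentage of the gross payout that is withheld, based on
// date.MonthsCountSince(joinTime): 75% for month counts 0-2, 50% for 3-5, 25% for 6-8,
// and 0% afterwards.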
func (s *Service) getHeldRate(joinTime time.Time) (heldRate int64) {
	monthsSinceJoin := date.MonthsCountSince(joinTime)

	switch monthsSinceJoin {
	case 0, 1, 2:
		heldRate = 75
	case 3, 4, 5:
		heldRate = 50
	case 6, 7, 8:
		heldRate = 25
	default:
		heldRate = 0
	}

	return heldRate
}