2019-01-10 13:13:27 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package storagenode
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-01-29 23:04:30 +00:00
|
|
|
"errors"
|
2020-04-15 14:57:14 +01:00
|
|
|
"fmt"
|
2022-03-11 13:47:20 +00:00
|
|
|
"io/fs"
|
2019-06-20 12:52:32 +01:00
|
|
|
"net"
|
2022-03-11 13:47:20 +00:00
|
|
|
"os"
|
2020-04-14 13:39:42 +01:00
|
|
|
"path/filepath"
|
2019-11-26 16:25:21 +00:00
|
|
|
"time"
|
2019-01-10 13:13:27 +00:00
|
|
|
|
2019-11-08 20:40:39 +00:00
|
|
|
"github.com/spacemonkeygo/monkit/v3"
|
2019-01-10 13:13:27 +00:00
|
|
|
"github.com/zeebo/errs"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
"golang.org/x/sync/errgroup"
|
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/identity"
|
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/peertls/extensions"
|
|
|
|
"storj.io/common/peertls/tlsopts"
|
|
|
|
"storj.io/common/rpc"
|
|
|
|
"storj.io/common/storj"
|
2020-03-23 19:18:20 +00:00
|
|
|
"storj.io/private/debug"
|
2020-03-23 19:30:31 +00:00
|
|
|
"storj.io/private/version"
|
2020-01-28 23:13:59 +00:00
|
|
|
"storj.io/storj/private/lifecycle"
|
2020-12-26 01:16:43 +00:00
|
|
|
"storj.io/storj/private/multinodepb"
|
2021-04-23 14:13:51 +01:00
|
|
|
"storj.io/storj/private/server"
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/version/checker"
|
2019-01-10 13:13:27 +00:00
|
|
|
"storj.io/storj/storage"
|
2020-04-14 13:39:42 +01:00
|
|
|
"storj.io/storj/storage/filestore"
|
2020-11-09 15:22:30 +00:00
|
|
|
"storj.io/storj/storagenode/apikeys"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/bandwidth"
|
2019-05-08 12:11:59 +01:00
|
|
|
"storj.io/storj/storagenode/collector"
|
2019-06-20 12:52:32 +01:00
|
|
|
"storj.io/storj/storagenode/console"
|
|
|
|
"storj.io/storj/storagenode/console/consoleserver"
|
2019-09-04 20:04:18 +01:00
|
|
|
"storj.io/storj/storagenode/contact"
|
2019-10-11 14:58:12 +01:00
|
|
|
"storj.io/storj/storagenode/gracefulexit"
|
storagenode: accept HTTP calls on public port, listening for monitoring requests
Today each storagenode should have a port which is opened for the internet, and handles DRPC protocol calls.
When we do a HTTP call on the DRPC endpoint, it hangs until a timeout.
This patch changes the behavior: the main DRPC port of the storagenodes can accept HTTP requests and can be used to monitor the status of the node:
* if returns with HTTP 200 only if the storagnode is healthy (not suspended / disqualified + online score > 0.9)
* it CAN include information about the current status (per satellite). It's opt-in, you should configure it so.
In this way it becomes extremely easy to monitor storagenodes with external uptime services.
Note: this patch exposes some information which was not easily available before (especially the node status, and used satellites). I think it should be acceptable:
* Until having more community satellites, all storagenodes are connected to the main Storj satellites.
* With community satellites, it's good thing to have more transparency (easy way to check who is connected to which satellites)
The implementation is based on this line:
```
http.Serve(NewPrefixedListener([]byte("GET / HT"), publicMux.Route("GET / HT")), p.public.http)
```
This line answers to the TCP requests with `GET / HT...` (GET HTTP request to the route), but puts back the removed prefix.
Change-Id: I3700c7e24524850825ecdf75a4bcc3b4afcb3a74
2022-08-23 11:28:41 +01:00
|
|
|
"storj.io/storj/storagenode/healthcheck"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/inspector"
|
2020-10-30 12:51:26 +00:00
|
|
|
"storj.io/storj/storagenode/internalpb"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/monitor"
|
2020-12-26 01:16:43 +00:00
|
|
|
"storj.io/storj/storagenode/multinode"
|
2019-06-26 19:55:22 +01:00
|
|
|
"storj.io/storj/storagenode/nodestats"
|
2019-12-16 17:59:01 +00:00
|
|
|
"storj.io/storj/storagenode/notifications"
|
2021-03-17 18:47:23 +00:00
|
|
|
"storj.io/storj/storagenode/operator"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/orders"
|
2021-01-14 16:41:36 +00:00
|
|
|
"storj.io/storj/storagenode/payouts"
|
|
|
|
"storj.io/storj/storagenode/payouts/estimatedpayouts"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/pieces"
|
|
|
|
"storj.io/storj/storagenode/piecestore"
|
2020-05-27 22:07:24 +01:00
|
|
|
"storj.io/storj/storagenode/piecestore/usedserials"
|
2020-10-20 20:58:54 +01:00
|
|
|
"storj.io/storj/storagenode/piecetransfer"
|
2020-01-10 01:58:59 +00:00
|
|
|
"storj.io/storj/storagenode/preflight"
|
2020-04-10 15:03:14 +01:00
|
|
|
"storj.io/storj/storagenode/pricing"
|
2019-08-08 14:47:04 +01:00
|
|
|
"storj.io/storj/storagenode/reputation"
|
2019-08-19 19:52:47 +01:00
|
|
|
"storj.io/storj/storagenode/retain"
|
2019-10-01 15:34:03 +01:00
|
|
|
"storj.io/storj/storagenode/satellites"
|
2020-04-14 13:39:42 +01:00
|
|
|
"storj.io/storj/storagenode/storagenodedb"
|
2019-08-08 14:47:04 +01:00
|
|
|
"storj.io/storj/storagenode/storageusage"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/trust"
|
2020-02-21 17:41:54 +00:00
|
|
|
version2 "storj.io/storj/storagenode/version"
|
2022-03-11 13:47:20 +00:00
|
|
|
storagenodeweb "storj.io/storj/web/storagenode"
|
2019-01-10 13:13:27 +00:00
|
|
|
)
|
|
|
|
|
2019-06-04 13:31:39 +01:00
|
|
|
var (
|
|
|
|
mon = monkit.Package()
|
|
|
|
)
|
|
|
|
|
2020-12-05 16:01:42 +00:00
|
|
|
// DB is the master database for Storage Node.
//
// architecture: Master Database
type DB interface {
	// MigrateToLatest initializes the database
	MigrateToLatest(ctx context.Context) error

	// Close closes the database
	Close() error

	// Pieces returns the blob storage used for piece data.
	Pieces() storage.Blobs

	// Orders returns the database for orders awaiting settlement.
	Orders() orders.DB
	// V0PieceInfo returns the legacy (v0) piece metadata database.
	V0PieceInfo() pieces.V0PieceInfoDB
	// PieceExpirationDB returns the database tracking piece expiration times.
	PieceExpirationDB() pieces.PieceExpirationDB
	// PieceSpaceUsedDB returns the database tracking disk space used by pieces.
	PieceSpaceUsedDB() pieces.PieceSpaceUsedDB
	// Bandwidth returns the database of bandwidth usage.
	Bandwidth() bandwidth.DB
	// Reputation returns the database of per-satellite reputation stats.
	Reputation() reputation.DB
	// StorageUsage returns the database of storage usage over time.
	StorageUsage() storageusage.DB
	// Satellites returns the database of known satellites.
	Satellites() satellites.DB
	// Notifications returns the database backing the notifications service.
	Notifications() notifications.DB
	// Payout returns the database of payout information.
	Payout() payouts.DB
	// Pricing returns the database of per-satellite pricing models.
	Pricing() pricing.DB
	// APIKeys returns the database of API keys (e.g. for multinode access).
	APIKeys() apikeys.DB

	// Preflight checks that the database is usable before startup.
	Preflight(ctx context.Context) error
}
|
|
|
|
|
2020-03-15 22:16:56 +00:00
|
|
|
// Config is all the configuration parameters for a Storage Node.
type Config struct {
	Identity identity.Config

	Server server.Config
	Debug  debug.Config

	Preflight preflight.Config
	Contact   contact.Config
	Operator  operator.Config

	// TODO: flatten storage config and only keep the new one
	Storage   piecestore.OldConfig
	Storage2  piecestore.Config
	Collector collector.Config

	Filestore filestore.Config

	Pieces pieces.Config

	Retain retain.Config

	Nodestats nodestats.Config

	Console consoleserver.Config

	// Healthcheck configures the HTTP health endpoint served on the public port.
	Healthcheck healthcheck.Config

	Version checker.Config

	Bandwidth bandwidth.Config

	GracefulExit gracefulexit.Config
}
|
|
|
|
|
2020-04-14 13:39:42 +01:00
|
|
|
// DatabaseConfig returns the storagenodedb.Config that should be used with this Config.
|
|
|
|
func (config *Config) DatabaseConfig() storagenodedb.Config {
|
2020-04-30 23:08:32 +01:00
|
|
|
dbdir := config.Storage2.DatabaseDir
|
|
|
|
if dbdir == "" {
|
|
|
|
dbdir = config.Storage.Path
|
|
|
|
}
|
2020-04-14 13:39:42 +01:00
|
|
|
return storagenodedb.Config{
|
|
|
|
Storage: config.Storage.Path,
|
2020-04-30 23:08:32 +01:00
|
|
|
Info: filepath.Join(dbdir, "piecestore.db"),
|
|
|
|
Info2: filepath.Join(dbdir, "info.db"),
|
2020-04-14 13:39:42 +01:00
|
|
|
Pieces: config.Storage.Path,
|
|
|
|
Filestore: config.Filestore,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-10 13:13:27 +00:00
|
|
|
// Verify verifies whether configuration is consistent and acceptable.
|
2019-01-23 10:39:03 +00:00
|
|
|
func (config *Config) Verify(log *zap.Logger) error {
|
2020-01-20 18:58:03 +00:00
|
|
|
err := config.Operator.Verify(log)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if config.Contact.ExternalAddress != "" {
|
|
|
|
err := isAddressValid(config.Contact.ExternalAddress)
|
|
|
|
if err != nil {
|
|
|
|
return errs.New("invalid contact.external-address: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if config.Server.Address != "" {
|
|
|
|
err := isAddressValid(config.Server.Address)
|
|
|
|
if err != nil {
|
|
|
|
return errs.New("invalid server.address: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func isAddressValid(addrstring string) error {
|
|
|
|
addr, port, err := net.SplitHostPort(addrstring)
|
|
|
|
if err != nil || port == "" {
|
|
|
|
return errs.New("split host-port %q failed: %+v", addrstring, err)
|
|
|
|
}
|
2020-01-28 14:25:17 +00:00
|
|
|
if addr == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2020-01-20 18:58:03 +00:00
|
|
|
resolvedhosts, err := net.LookupHost(addr)
|
|
|
|
if err != nil || len(resolvedhosts) == 0 {
|
|
|
|
return errs.New("lookup %q failed: %+v", addr, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Peer is the representation of a Storage Node.
//
// architecture: Peer
type Peer struct {
	// core dependencies
	Log         *zap.Logger
	Identity    *identity.FullIdentity
	DB          DB
	UsedSerials *usedserials.Table
	OrdersStore *orders.FileStore

	// Servers and Services group the lifecycle of everything the peer runs.
	Servers  *lifecycle.Group
	Services *lifecycle.Group

	Dialer rpc.Dialer

	Server *server.Server

	// Version tracks software version and runs the update-check chore.
	Version struct {
		Chore   *version2.Chore
		Service *checker.Service
	}

	// Healthcheck serves node health over HTTP on the public port.
	Healthcheck struct {
		Service  *healthcheck.Service
		Endpoint *healthcheck.Endpoint
	}

	Debug struct {
		Listener net.Listener
		Server   *debug.Server
	}

	// services and endpoints
	// TODO: similar grouping to satellite.Core

	Preflight struct {
		LocalTime *preflight.LocalTime
	}

	// Contact handles check-ins and pings with satellites.
	Contact struct {
		Service   *contact.Service
		Chore     *contact.Chore
		Endpoint  *contact.Endpoint
		PingStats *contact.PingStats
		QUICStats *contact.QUICStats
	}

	Estimation struct {
		Service *estimatedpayouts.Service
	}

	// Storage2 groups the piece-storage subsystem.
	Storage2 struct {
		// TODO: lift things outside of it to organize better
		Trust         *trust.Pool
		Store         *pieces.Store
		TrashChore    *pieces.TrashChore
		BlobsCache    *pieces.BlobsUsageCache
		CacheService  *pieces.CacheService
		RetainService *retain.Service
		PieceDeleter  *pieces.Deleter
		Endpoint      *piecestore.Endpoint
		Inspector     *inspector.Endpoint
		Monitor       *monitor.Service
		Orders        *orders.Service
	}

	Collector *collector.Service

	NodeStats struct {
		Service *nodestats.Service
		Cache   *nodestats.Cache
	}

	// Web server with web UI
	Console struct {
		Listener net.Listener
		Service  *console.Service
		Endpoint *consoleserver.Server
	}

	PieceTransfer struct {
		Service piecetransfer.Service
	}

	// GracefulExit coordinates leaving satellites cleanly.
	GracefulExit struct {
		Service      *gracefulexit.Service
		Endpoint     *gracefulexit.Endpoint
		Chore        *gracefulexit.Chore
		BlobsCleaner *gracefulexit.BlobsCleaner
	}

	Notifications struct {
		Service *notifications.Service
	}

	Payout struct {
		Service  *payouts.Service
		Endpoint *payouts.Endpoint
	}

	Bandwidth *bandwidth.Service

	Reputation *reputation.Service

	// Multinode exposes endpoints for the multinode dashboard.
	Multinode struct {
		Storage   *multinode.StorageEndpoint
		Bandwidth *multinode.BandwidthEndpoint
		Node      *multinode.NodeEndpoint
		Payout    *multinode.PayoutEndpoint
	}
}
|
|
|
|
|
|
|
|
// New creates a new Storage Node.
|
2020-05-12 20:10:32 +01:00
|
|
|
func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB extensions.RevocationDB, config Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*Peer, error) {
|
2019-01-10 13:13:27 +00:00
|
|
|
peer := &Peer{
|
2019-02-11 11:17:32 +00:00
|
|
|
Log: log,
|
|
|
|
Identity: full,
|
|
|
|
DB: db,
|
2020-01-28 23:13:59 +00:00
|
|
|
|
|
|
|
Servers: lifecycle.NewGroup(log.Named("servers")),
|
|
|
|
Services: lifecycle.NewGroup(log.Named("services")),
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
|
|
|
|
2020-02-21 17:41:54 +00:00
|
|
|
{ // setup notification service.
|
|
|
|
peer.Notifications.Service = notifications.NewService(peer.Log, peer.DB.Notifications())
|
|
|
|
}
|
|
|
|
|
2020-01-28 17:35:45 +00:00
|
|
|
{ // setup debug
|
|
|
|
var err error
|
|
|
|
if config.Debug.Address != "" {
|
|
|
|
peer.Debug.Listener, err = net.Listen("tcp", config.Debug.Address)
|
|
|
|
if err != nil {
|
2020-01-29 23:04:30 +00:00
|
|
|
withoutStack := errors.New(err.Error())
|
|
|
|
peer.Log.Debug("failed to start debug endpoints", zap.Error(withoutStack))
|
2020-01-28 17:35:45 +00:00
|
|
|
}
|
|
|
|
}
|
2020-01-30 12:49:40 +00:00
|
|
|
debugConfig := config.Debug
|
|
|
|
debugConfig.ControlTitle = "Storage Node"
|
2020-05-12 20:10:32 +01:00
|
|
|
peer.Debug.Server = debug.NewServerWithAtomicLevel(log.Named("debug"), peer.Debug.Listener, monkit.Default, debugConfig, atomicLogLevel)
|
2020-01-28 17:35:45 +00:00
|
|
|
peer.Servers.Add(lifecycle.Item{
|
|
|
|
Name: "debug",
|
|
|
|
Run: peer.Debug.Server.Run,
|
|
|
|
Close: peer.Debug.Server.Close,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-01-10 13:13:27 +00:00
|
|
|
var err error
|
|
|
|
|
2020-06-01 16:41:59 +01:00
|
|
|
{ // version setup
|
2019-10-21 11:50:59 +01:00
|
|
|
if !versionInfo.IsZero() {
|
2020-04-13 10:31:17 +01:00
|
|
|
peer.Log.Debug("Version info",
|
|
|
|
zap.Stringer("Version", versionInfo.Version.Version),
|
|
|
|
zap.String("Commit Hash", versionInfo.CommitHash),
|
|
|
|
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
|
|
|
|
zap.Bool("Release Build", versionInfo.Release),
|
|
|
|
)
|
2019-04-03 20:13:39 +01:00
|
|
|
}
|
2020-01-28 23:13:59 +00:00
|
|
|
|
2020-02-21 17:41:54 +00:00
|
|
|
peer.Version.Service = checker.NewService(log.Named("version"), config.Version, versionInfo, "Storagenode")
|
2020-06-01 16:41:59 +01:00
|
|
|
versionCheckInterval := 12 * time.Hour
|
|
|
|
peer.Version.Chore = version2.NewChore(peer.Log.Named("version:chore"), peer.Version.Service, peer.Notifications.Service, peer.Identity.ID, versionCheckInterval)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "version",
|
2020-02-21 17:41:54 +00:00
|
|
|
Run: peer.Version.Chore.Run,
|
2020-01-28 23:13:59 +00:00
|
|
|
})
|
2019-04-03 20:13:39 +01:00
|
|
|
}
|
|
|
|
|
storagenode: accept HTTP calls on public port, listening for monitoring requests
Today each storagenode should have a port which is opened for the internet, and handles DRPC protocol calls.
When we do a HTTP call on the DRPC endpoint, it hangs until a timeout.
This patch changes the behavior: the main DRPC port of the storagenodes can accept HTTP requests and can be used to monitor the status of the node:
* if returns with HTTP 200 only if the storagnode is healthy (not suspended / disqualified + online score > 0.9)
* it CAN include information about the current status (per satellite). It's opt-in, you should configure it so.
In this way it becomes extremely easy to monitor storagenodes with external uptime services.
Note: this patch exposes some information which was not easily available before (especially the node status, and used satellites). I think it should be acceptable:
* Until having more community satellites, all storagenodes are connected to the main Storj satellites.
* With community satellites, it's good thing to have more transparency (easy way to check who is connected to which satellites)
The implementation is based on this line:
```
http.Serve(NewPrefixedListener([]byte("GET / HT"), publicMux.Route("GET / HT")), p.public.http)
```
This line answers to the TCP requests with `GET / HT...` (GET HTTP request to the route), but puts back the removed prefix.
Change-Id: I3700c7e24524850825ecdf75a4bcc3b4afcb3a74
2022-08-23 11:28:41 +01:00
|
|
|
{
|
|
|
|
|
|
|
|
peer.Healthcheck.Service = healthcheck.NewService(peer.DB.Reputation(), config.Healthcheck.Details)
|
|
|
|
peer.Healthcheck.Endpoint = healthcheck.NewEndpoint(peer.Healthcheck.Service)
|
|
|
|
}
|
|
|
|
|
2019-01-10 13:13:27 +00:00
|
|
|
{ // setup listener and server
|
2019-03-07 18:19:37 +00:00
|
|
|
sc := config.Server
|
2019-08-19 23:10:38 +01:00
|
|
|
|
2019-09-19 05:46:39 +01:00
|
|
|
tlsOptions, err := tlsopts.NewOptions(peer.Identity, sc.Config, revocationDB)
|
2019-01-10 13:13:27 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
|
2019-09-19 05:46:39 +01:00
|
|
|
peer.Dialer = rpc.NewDefaultDialer(tlsOptions)
|
2019-01-10 13:13:27 +00:00
|
|
|
|
2021-01-28 23:24:35 +00:00
|
|
|
peer.Server, err = server.New(log.Named("server"), tlsOptions, sc)
|
2019-01-10 13:13:27 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2020-01-28 23:13:59 +00:00
|
|
|
|
storagenode: accept HTTP calls on public port, listening for monitoring requests
Today each storagenode should have a port which is opened for the internet, and handles DRPC protocol calls.
When we do a HTTP call on the DRPC endpoint, it hangs until a timeout.
This patch changes the behavior: the main DRPC port of the storagenodes can accept HTTP requests and can be used to monitor the status of the node:
* if returns with HTTP 200 only if the storagnode is healthy (not suspended / disqualified + online score > 0.9)
* it CAN include information about the current status (per satellite). It's opt-in, you should configure it so.
In this way it becomes extremely easy to monitor storagenodes with external uptime services.
Note: this patch exposes some information which was not easily available before (especially the node status, and used satellites). I think it should be acceptable:
* Until having more community satellites, all storagenodes are connected to the main Storj satellites.
* With community satellites, it's good thing to have more transparency (easy way to check who is connected to which satellites)
The implementation is based on this line:
```
http.Serve(NewPrefixedListener([]byte("GET / HT"), publicMux.Route("GET / HT")), p.public.http)
```
This line answers to the TCP requests with `GET / HT...` (GET HTTP request to the route), but puts back the removed prefix.
Change-Id: I3700c7e24524850825ecdf75a4bcc3b4afcb3a74
2022-08-23 11:28:41 +01:00
|
|
|
if config.Healthcheck.Enabled {
|
|
|
|
peer.Server.AddHTTPFallback(peer.Healthcheck.Endpoint.HandleHTTP)
|
|
|
|
}
|
|
|
|
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Servers.Add(lifecycle.Item{
|
|
|
|
Name: "server",
|
|
|
|
Run: func(ctx context.Context) error {
|
2020-04-15 14:57:14 +01:00
|
|
|
// Don't change the format of this comment, it is used to figure out the node id.
|
|
|
|
peer.Log.Info(fmt.Sprintf("Node %s started", peer.Identity.ID))
|
|
|
|
peer.Log.Info(fmt.Sprintf("Public server started on %s", peer.Addr()))
|
|
|
|
peer.Log.Info(fmt.Sprintf("Private server started on %s", peer.PrivateAddr()))
|
2020-01-28 23:13:59 +00:00
|
|
|
return peer.Server.Run(ctx)
|
|
|
|
},
|
|
|
|
Close: peer.Server.Close,
|
|
|
|
})
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 20:56:34 +01:00
|
|
|
{ // setup trust pool
|
2021-07-08 10:14:15 +01:00
|
|
|
peer.Storage2.Trust, err = trust.NewPool(log.Named("trust"), trust.Dialer(peer.Dialer), config.Storage2.Trust, peer.DB.Satellites())
|
2019-08-09 10:21:41 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "trust",
|
|
|
|
Run: peer.Storage2.Trust.Run,
|
|
|
|
})
|
2019-08-09 10:21:41 +01:00
|
|
|
}
|
|
|
|
|
2020-01-10 01:58:59 +00:00
|
|
|
{
|
|
|
|
peer.Preflight.LocalTime = preflight.NewLocalTime(peer.Log.Named("preflight:localtime"), config.Preflight, peer.Storage2.Trust, peer.Dialer)
|
|
|
|
}
|
|
|
|
|
2019-09-19 20:56:34 +01:00
|
|
|
{ // setup contact service
|
|
|
|
c := config.Contact
|
|
|
|
if c.ExternalAddress == "" {
|
|
|
|
c.ExternalAddress = peer.Addr()
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
|
|
|
|
2019-04-10 07:04:24 +01:00
|
|
|
pbVersion, err := versionInfo.Proto()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2022-12-21 22:19:05 +00:00
|
|
|
noiseKeyAttestation, err := peer.Server.NoiseKeyAttestation(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2020-05-20 14:40:25 +01:00
|
|
|
self := contact.NodeInfo{
|
|
|
|
ID: peer.ID(),
|
|
|
|
Address: c.ExternalAddress,
|
2019-04-22 10:07:50 +01:00
|
|
|
Operator: pb.NodeOperator{
|
2021-01-18 14:33:13 +00:00
|
|
|
Email: config.Operator.Email,
|
|
|
|
Wallet: config.Operator.Wallet,
|
|
|
|
WalletFeatures: config.Operator.WalletFeatures,
|
2019-01-10 13:13:27 +00:00
|
|
|
},
|
2022-12-21 22:19:05 +00:00
|
|
|
Version: *pbVersion,
|
|
|
|
NoiseKeyAttestation: noiseKeyAttestation,
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
2019-09-19 20:56:34 +01:00
|
|
|
peer.Contact.PingStats = new(contact.PingStats)
|
2022-01-25 10:51:40 +00:00
|
|
|
peer.Contact.QUICStats = contact.NewQUICStats(peer.Server.IsQUICEnabled())
|
|
|
|
peer.Contact.Service = contact.NewService(peer.Log.Named("contact:service"), peer.Dialer, self, peer.Storage2.Trust, peer.Contact.QUICStats)
|
2020-01-28 23:13:59 +00:00
|
|
|
|
2020-02-26 02:39:44 +00:00
|
|
|
peer.Contact.Chore = contact.NewChore(peer.Log.Named("contact:chore"), config.Contact.Interval, peer.Contact.Service)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "contact:chore",
|
|
|
|
Run: peer.Contact.Chore.Run,
|
|
|
|
Close: peer.Contact.Chore.Close,
|
|
|
|
})
|
|
|
|
|
2021-04-30 19:40:59 +01:00
|
|
|
peer.Contact.Endpoint = contact.NewEndpoint(peer.Log.Named("contact:endpoint"), peer.Storage2.Trust, peer.Contact.PingStats)
|
2020-03-24 17:49:20 +00:00
|
|
|
if err := pb.DRPCRegisterContact(peer.Server.DRPC(), peer.Contact.Endpoint); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2019-09-04 20:04:18 +01:00
|
|
|
}
|
|
|
|
|
2019-05-08 12:11:59 +01:00
|
|
|
{ // setup storage
|
2020-01-30 18:01:50 +00:00
|
|
|
peer.Storage2.BlobsCache = pieces.NewBlobsUsageCache(peer.Log.Named("blobscache"), peer.DB.Pieces())
|
2019-08-12 22:43:05 +01:00
|
|
|
|
|
|
|
peer.Storage2.Store = pieces.NewStore(peer.Log.Named("pieces"),
|
|
|
|
peer.Storage2.BlobsCache,
|
|
|
|
peer.DB.V0PieceInfo(),
|
|
|
|
peer.DB.PieceExpirationDB(),
|
|
|
|
peer.DB.PieceSpaceUsedDB(),
|
2020-04-14 13:39:42 +01:00
|
|
|
config.Pieces,
|
2019-08-12 22:43:05 +01:00
|
|
|
)
|
|
|
|
|
2020-04-20 21:29:18 +01:00
|
|
|
peer.Storage2.PieceDeleter = pieces.NewDeleter(log.Named("piecedeleter"), peer.Storage2.Store, config.Storage2.DeleteWorkers, config.Storage2.DeleteQueueSize)
|
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "PieceDeleter",
|
|
|
|
Run: peer.Storage2.PieceDeleter.Run,
|
|
|
|
Close: peer.Storage2.PieceDeleter.Close,
|
|
|
|
})
|
|
|
|
|
2019-11-26 16:25:21 +00:00
|
|
|
peer.Storage2.TrashChore = pieces.NewTrashChore(
|
2020-01-06 12:34:54 +00:00
|
|
|
log.Named("pieces:trash"),
|
2019-11-26 16:25:21 +00:00
|
|
|
24*time.Hour, // choreInterval: how often to run the chore
|
|
|
|
7*24*time.Hour, // trashExpiryInterval: when items in the trash should be deleted
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.Storage2.Store,
|
|
|
|
)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "pieces:trash",
|
|
|
|
Run: peer.Storage2.TrashChore.Run,
|
|
|
|
Close: peer.Storage2.TrashChore.Close,
|
|
|
|
})
|
2019-11-26 16:25:21 +00:00
|
|
|
|
2019-08-12 22:43:05 +01:00
|
|
|
peer.Storage2.CacheService = pieces.NewService(
|
2020-01-06 12:34:54 +00:00
|
|
|
log.Named("piecestore:cache"),
|
2019-08-12 22:43:05 +01:00
|
|
|
peer.Storage2.BlobsCache,
|
|
|
|
peer.Storage2.Store,
|
|
|
|
config.Storage2.CacheSyncInterval,
|
2022-08-07 23:40:59 +01:00
|
|
|
config.Storage2.PieceScanOnStartup,
|
2019-08-12 22:43:05 +01:00
|
|
|
)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "piecestore:cache",
|
|
|
|
Run: peer.Storage2.CacheService.Run,
|
|
|
|
Close: peer.Storage2.CacheService.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Piecestore Cache", peer.Storage2.CacheService.Loop))
|
2019-03-18 10:55:06 +00:00
|
|
|
|
2019-04-15 11:12:22 +01:00
|
|
|
peer.Storage2.Monitor = monitor.NewService(
|
|
|
|
log.Named("piecestore:monitor"),
|
|
|
|
peer.Storage2.Store,
|
2019-09-19 20:56:34 +01:00
|
|
|
peer.Contact.Service,
|
2019-04-15 11:12:22 +01:00
|
|
|
peer.DB.Bandwidth(),
|
|
|
|
config.Storage.AllocatedDiskSpace.Int64(),
|
2020-10-13 13:47:55 +01:00
|
|
|
// TODO: use config.Storage.Monitor.Interval, but for some reason is not set
|
2019-04-15 11:12:22 +01:00
|
|
|
config.Storage.KBucketRefreshInterval,
|
2020-02-26 02:39:44 +00:00
|
|
|
peer.Contact.Chore.Trigger,
|
2019-06-10 11:14:50 +01:00
|
|
|
config.Storage2.Monitor,
|
2019-04-15 11:12:22 +01:00
|
|
|
)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "piecestore:monitor",
|
|
|
|
Run: peer.Storage2.Monitor.Run,
|
|
|
|
Close: peer.Storage2.Monitor.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Piecestore Monitor", peer.Storage2.Monitor.Loop))
|
2019-04-15 11:12:22 +01:00
|
|
|
|
2019-08-19 19:52:47 +01:00
|
|
|
peer.Storage2.RetainService = retain.NewService(
|
|
|
|
peer.Log.Named("retain"),
|
|
|
|
peer.Storage2.Store,
|
|
|
|
config.Retain,
|
|
|
|
)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "retain",
|
|
|
|
Run: peer.Storage2.RetainService.Run,
|
|
|
|
Close: peer.Storage2.RetainService.Close,
|
|
|
|
})
|
2019-08-19 19:52:47 +01:00
|
|
|
|
2020-05-27 22:07:24 +01:00
|
|
|
peer.UsedSerials = usedserials.NewTable(config.Storage2.MaxUsedSerialsSize)
|
|
|
|
|
2020-07-01 23:05:01 +01:00
|
|
|
peer.OrdersStore, err = orders.NewFileStore(
|
2020-09-10 21:25:22 +01:00
|
|
|
peer.Log.Named("ordersfilestore"),
|
2020-07-01 23:05:01 +01:00
|
|
|
config.Storage2.Orders.Path,
|
|
|
|
config.Storage2.OrderLimitGracePeriod,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
|
2019-03-18 10:55:06 +00:00
|
|
|
peer.Storage2.Endpoint, err = piecestore.NewEndpoint(
|
|
|
|
peer.Log.Named("piecestore"),
|
2023-01-30 20:44:45 +00:00
|
|
|
peer.Identity,
|
2019-03-18 10:55:06 +00:00
|
|
|
peer.Storage2.Trust,
|
2019-04-15 11:12:22 +01:00
|
|
|
peer.Storage2.Monitor,
|
2019-08-19 19:52:47 +01:00
|
|
|
peer.Storage2.RetainService,
|
2019-10-03 19:31:39 +01:00
|
|
|
peer.Contact.PingStats,
|
2019-03-18 10:55:06 +00:00
|
|
|
peer.Storage2.Store,
|
2022-12-15 17:52:05 +00:00
|
|
|
peer.Storage2.TrashChore,
|
2020-04-20 21:29:18 +01:00
|
|
|
peer.Storage2.PieceDeleter,
|
2020-07-01 23:05:01 +01:00
|
|
|
peer.OrdersStore,
|
2019-03-18 10:55:06 +00:00
|
|
|
peer.DB.Bandwidth(),
|
2020-05-27 22:07:24 +01:00
|
|
|
peer.UsedSerials,
|
2019-03-18 10:55:06 +00:00
|
|
|
config.Storage2,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2020-02-20 17:56:47 +00:00
|
|
|
|
2020-05-11 06:20:34 +01:00
|
|
|
if err := pb.DRPCRegisterPiecestore(peer.Server.DRPC(), peer.Storage2.Endpoint); err != nil {
|
2020-03-24 17:49:20 +00:00
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2023-02-02 22:43:57 +00:00
|
|
|
if err := pb.DRPCRegisterReplaySafePiecestore(peer.Server.ReplaySafeDRPC(), peer.Storage2.Endpoint); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2019-03-18 10:55:06 +00:00
|
|
|
|
2019-09-19 05:46:39 +01:00
|
|
|
// TODO workaround for custom timeout for order sending request (read/write)
|
2019-09-02 12:24:02 +01:00
|
|
|
sc := config.Server
|
2019-09-19 05:46:39 +01:00
|
|
|
|
|
|
|
tlsOptions, err := tlsopts.NewOptions(peer.Identity, sc.Config, revocationDB)
|
2019-09-02 12:24:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
|
2019-09-19 05:46:39 +01:00
|
|
|
dialer := rpc.NewDefaultDialer(tlsOptions)
|
|
|
|
dialer.DialTimeout = config.Storage2.Orders.SenderDialTimeout
|
2019-09-02 12:24:02 +01:00
|
|
|
|
2019-08-22 15:33:14 +01:00
|
|
|
peer.Storage2.Orders = orders.NewService(
|
|
|
|
log.Named("orders"),
|
2019-09-19 05:46:39 +01:00
|
|
|
dialer,
|
2020-07-01 23:05:01 +01:00
|
|
|
peer.OrdersStore,
|
2019-03-27 10:24:35 +00:00
|
|
|
peer.DB.Orders(),
|
2019-07-18 18:15:09 +01:00
|
|
|
peer.Storage2.Trust,
|
2019-08-22 15:33:14 +01:00
|
|
|
config.Storage2.Orders,
|
2019-03-27 10:24:35 +00:00
|
|
|
)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "orders",
|
|
|
|
Run: peer.Storage2.Orders.Run,
|
|
|
|
Close: peer.Storage2.Orders.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Orders Sender", peer.Storage2.Orders.Sender))
|
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Orders Cleanup", peer.Storage2.Orders.Cleanup))
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2021-04-22 19:50:42 +01:00
|
|
|
{ // setup payouts.
|
|
|
|
peer.Payout.Service, err = payouts.NewService(
|
2021-01-14 16:41:36 +00:00
|
|
|
peer.Log.Named("payouts:service"),
|
2020-09-10 15:08:25 +01:00
|
|
|
peer.DB.Payout(),
|
2020-05-27 17:27:28 +01:00
|
|
|
peer.DB.Reputation(),
|
2020-07-09 18:43:06 +01:00
|
|
|
peer.DB.Satellites(),
|
2021-07-29 15:30:57 +01:00
|
|
|
peer.Storage2.Trust,
|
2020-07-02 14:54:32 +01:00
|
|
|
)
|
2020-11-04 14:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2021-04-22 19:50:42 +01:00
|
|
|
|
2021-01-14 16:41:36 +00:00
|
|
|
peer.Payout.Endpoint = payouts.NewEndpoint(
|
|
|
|
peer.Log.Named("payouts:endpoint"),
|
2020-03-13 14:01:12 +00:00
|
|
|
peer.Dialer,
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-10-21 13:34:15 +01:00
|
|
|
{ // setup reputation service.
|
|
|
|
peer.Reputation = reputation.NewService(
|
|
|
|
peer.Log.Named("reputation:service"),
|
|
|
|
peer.DB.Reputation(),
|
|
|
|
peer.Identity.ID,
|
|
|
|
peer.Notifications.Service,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2019-06-26 19:55:22 +01:00
|
|
|
{ // setup node stats service
|
2019-08-08 14:47:04 +01:00
|
|
|
peer.NodeStats.Service = nodestats.NewService(
|
|
|
|
peer.Log.Named("nodestats:service"),
|
2019-09-19 05:46:39 +01:00
|
|
|
peer.Dialer,
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Storage2.Trust,
|
|
|
|
)
|
2019-08-08 14:47:04 +01:00
|
|
|
|
|
|
|
peer.NodeStats.Cache = nodestats.NewCache(
|
|
|
|
peer.Log.Named("nodestats:cache"),
|
|
|
|
config.Nodestats,
|
|
|
|
nodestats.CacheStorage{
|
|
|
|
Reputation: peer.DB.Reputation(),
|
|
|
|
StorageUsage: peer.DB.StorageUsage(),
|
2020-09-10 15:08:25 +01:00
|
|
|
Payout: peer.DB.Payout(),
|
2020-04-10 15:03:14 +01:00
|
|
|
Pricing: peer.DB.Pricing(),
|
2019-08-08 14:47:04 +01:00
|
|
|
},
|
|
|
|
peer.NodeStats.Service,
|
2020-09-10 15:08:25 +01:00
|
|
|
peer.Payout.Endpoint,
|
2020-10-21 13:34:15 +01:00
|
|
|
peer.Reputation,
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Storage2.Trust,
|
|
|
|
)
|
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "nodestats:cache",
|
|
|
|
Run: peer.NodeStats.Cache.Run,
|
|
|
|
Close: peer.NodeStats.Cache.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Node Stats Cache Reputation", peer.NodeStats.Cache.Reputation))
|
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Node Stats Cache Storage", peer.NodeStats.Cache.Storage))
|
2019-06-26 19:55:22 +01:00
|
|
|
}
|
|
|
|
|
2020-09-08 16:15:08 +01:00
|
|
|
{ // setup estimation service
|
2021-01-14 16:41:36 +00:00
|
|
|
peer.Estimation.Service = estimatedpayouts.NewService(
|
2020-09-08 16:15:08 +01:00
|
|
|
peer.DB.Bandwidth(),
|
|
|
|
peer.DB.Reputation(),
|
|
|
|
peer.DB.StorageUsage(),
|
|
|
|
peer.DB.Pricing(),
|
|
|
|
peer.DB.Satellites(),
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2019-06-26 19:55:22 +01:00
|
|
|
{ // setup storage node operator dashboard
|
2021-12-21 05:33:57 +00:00
|
|
|
_, port, _ := net.SplitHostPort(peer.Addr())
|
2019-06-20 12:52:32 +01:00
|
|
|
peer.Console.Service, err = console.NewService(
|
|
|
|
peer.Log.Named("console:service"),
|
|
|
|
peer.DB.Bandwidth(),
|
2019-08-08 02:47:30 +01:00
|
|
|
peer.Storage2.Store,
|
2020-02-21 17:41:54 +00:00
|
|
|
peer.Version.Service,
|
2019-06-20 12:52:32 +01:00
|
|
|
config.Storage.AllocatedDiskSpace,
|
2019-10-01 00:33:00 +01:00
|
|
|
config.Operator.Wallet,
|
2019-08-14 13:17:11 +01:00
|
|
|
versionInfo,
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.DB.Reputation(),
|
2019-09-11 21:41:43 +01:00
|
|
|
peer.DB.StorageUsage(),
|
2020-04-10 15:03:14 +01:00
|
|
|
peer.DB.Pricing(),
|
|
|
|
peer.DB.Satellites(),
|
2019-09-11 21:41:43 +01:00
|
|
|
peer.Contact.PingStats,
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Contact.Service,
|
2020-09-08 16:15:08 +01:00
|
|
|
peer.Estimation.Service,
|
2020-10-14 17:37:50 +01:00
|
|
|
peer.Storage2.BlobsCache,
|
2021-03-17 18:47:23 +00:00
|
|
|
config.Operator.WalletFeatures,
|
2021-12-21 05:33:57 +00:00
|
|
|
port,
|
2022-01-25 10:51:40 +00:00
|
|
|
peer.Contact.QUICStats,
|
2020-01-28 23:13:59 +00:00
|
|
|
)
|
2019-06-20 12:52:32 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
|
|
|
|
peer.Console.Listener, err = net.Listen("tcp", config.Console.Address)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
|
2022-03-11 13:47:20 +00:00
|
|
|
var assets fs.FS
|
|
|
|
assets = storagenodeweb.Assets
|
2019-10-08 09:52:19 +01:00
|
|
|
if config.Console.StaticDir != "" {
|
2022-03-11 13:47:20 +00:00
|
|
|
// HACKFIX: Previous setups specify the directory for web/storagenode,
|
|
|
|
// instead of the actual built data. This is for backwards compatibility.
|
|
|
|
distDir := filepath.Join(config.Console.StaticDir, "dist")
|
|
|
|
assets = os.DirFS(distDir)
|
2019-10-08 09:52:19 +01:00
|
|
|
}
|
|
|
|
|
2019-06-20 12:52:32 +01:00
|
|
|
peer.Console.Endpoint = consoleserver.NewServer(
|
|
|
|
peer.Log.Named("console:endpoint"),
|
2019-10-08 09:52:19 +01:00
|
|
|
assets,
|
2019-12-17 15:38:55 +00:00
|
|
|
peer.Notifications.Service,
|
2019-06-20 12:52:32 +01:00
|
|
|
peer.Console.Service,
|
2020-09-10 15:08:25 +01:00
|
|
|
peer.Payout.Service,
|
2019-06-20 12:52:32 +01:00
|
|
|
peer.Console.Listener,
|
|
|
|
)
|
2022-01-25 10:51:40 +00:00
|
|
|
|
|
|
|
// add console service to peer services
|
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "console:endpoint",
|
|
|
|
Run: peer.Console.Endpoint.Run,
|
|
|
|
Close: peer.Console.Endpoint.Close,
|
|
|
|
})
|
2019-06-20 12:52:32 +01:00
|
|
|
}
|
|
|
|
|
2019-07-06 14:40:58 +01:00
|
|
|
{ // setup storage inspector
|
|
|
|
peer.Storage2.Inspector = inspector.NewEndpoint(
|
|
|
|
peer.Log.Named("pieces:inspector"),
|
2019-08-08 02:47:30 +01:00
|
|
|
peer.Storage2.Store,
|
2019-09-19 20:56:34 +01:00
|
|
|
peer.Contact.Service,
|
2019-09-11 21:41:43 +01:00
|
|
|
peer.Contact.PingStats,
|
2019-07-06 14:40:58 +01:00
|
|
|
peer.DB.Bandwidth(),
|
|
|
|
config.Storage,
|
|
|
|
peer.Console.Listener.Addr(),
|
2019-09-19 20:56:34 +01:00
|
|
|
config.Contact.ExternalAddress,
|
2019-07-06 14:40:58 +01:00
|
|
|
)
|
2020-10-30 12:51:26 +00:00
|
|
|
if err := internalpb.DRPCRegisterPieceStoreInspector(peer.Server.PrivateDRPC(), peer.Storage2.Inspector); err != nil {
|
2020-03-24 17:49:20 +00:00
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2019-07-06 14:40:58 +01:00
|
|
|
}
|
|
|
|
|
2020-10-20 20:58:54 +01:00
|
|
|
{ // setup piecetransfer service
|
|
|
|
peer.PieceTransfer.Service = piecetransfer.NewService(
|
|
|
|
peer.Log.Named("piecetransfer"),
|
|
|
|
peer.Storage2.Store,
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.Dialer,
|
|
|
|
// using GracefulExit config here for historical reasons
|
|
|
|
config.GracefulExit.MinDownloadTimeout,
|
|
|
|
config.GracefulExit.MinBytesPerSecond,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2019-10-11 14:58:12 +01:00
|
|
|
{ // setup graceful exit service
|
2020-10-20 20:58:54 +01:00
|
|
|
peer.GracefulExit.Service = gracefulexit.NewService(
|
|
|
|
peer.Log.Named("gracefulexit:service"),
|
|
|
|
peer.Storage2.Store,
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.DB.Satellites(),
|
|
|
|
peer.Dialer,
|
|
|
|
config.GracefulExit,
|
|
|
|
)
|
|
|
|
|
2019-10-11 14:58:12 +01:00
|
|
|
peer.GracefulExit.Endpoint = gracefulexit.NewEndpoint(
|
|
|
|
peer.Log.Named("gracefulexit:endpoint"),
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.DB.Satellites(),
|
2020-06-05 21:11:46 +01:00
|
|
|
peer.Dialer,
|
2019-10-11 14:58:12 +01:00
|
|
|
peer.Storage2.BlobsCache,
|
|
|
|
)
|
2020-10-30 13:04:07 +00:00
|
|
|
if err := internalpb.DRPCRegisterNodeGracefulExit(peer.Server.PrivateDRPC(), peer.GracefulExit.Endpoint); err != nil {
|
2020-03-24 17:49:20 +00:00
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2019-10-15 16:29:47 +01:00
|
|
|
|
|
|
|
peer.GracefulExit.Chore = gracefulexit.NewChore(
|
|
|
|
peer.Log.Named("gracefulexit:chore"),
|
2020-10-20 20:58:54 +01:00
|
|
|
peer.GracefulExit.Service,
|
|
|
|
peer.PieceTransfer.Service,
|
2019-10-22 21:42:21 +01:00
|
|
|
peer.Dialer,
|
2020-10-20 20:58:54 +01:00
|
|
|
config.GracefulExit,
|
2019-10-15 16:29:47 +01:00
|
|
|
)
|
2020-07-10 17:12:31 +01:00
|
|
|
peer.GracefulExit.BlobsCleaner = gracefulexit.NewBlobsCleaner(
|
2020-10-20 20:58:54 +01:00
|
|
|
peer.Log.Named("gracefulexit:blobscleaner"),
|
2020-07-10 17:12:31 +01:00
|
|
|
peer.Storage2.Store,
|
|
|
|
peer.Storage2.Trust,
|
|
|
|
peer.DB.Satellites(),
|
|
|
|
)
|
|
|
|
// Runs once on node start to clean blobs from trash that left after successful GE.
|
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "gracefulexit:blobscleaner",
|
|
|
|
Run: peer.GracefulExit.BlobsCleaner.RemoveBlobs,
|
|
|
|
})
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "gracefulexit:chore",
|
|
|
|
Run: peer.GracefulExit.Chore.Run,
|
|
|
|
Close: peer.GracefulExit.Chore.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Graceful Exit", peer.GracefulExit.Chore.Loop))
|
2019-10-11 14:58:12 +01:00
|
|
|
}
|
|
|
|
|
2020-05-27 22:07:24 +01:00
|
|
|
peer.Collector = collector.NewService(peer.Log.Named("collector"), peer.Storage2.Store, peer.UsedSerials, config.Collector)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "collector",
|
|
|
|
Run: peer.Collector.Run,
|
|
|
|
Close: peer.Collector.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Collector", peer.Collector.Loop))
|
2019-05-08 12:11:59 +01:00
|
|
|
|
2019-07-29 15:07:52 +01:00
|
|
|
peer.Bandwidth = bandwidth.NewService(peer.Log.Named("bandwidth"), peer.DB.Bandwidth(), config.Bandwidth)
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Services.Add(lifecycle.Item{
|
|
|
|
Name: "bandwidth",
|
|
|
|
Run: peer.Bandwidth.Run,
|
|
|
|
Close: peer.Bandwidth.Close,
|
|
|
|
})
|
2020-01-29 15:37:50 +00:00
|
|
|
peer.Debug.Server.Panel.Add(
|
|
|
|
debug.Cycle("Bandwidth", peer.Bandwidth.Loop))
|
2019-07-29 15:07:52 +01:00
|
|
|
|
2020-12-26 01:16:43 +00:00
|
|
|
{ // setup multinode endpoints
|
|
|
|
// TODO: add to peer?
|
|
|
|
apiKeys := apikeys.NewService(peer.DB.APIKeys())
|
|
|
|
|
|
|
|
peer.Multinode.Storage = multinode.NewStorageEndpoint(
|
|
|
|
peer.Log.Named("multinode:storage-endpoint"),
|
|
|
|
apiKeys,
|
2021-04-22 19:50:42 +01:00
|
|
|
peer.Storage2.Monitor,
|
2021-06-03 12:52:28 +01:00
|
|
|
peer.DB.StorageUsage(),
|
2021-04-22 19:50:42 +01:00
|
|
|
)
|
2020-12-26 01:16:43 +00:00
|
|
|
|
|
|
|
peer.Multinode.Bandwidth = multinode.NewBandwidthEndpoint(
|
|
|
|
peer.Log.Named("multinode:bandwidth-endpoint"),
|
|
|
|
apiKeys,
|
2021-04-22 19:50:42 +01:00
|
|
|
peer.DB.Bandwidth(),
|
|
|
|
)
|
2020-12-26 01:16:43 +00:00
|
|
|
|
|
|
|
peer.Multinode.Node = multinode.NewNodeEndpoint(
|
|
|
|
peer.Log.Named("multinode:node-endpoint"),
|
2021-06-01 08:29:46 +01:00
|
|
|
config.Operator,
|
2020-12-26 01:16:43 +00:00
|
|
|
apiKeys,
|
|
|
|
peer.Version.Service.Info,
|
2021-01-05 07:59:22 +00:00
|
|
|
peer.Contact.PingStats,
|
2021-01-07 23:26:31 +00:00
|
|
|
peer.DB.Reputation(),
|
2021-04-22 19:50:42 +01:00
|
|
|
peer.Storage2.Trust,
|
|
|
|
)
|
2020-12-26 01:16:43 +00:00
|
|
|
|
2021-01-22 13:06:59 +00:00
|
|
|
peer.Multinode.Payout = multinode.NewPayoutEndpoint(
|
|
|
|
peer.Log.Named("multinode:payout-endpoint"),
|
|
|
|
apiKeys,
|
2021-04-22 19:50:42 +01:00
|
|
|
peer.DB.Payout(),
|
2021-05-05 18:10:58 +01:00
|
|
|
peer.Estimation.Service,
|
2021-04-22 19:50:42 +01:00
|
|
|
peer.Payout.Service,
|
|
|
|
)
|
2021-01-22 13:06:59 +00:00
|
|
|
|
2020-12-26 01:16:43 +00:00
|
|
|
if err = multinodepb.DRPCRegisterStorage(peer.Server.DRPC(), peer.Multinode.Storage); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
if err = multinodepb.DRPCRegisterBandwidth(peer.Server.DRPC(), peer.Multinode.Bandwidth); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
|
|
|
if err = multinodepb.DRPCRegisterNode(peer.Server.DRPC(), peer.Multinode.Node); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2021-06-16 17:46:25 +01:00
|
|
|
if err = multinodepb.DRPCRegisterPayout(peer.Server.DRPC(), peer.Multinode.Payout); err != nil {
|
2021-01-22 13:06:59 +00:00
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2021-07-06 10:36:32 +01:00
|
|
|
if err = multinodepb.DRPCRegisterPayouts(peer.Server.DRPC(), peer.Multinode.Payout); err != nil {
|
|
|
|
return nil, errs.Combine(err, peer.Close())
|
|
|
|
}
|
2020-12-26 01:16:43 +00:00
|
|
|
}
|
|
|
|
|
2019-01-10 13:13:27 +00:00
|
|
|
return peer, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run runs storage node until it's either closed or it errors.
|
2019-06-04 13:31:39 +01:00
|
|
|
func (peer *Peer) Run(ctx context.Context) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-11-16 00:59:32 +00:00
|
|
|
// Refresh the trust pool first. It will be updated periodically via
|
|
|
|
// Run() below.
|
|
|
|
if err := peer.Storage2.Trust.Refresh(ctx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-01-10 01:58:59 +00:00
|
|
|
if err := peer.Preflight.LocalTime.Check(ctx); err != nil {
|
2020-10-13 14:49:33 +01:00
|
|
|
peer.Log.Error("Failed preflight check.", zap.Error(err))
|
2020-01-10 01:58:59 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-02-06 13:19:14 +00:00
|
|
|
group, ctx := errgroup.WithContext(ctx)
|
2020-01-28 17:35:45 +00:00
|
|
|
|
2020-01-28 23:13:59 +00:00
|
|
|
peer.Servers.Run(ctx, group)
|
|
|
|
peer.Services.Run(ctx, group)
|
2019-10-15 16:29:47 +01:00
|
|
|
|
2019-01-10 13:13:27 +00:00
|
|
|
return group.Wait()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes all the resources.
|
|
|
|
func (peer *Peer) Close() error {
|
2020-01-28 23:13:59 +00:00
|
|
|
return errs.Combine(
|
|
|
|
peer.Servers.Close(),
|
|
|
|
peer.Services.Close(),
|
|
|
|
)
|
2019-01-10 13:13:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ID returns the peer ID.
|
|
|
|
func (peer *Peer) ID() storj.NodeID { return peer.Identity.ID }
|
|
|
|
|
|
|
|
// Addr returns the public address.
|
2019-03-07 18:19:37 +00:00
|
|
|
func (peer *Peer) Addr() string { return peer.Server.Addr().String() }
|
|
|
|
|
2019-07-03 18:29:18 +01:00
|
|
|
// URL returns the storj.NodeURL.
|
|
|
|
func (peer *Peer) URL() storj.NodeURL { return storj.NodeURL{ID: peer.ID(), Address: peer.Addr()} }
|
|
|
|
|
2019-03-07 18:19:37 +00:00
|
|
|
// PrivateAddr returns the private address.
|
|
|
|
func (peer *Peer) PrivateAddr() string { return peer.Server.PrivateAddr().String() }
|