storj/internal/testplanet/satellite.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package testplanet

import (
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/zeebo/errs"
"storj.io/storj/internal/memory"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/peertls/extensions"
"storj.io/storj/pkg/peertls/tlsopts"
"storj.io/storj/pkg/revocation"
"storj.io/storj/pkg/server"
"storj.io/storj/satellite"
"storj.io/storj/satellite/accounting/rollup"
"storj.io/storj/satellite/accounting/tally"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleweb"
"storj.io/storj/satellite/dbcleanup"
"storj.io/storj/satellite/discovery"
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/marketingweb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/repair/checker"
"storj.io/storj/satellite/repair/repairer"
"storj.io/storj/satellite/satellitedb"
)

// newSatellites initializes count satellites with a default test
// configuration and registers each with the planet so it is closed
// during shutdown.
func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
	var xs []*satellite.Peer
	defer func() {
		for _, x := range xs {
			planet.peers = append(planet.peers, closablePeer{peer: x})
		}
	}()
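
	// Each satellite gets its own directory, identity, database, and
	// configuration.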
	for i := 0; i < count; i++ {
		prefix := "satellite" + strconv.Itoa(i)
		log := planet.log.Named(prefix)

		storageDir := filepath.Join(planet.directory, prefix)
		if err := os.MkdirAll(storageDir, 0700); err != nil {
			return nil, err
		}

		identity, err := planet.NewIdentity()
		if err != nil {
			return nil, err
		}
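
		// Use the database constructor supplied by the test when one is
		// configured; otherwise fall back to an in-memory satellite database.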
		var db satellite.DB
		if planet.config.Reconfigure.NewSatelliteDB != nil {
			db, err = planet.config.Reconfigure.NewSatelliteDB(log.Named("db"), i)
		} else {
			db, err = satellitedb.NewInMemory(log.Named("db"))
		}
		if err != nil {
			return nil, err
		}
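
		// The defaults below are tuned for fast tests: local addresses, short
		// intervals, and thresholds derived from the planet's node count.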
		config := satellite.Config{
			Server: server.Config{
				Address:        "127.0.0.1:0",
				PrivateAddress: "127.0.0.1:0",
				Config: tlsopts.Config{
					RevocationDBURL:     "bolt://" + filepath.Join(storageDir, "revocation.db"),
					UsePeerCAWhitelist:  true,
					PeerCAWhitelistPath: planet.whitelistPath,
					PeerIDVersions:      "latest",
					Extensions: extensions.Config{
						Revocation:          false,
						WhitelistSignedLeaf: false,
					},
				},
			},
			Kademlia: kademlia.Config{
				Alpha:                5,
				BootstrapBackoffBase: 500 * time.Millisecond,
				BootstrapBackoffMax:  2 * time.Second,
				DBPath:               storageDir, // TODO: replace with master db
				Operator: kademlia.OperatorConfig{
					Email:  prefix + "@mail.test",
					Wallet: "0x" + strings.Repeat("00", 20),
				},
			},
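			// Node selection thresholds are zeroed so brand-new test nodes
			// are immediately eligible for selection.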
			Overlay: overlay.Config{
				Node: overlay.NodeSelectionConfig{
					UptimeCount:       0,
					AuditCount:        0,
					NewNodePercentage: 0,
					OnlineWindow:      0,
					DistinctIP:        false,

					AuditReputationRepairWeight: 1,
					AuditReputationUplinkWeight: 1,
					AuditReputationAlpha0:       1,
					AuditReputationBeta0:        0,
					AuditReputationLambda:       0.95,
					AuditReputationWeight:       1,
					AuditReputationDQ:           0.6,

					UptimeReputationRepairWeight: 1,
					UptimeReputationUplinkWeight: 1,
					UptimeReputationAlpha0:       2,
					UptimeReputationBeta0:        0,
					UptimeReputationLambda:       0.99,
					UptimeReputationWeight:       1,
					UptimeReputationDQ:           0.6,
				},
				UpdateStatsBatchSize: 100,
			},
			Discovery: discovery.Config{
				DiscoveryInterval:  1 * time.Second,
				RefreshInterval:    1 * time.Second,
				RefreshLimit:       100,
				RefreshConcurrency: 2,
			},
			Metainfo: metainfo.Config{
				DatabaseURL:          "bolt://" + filepath.Join(storageDir, "pointers.db"),
				MinRemoteSegmentSize: 0, // TODO: fix tests to work with 1024
				MaxInlineSegmentSize: 8000,
				Overlay:              true,
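				// Redundancy thresholds scale with the planet's node count so
				// the same configuration works at any test cluster size.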
				RS: metainfo.RSConfig{
					MaxSegmentSize:   64 * memory.MiB,
					MaxBufferMem:     memory.Size(256),
					ErasureShareSize: memory.Size(256),
					MinThreshold:     (planet.config.StorageNodeCount * 1 / 5),
					RepairThreshold:  (planet.config.StorageNodeCount * 2 / 5),
					SuccessThreshold: (planet.config.StorageNodeCount * 3 / 5),
					MaxThreshold:     (planet.config.StorageNodeCount * 4 / 5),
					Validate:         false,
				},
				Loop: metainfo.LoopConfig{
					CoalesceDuration: 5 * time.Second,
				},
			},
			Orders: orders.Config{
				Expiration: 7 * 24 * time.Hour,
			},
			Checker: checker.Config{
				Interval:                  30 * time.Second,
				IrreparableInterval:       15 * time.Second,
				ReliabilityCacheStaleness: 5 * time.Minute,
			},
			Repairer: repairer.Config{
				// Repair uploads only up to MaxExcessRateOptimalThreshold
				// above the optimal threshold, not up to the max threshold
				// (#2423).
				MaxRepair:                     10,
				Interval:                      time.Hour,
				Timeout:                       1 * time.Minute, // Repairs can take up to 10 seconds; leave room for outliers.
				MaxBufferMem:                  4 * memory.MiB,
				MaxExcessRateOptimalThreshold: 0.05,
			},
			Audit: audit.Config{
				MaxRetriesStatDB:   0,
				Interval:           30 * time.Second,
				MinBytesPerSecond:  1 * memory.KB,
				MinDownloadTimeout: 5 * time.Second,
			},
			GarbageCollection: gc.Config{
				Interval:          1 * time.Minute,
				Enabled:           true,
				InitialPieces:     10,
				FalsePositiveRate: 0.1,
				ConcurrentSends:   1,
			},
			DBCleanup: dbcleanup.Config{
				SerialsInterval: time.Hour,
			},
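			// Accounting intervals are kept short so tests can observe
			// tallies and rollups without long waits.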
			Tally: tally.Config{
				Interval: 30 * time.Second,
			},
			Rollup: rollup.Config{
				Interval:      2 * time.Minute,
				MaxAlphaUsage: 25 * memory.GB,
				DeleteTallies: false,
			},
			Mail: mailservice.Config{
				SMTPServerAddress: "smtp.mail.test:587",
				From:              "Labs <storj@mail.test>",
				AuthType:          "simulate",
				TemplatePath:      filepath.Join(developmentRoot, "web/satellite/static/emails"),
			},
			Console: consoleweb.Config{
				Address:         "127.0.0.1:0",
				StaticDir:       filepath.Join(developmentRoot, "web/satellite"),
				PasswordCost:    console.TestPasswordCost,
				AuthTokenSecret: "my-suppa-secret-key",
			},
			Marketing: marketingweb.Config{
				Address:   "127.0.0.1:0",
				StaticDir: filepath.Join(developmentRoot, "web/marketing"),
			},
			Version: planet.NewVersionConfig(),
		}
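
		// Tests may override any of the defaults above via the
		// Reconfigure.Satellite hook before the peer is constructed.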
		if planet.config.Reconfigure.Satellite != nil {
			planet.config.Reconfigure.Satellite(log, i, &config)
		}

		versionInfo := planet.NewVersionInfo()

		revocationDB, err := revocation.NewDBFromCfg(config.Server.Config)
		if err != nil {
			return xs, errs.Wrap(err)
		}
		planet.databases = append(planet.databases, revocationDB)

		peer, err := satellite.New(log, identity, db, revocationDB, &config, versionInfo)
		if err != nil {
			return xs, err
		}
		err = db.CreateTables()
		if err != nil {
			return nil, err
		}
		planet.databases = append(planet.databases, db)

		log.Debug("id=" + peer.ID().String() + " addr=" + peer.Addr())
		xs = append(xs, peer)
	}
	return xs, nil
}
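
// A minimal usage sketch for this helper (assuming the testplanet.New
// signature and the testcontext helpers from the surrounding packages;
// the counts are illustrative):
//
//	ctx := testcontext.New(t)
//	defer ctx.Cleanup()
//
//	// 1 satellite, 4 storage nodes, 1 uplink
//	planet, err := testplanet.New(t, 1, 4, 1)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer ctx.Check(planet.Shutdown)
//
//	planet.Start(ctx)
//	// planet.Satellites[0] is a *satellite.Peer built by newSatellites.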