testplanet/satellite: reduce the number of places default values need to be configured

Satellites set their configuration values to default values using
cfgstruct, however, it turns out our tests don't test these values
at all! Instead, they have a completely separate definition system
that is easy to forget about.

As is to be expected, these values have drifted, and it appears
in a few cases test planet is testing unreasonable values that we
won't see in production, or perhaps worse, features enabled in
production were missed and weren't enabled in testplanet.

This change makes it so all values are configured in the same,
systematic way, so it's easy to see when test values differ
from dev or release values, and it's harder to forget
to enable features in testplanet.

In terms of reviewing, this change should actually be fairly
easy to review, considering private/testplanet/satellite.go keeps
the current config system and the new one and confirms that they
result in identical configurations, so you can be certain that
nothing was missed and the config is all correct.
You can also check the config lock to see what actual config
values changed.

Change-Id: I6715d0794887f577e21742afcf56fd2b9d12170e
This commit is contained in:
JT Olio 2021-05-31 15:15:00 -06:00 committed by JT Olio
parent 75bf2ad586
commit da9ca0c650
31 changed files with 404 additions and 84 deletions

2
go.mod
View File

@ -56,6 +56,6 @@ require (
storj.io/common v0.0.0-20210504141454-bcb03a80052f
storj.io/drpc v0.0.20
storj.io/monkit-jaeger v0.0.0-20210426161729-debb1cbcbbd7
storj.io/private v0.0.0-20210525113513-421a3648fb1e
storj.io/private v0.0.0-20210531210843-5b0f672699d1
storj.io/uplink v1.5.0-rc.1.0.20210512164354-e2e5889614a9
)

4
go.sum
View File

@ -859,7 +859,7 @@ storj.io/drpc v0.0.20/go.mod h1:eAxUDk8HWvGl9iqznpuphtZ+WIjIGPJFqNXuKHgRiMM=
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/monkit-jaeger v0.0.0-20210426161729-debb1cbcbbd7 h1:zi0w9zoBfvuqysSAqxJT1Ton2YB5IhyMM3/3CISjlrQ=
storj.io/monkit-jaeger v0.0.0-20210426161729-debb1cbcbbd7/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20210525113513-421a3648fb1e h1:rrl8c7lOCae04CYfYTciQBfAwaTrta3b4iWyxwI+XjM=
storj.io/private v0.0.0-20210525113513-421a3648fb1e/go.mod h1:iAc+LGwXYCe+YRRTlkfkg95ZBEL8pWHLVZ508/KQjOs=
storj.io/private v0.0.0-20210531210843-5b0f672699d1 h1:jR5WRCvaFHXw5As0YHpeSSH/jLjGieTjfSjOlmtfqX0=
storj.io/private v0.0.0-20210531210843-5b0f672699d1/go.mod h1:iAc+LGwXYCe+YRRTlkfkg95ZBEL8pWHLVZ508/KQjOs=
storj.io/uplink v1.5.0-rc.1.0.20210512164354-e2e5889614a9 h1:F+A+Ki4eo3uzYXxesihRBq7PYBhU8MgfZeebd4O8hio=
storj.io/uplink v1.5.0-rc.1.0.20210512164354-e2e5889614a9/go.mod h1:geRW2dh4rvPhgruFZbN71LSYkMmCJLpwg0y8K/uLr3Y=

View File

@ -0,0 +1,218 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information
// TODO: this whole file can be removed in a future PR.
package testplanet
import (
"reflect"
"github.com/zeebo/errs"
"storj.io/storj/satellite/compensation"
)
// showInequality can be removed in a future PR. This is a lightly edited version
// of reflect.DeepEqual but made to show what is different.
//
// It walks v1 and v2 in lockstep and returns nil when they are deeply equal,
// or an error describing the first mismatch found (struct mismatches are
// prefixed with the field name, building a rough path to the difference).
// compensation.Rate values are compared by their string form, mirroring the
// special case in deepValueEqual below.
//
// NOTE(review): like reflect.DeepEqual's core, this keeps no visited set, so
// cyclic values would recurse forever, and the default case calls
// Interface(), which panics on unexported fields — presumably satellite
// configs contain neither; confirm before reusing this elsewhere.
func showInequality(v1, v2 reflect.Value) error {
	if !v1.IsValid() || !v2.IsValid() {
		// Invalid (zero) reflect.Values are equal only to each other.
		if v1.IsValid() != v2.IsValid() {
			return errs.New("mismatch on validity")
		}
		return nil
	}
	if v1.Type() != v2.Type() {
		return errs.New("type mismatch")
	}
	if v1.CanInterface() {
		// Special case: compare compensation.Rate by string form so rates
		// with differing internal representations still count as equal.
		if dv1, ok := v1.Interface().(compensation.Rate); ok {
			dv2 := v2.Interface().(compensation.Rate)
			if dv1.String() == dv2.String() {
				return nil
			}
			return errs.New("compensation.Rate mismatch: %q != %q", dv1.String(), dv2.String())
		}
	}
	switch v1.Kind() {
	case reflect.Array:
		// Type identity above guarantees equal lengths; compare elementwise.
		for i := 0; i < v1.Len(); i++ {
			if err := showInequality(v1.Index(i), v2.Index(i)); err != nil {
				return err
			}
		}
		return nil
	case reflect.Slice:
		if v1.IsNil() != v2.IsNil() {
			return errs.New("a slice is nil")
		}
		if v1.Len() != v2.Len() {
			return errs.New("slice length mismatch")
		}
		if v1.Pointer() == v2.Pointer() {
			// Same backing array and same length: contents must be equal.
			return nil
		}
		for i := 0; i < v1.Len(); i++ {
			if err := showInequality(v1.Index(i), v2.Index(i)); err != nil {
				return err
			}
		}
		return nil
	case reflect.Interface:
		if v1.IsNil() || v2.IsNil() {
			if v1.IsNil() != v2.IsNil() {
				return errs.New("an interface is nil")
			}
			return nil
		}
		// Compare the dynamic values held inside the interfaces.
		return showInequality(v1.Elem(), v2.Elem())
	case reflect.Ptr:
		if v1.Pointer() == v2.Pointer() {
			// Identical pointers (including both nil) are trivially equal.
			return nil
		}
		return showInequality(v1.Elem(), v2.Elem())
	case reflect.Struct:
		for i, n := 0, v1.NumField(); i < n; i++ {
			if err := showInequality(v1.Field(i), v2.Field(i)); err != nil {
				// Prefix the failing field name so nested mismatches report a path.
				return errs.New("struct field %q: %+v", v1.Type().Field(i).Name, err)
			}
		}
		return nil
	case reflect.Map:
		if v1.IsNil() != v2.IsNil() {
			return errs.New("a map is nil")
		}
		if v1.Len() != v2.Len() {
			return errs.New("map len mismatch")
		}
		if v1.Pointer() == v2.Pointer() {
			return nil
		}
		for _, k := range v1.MapKeys() {
			val1 := v1.MapIndex(k)
			val2 := v2.MapIndex(k)
			if !val1.IsValid() || !val2.IsValid() {
				// A key present in v1 but absent in v2 yields an invalid value.
				return errs.New("invalid map index")
			}
			if err := showInequality(val1, val2); err != nil {
				return err
			}
		}
		return nil
	case reflect.Func:
		if v1.IsNil() && v2.IsNil() {
			return nil
		}
		// Can't do better than this:
		return errs.New("funcs can't be equal")
	default:
		// Normal equality suffices
		if v1.Interface() != v2.Interface() {
			return errs.New("v1 %q != v2 %q", v1, v2)
		}
		return nil
	}
}
// deepEqual is simply reflect.DeepEqual but with special handling for
// compensation.Rate (rates compare equal when their string forms match).
// Two nil arguments are equal; a single nil never matches anything.
func deepEqual(x, y interface{}) bool {
	if x == nil || y == nil {
		// Equal only when both are nil.
		return x == y
	}
	a, b := reflect.ValueOf(x), reflect.ValueOf(y)
	if a.Type() != b.Type() {
		// Distinct dynamic types can never be deeply equal.
		return false
	}
	return deepValueEqual(a, b)
}
// deepValueEqual is simply reflect.deepValueEqual but with special handling
// for compensation.Rate.
//
// It reports whether v1 and v2 are deeply equal, comparing
// compensation.Rate values by their string form instead of field-by-field.
// showInequality above is the error-reporting twin of this function and
// must stay in sync with it.
//
// NOTE(review): unlike reflect's internal deepValueEqual, there is no
// visited-pointer set, so cyclic values would recurse without bound —
// presumably fine for config structs; confirm before wider reuse.
func deepValueEqual(v1, v2 reflect.Value) bool {
	if !v1.IsValid() || !v2.IsValid() {
		// Invalid (zero) reflect.Values are equal only to each other.
		return v1.IsValid() == v2.IsValid()
	}
	if v1.Type() != v2.Type() {
		return false
	}
	if v1.CanInterface() {
		// Special case: compensation.Rate compares by string form.
		if dv1, ok := v1.Interface().(compensation.Rate); ok {
			return dv1.String() == v2.Interface().(compensation.Rate).String()
		}
	}
	switch v1.Kind() {
	case reflect.Array:
		// Type identity above guarantees equal lengths; compare elementwise.
		for i := 0; i < v1.Len(); i++ {
			if !deepValueEqual(v1.Index(i), v2.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Slice:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		if v1.Pointer() == v2.Pointer() {
			// Same backing array and same length: contents must be equal.
			return true
		}
		for i := 0; i < v1.Len(); i++ {
			if !deepValueEqual(v1.Index(i), v2.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Interface:
		if v1.IsNil() || v2.IsNil() {
			return v1.IsNil() == v2.IsNil()
		}
		// Compare the dynamic values held inside the interfaces.
		return deepValueEqual(v1.Elem(), v2.Elem())
	case reflect.Ptr:
		if v1.Pointer() == v2.Pointer() {
			// Identical pointers (including both nil) are trivially equal.
			return true
		}
		return deepValueEqual(v1.Elem(), v2.Elem())
	case reflect.Struct:
		for i, n := 0, v1.NumField(); i < n; i++ {
			if !deepValueEqual(v1.Field(i), v2.Field(i)) {
				return false
			}
		}
		return true
	case reflect.Map:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		if v1.Pointer() == v2.Pointer() {
			return true
		}
		for _, k := range v1.MapKeys() {
			val1 := v1.MapIndex(k)
			val2 := v2.MapIndex(k)
			// An invalid value means the key is missing from v2.
			if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2) {
				return false
			}
		}
		return true
	case reflect.Func:
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		// Can't do better than this:
		return false
	default:
		// Normal equality suffices
		return v1.Interface() == v2.Interface()
	}
}

View File

@ -8,10 +8,12 @@ import (
"net"
"os"
"path/filepath"
"reflect"
"runtime/pprof"
"strconv"
"time"
"github.com/spf13/pflag"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
@ -24,6 +26,7 @@ import (
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/private/cfgstruct"
"storj.io/private/debug"
"storj.io/private/version"
"storj.io/storj/private/revocation"
@ -40,6 +43,7 @@ import (
"storj.io/storj/satellite/accounting/tally"
"storj.io/storj/satellite/admin"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/compensation"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/console/consoleweb"
@ -385,17 +389,14 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
return nil, err
}
// TODO: it is a huge surprise that this doesn't use the config
// parsing `default` or `devDefault` struct tag values.
// we should use storj.io/private/cfgstruct to autopopulate default
// config values and then only override ones in special cases.
config := satellite.Config{
// TODO: in a future PR, remove config2 entirely.
config2 := satellite.Config{
Server: server.Config{
Address: "127.0.0.1:0",
PrivateAddress: "127.0.0.1:0",
Config: tlsopts.Config{
RevocationDBURL: "bolt://" + filepath.Join(storageDir, "revocation.db"),
RevocationDBURL: "bolt://" + filepath.Join(storageDir, "revocations.db"),
UsePeerCAWhitelist: true,
PeerCAWhitelistPath: planet.whitelistPath,
PeerIDVersions: "latest",
@ -622,6 +623,90 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
Metrics: metrics.Config{},
}
var config satellite.Config
cfgstruct.Bind(pflag.NewFlagSet("", pflag.PanicOnError), &config,
cfgstruct.UseTestDefaults(),
cfgstruct.ConfDir(storageDir),
cfgstruct.IdentityDir(storageDir),
cfgstruct.ConfigVar("TESTINTERVAL", defaultInterval.String()))
// TODO: these are almost certainly mistakenly set to the zero value
// in tests due to a prior mismatch between testplanet config and
// cfgstruct devDefaults. we need to make sure it's safe to remove
// these lines and then remove them.
config.Debug.Control = false
config.Overlay.Node.AsOfSystemTime.Enabled = false
config.Overlay.Node.AsOfSystemTime.DefaultInterval = 0
config.Overlay.AuditHistory.OfflineDQEnabled = false
config.Server.Config.Extensions.Revocation = false
config.Metainfo.Loop.MaxAsOfSystemDuration = 0
config.Orders.OrdersSemaphoreSize = 0
config.Checker.NodeFailureRate = 0
config.Audit.MaxRetriesStatDB = 0
config.GarbageCollection.RetainSendTimeout = 0
config.ExpiredDeletion.ListLimit = 0
config.Tally.SaveRollupBatchSize = 0
config.Tally.ReadRollupBatchSize = 0
config.Rollup.DeleteTallies = false
config.Payments.BonusRate = 0
config.Payments.MinCoinPayment = 0
config.Payments.NodeEgressBandwidthPrice = 0
config.Payments.NodeRepairBandwidthPrice = 0
config.Payments.NodeAuditBandwidthPrice = 0
config.Payments.NodeDiskSpacePrice = 0
config.Identity.CertPath = ""
config.Identity.KeyPath = ""
config.Metainfo.DatabaseURL = ""
config.Console.ContactInfoURL = ""
config.Console.FrameAncestors = ""
config.Console.LetUsKnowURL = ""
config.Console.SEO = ""
config.Console.SatelliteName = ""
config.Console.SatelliteOperator = ""
config.Console.TermsAndConditionsURL = ""
config.Console.PartneredSatellites = ""
config.Console.GeneralRequestURL = ""
config.Console.ProjectLimitsIncreaseRequestURL = ""
config.Console.GatewayCredentialsRequestURL = ""
config.Console.DocumentationURL = ""
config.Console.LinksharingURL = ""
config.Console.PathwayOverviewEnabled = false
config.GracefulExit.AsOfSystemTimeInterval = 0
config.Compensation.Rates.AtRestGBHours = compensation.Rate{}
config.Compensation.Rates.GetTB = compensation.Rate{}
config.Compensation.Rates.GetRepairTB = compensation.Rate{}
config.Compensation.Rates.GetAuditTB = compensation.Rate{}
config.Compensation.WithheldPercents = nil
config.Compensation.DisposePercent = 0
config.ProjectLimit.CacheCapacity = 0
config.ProjectLimit.CacheExpiration = 0
config.Metainfo.SegmentLoop.ListLimit = 0
// Actual testplanet-specific configuration
config.Server.Address = "127.0.0.1:0"
config.Server.PrivateAddress = "127.0.0.1:0"
config.Admin.Address = "127.0.0.1:0"
config.Console.Address = "127.0.0.1:0"
config.Server.Config.PeerCAWhitelistPath = planet.whitelistPath
config.Server.Config.UsePeerCAWhitelist = true
config.Version = planet.NewVersionConfig()
config.Metainfo.RS.Min = atLeastOne(planet.config.StorageNodeCount * 1 / 5)
config.Metainfo.RS.Repair = atLeastOne(planet.config.StorageNodeCount * 2 / 5)
config.Metainfo.RS.Success = atLeastOne(planet.config.StorageNodeCount * 3 / 5)
config.Metainfo.RS.Total = atLeastOne(planet.config.StorageNodeCount * 4 / 5)
config.Orders.EncryptionKeys = *encryptionKeys
config.LiveAccounting.StorageBackend = "redis://" + redis.Addr() + "?db=0"
config.Mail.TemplatePath = filepath.Join(developmentRoot, "web/satellite/static/emails")
config.Console.StaticDir = filepath.Join(developmentRoot, "web/satellite")
// TODO: remove the following equality check in a future PR.
if !deepEqual(config, config2) {
if err := showInequality(reflect.ValueOf(config), reflect.ValueOf(config2)); err != nil {
return nil, err
}
return nil, errs.New("show inequality error")
}
if planet.config.Reconfigure.Satellite != nil {
planet.config.Reconfigure.Satellite(log, index, &config)
}

View File

@ -17,8 +17,8 @@ import (
// IPRateLimiterConfig configures an IPRateLimiter.
type IPRateLimiterConfig struct {
Duration time.Duration `help:"the rate at which request are allowed" default:"5m"`
Burst int `help:"number of events before the limit kicks in" default:"5"`
NumLimits int `help:"number of IPs whose rate limits we store" default:"1000"`
Burst int `help:"number of events before the limit kicks in" default:"5" testDefault:"3"`
NumLimits int `help:"number of IPs whose rate limits we store" default:"1000" testDefault:"10"`
}
// IPRateLimiter imposes a rate limit per HTTP user IP.

View File

@ -19,8 +19,8 @@ var mon = monkit.Package()
// Config is a configuration struct for the Chore.
type Config struct {
Interval time.Duration `help:"how often to remove unused project bandwidth rollups" default:"168h"`
RetainMonths int `help:"number of months of project bandwidth rollups to retain, not including the current month" default:"2"`
Interval time.Duration `help:"how often to remove unused project bandwidth rollups" default:"168h" testDefault:"$TESTINTERVAL"`
RetainMonths int `help:"number of months of project bandwidth rollups to retain, not including the current month" default:"2" testDefault:"1"`
}
// Chore to remove unused project bandwidth rollups.

View File

@ -17,7 +17,7 @@ import (
// Config contains configurable values for rollup.
type Config struct {
Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"`
Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s" testDefault:"$TESTINTERVAL"`
DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"`
}

View File

@ -23,9 +23,9 @@ var (
// Config contains configurable values for rollup archiver.
type Config struct {
Interval time.Duration `help:"how frequently rollup archiver should run" releaseDefault:"24h" devDefault:"120s"`
ArchiveAge time.Duration `help:"age at which a rollup is archived" default:"2160h"`
BatchSize int `help:"number of records to delete per delete execution. Used only for crdb which is slow without limit." default:"500"`
Interval time.Duration `help:"how frequently rollup archiver should run" releaseDefault:"24h" devDefault:"120s" testDefault:"$TESTINTERVAL"`
ArchiveAge time.Duration `help:"age at which a rollup is archived" default:"2160h" testDefault:"24h"`
BatchSize int `help:"number of records to delete per delete execution. Used only for crdb which is slow without limit." default:"500" testDefault:"1000"`
Enabled bool `help:"whether or not the rollup archive is enabled." default:"true"`
}

View File

@ -27,7 +27,7 @@ var (
// Config contains configurable values for the tally service.
type Config struct {
Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s"`
Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s" testDefault:"$TESTINTERVAL"`
SaveRollupBatchSize int `help:"how large of batches SaveRollup should process at a time" default:"1000"`
ReadRollupBatchSize int `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
}

View File

@ -21,12 +21,12 @@ var Error = errs.Class("audit")
// Config contains configurable values for audit chore and workers.
type Config struct {
MaxRetriesStatDB int `help:"max number of times to attempt updating a statdb batch" default:"3"`
MinBytesPerSecond memory.Size `help:"the minimum acceptable bytes that storage nodes can transfer per second to the satellite" default:"128B"`
MinDownloadTimeout time.Duration `help:"the minimum duration for downloading a share from storage nodes before timing out" default:"5m0s"`
MinBytesPerSecond memory.Size `help:"the minimum acceptable bytes that storage nodes can transfer per second to the satellite" default:"128B" testDefault:"1.00 KB"`
MinDownloadTimeout time.Duration `help:"the minimum duration for downloading a share from storage nodes before timing out" default:"5m0s" testDefault:"5s"`
MaxReverifyCount int `help:"limit above which we consider an audit is failed" default:"3"`
ChoreInterval time.Duration `help:"how often to run the reservoir chore" releaseDefault:"24h" devDefault:"1m"`
QueueInterval time.Duration `help:"how often to recheck an empty audit queue" releaseDefault:"1h" devDefault:"1m"`
ChoreInterval time.Duration `help:"how often to run the reservoir chore" releaseDefault:"24h" devDefault:"1m" testDefault:"$TESTINTERVAL"`
QueueInterval time.Duration `help:"how often to recheck an empty audit queue" releaseDefault:"1h" devDefault:"1m" testDefault:"$TESTINTERVAL"`
Slots int `help:"number of reservoir slots allotted for nodes, currently capped at 3" default:"3"`
WorkerConcurrency int `help:"number of workers to run audits on segments" default:"2"`
}

View File

@ -61,12 +61,12 @@ var (
// Config contains configuration for console web server.
type Config struct {
Address string `help:"server address of the graphql api gateway and frontend app" devDefault:"" releaseDefault:":10100"`
Address string `help:"server address of the graphql api gateway and frontend app" devDefault:"127.0.0.1:0" releaseDefault:":10100"`
StaticDir string `help:"path to static resources" default:""`
ExternalAddress string `help:"external endpoint of the satellite if hosted" default:""`
// TODO: remove after Vanguard release
AuthToken string `help:"auth token needed for access to registration token creation endpoint" default:""`
AuthToken string `help:"auth token needed for access to registration token creation endpoint" default:"" testDefault:"very-secret-token"`
AuthTokenSecret string `help:"secret used to sign auth tokens" releaseDefault:"" devDefault:"my-suppa-secret-key"`
ContactInfoURL string `help:"url link to contacts page" default:"https://forum.storj.io"`

View File

@ -48,8 +48,8 @@ type Projects interface {
// UsageLimitsConfig is a configuration struct for default per-project usage limits.
type UsageLimitsConfig struct {
DefaultStorageLimit memory.Size `help:"the default storage usage limit" default:"50.00GB"`
DefaultBandwidthLimit memory.Size `help:"the default bandwidth usage limit" default:"50.00GB"`
DefaultStorageLimit memory.Size `help:"the default storage usage limit" default:"50.00GB" testDefault:"25.00 GB"`
DefaultBandwidthLimit memory.Size `help:"the default bandwidth usage limit" default:"50.00GB" testDefault:"25.00 GB"`
}
// Project is a database object that describes Project entity.

View File

@ -14,6 +14,7 @@ import (
"time"
"github.com/spacemonkeygo/monkit/v3"
"github.com/spf13/pflag"
"github.com/stripe/stripe-go"
"github.com/zeebo/errs"
"go.uber.org/zap"
@ -22,6 +23,7 @@ import (
"storj.io/common/macaroon"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/private/cfgstruct"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/analytics"
"storj.io/storj/satellite/console/consoleauth"
@ -100,11 +102,23 @@ type Service struct {
minCoinPayment int64
}
func init() {
var c Config
cfgstruct.Bind(pflag.NewFlagSet("", pflag.PanicOnError), &c, cfgstruct.UseTestDefaults())
if c.PasswordCost != TestPasswordCost {
panic("invalid test constant defined in struct tag")
}
cfgstruct.Bind(pflag.NewFlagSet("", pflag.PanicOnError), &c, cfgstruct.UseReleaseDefaults())
if c.PasswordCost != 0 {
panic("invalid release constant defined in struct tag. should be 0 (=automatic)")
}
}
// Config keeps track of core console service configuration parameters.
type Config struct {
PasswordCost int `help:"password hashing cost (0=automatic)" internal:"true"`
PasswordCost int `help:"password hashing cost (0=automatic)" testDefault:"4" default:"0"`
OpenRegistrationEnabled bool `help:"enable open registration" default:"false"`
DefaultProjectLimit int `help:"default project limits for users" default:"3"`
DefaultProjectLimit int `help:"default project limits for users" default:"3" testDefault:"5"`
UsageLimits UsageLimitsConfig
}

View File

@ -23,7 +23,7 @@ import (
// Config contains configurable values for contact service.
type Config struct {
ExternalAddress string `user:"true" help:"the public address of the node, useful for nodes behind NAT" default:""`
Timeout time.Duration `help:"timeout for pinging storage nodes" default:"10m0s"`
Timeout time.Duration `help:"timeout for pinging storage nodes" default:"10m0s" testDefault:"1m"`
RateLimitInterval time.Duration `help:"the amount of time that should happen between contact attempts usually" releaseDefault:"10m0s" devDefault:"1ns"`
RateLimitBurst int `help:"the maximum burst size for the contact rate limit token bucket" releaseDefault:"2" devDefault:"1000"`

View File

@ -29,7 +29,7 @@ var (
// Config contains configurable values for garbage collection.
type Config struct {
Interval time.Duration `help:"the time between each send of garbage collection filters to storage nodes" releaseDefault:"120h" devDefault:"10m"`
Interval time.Duration `help:"the time between each send of garbage collection filters to storage nodes" releaseDefault:"120h" devDefault:"10m" testDefault:"$TESTINTERVAL"`
Enabled bool `help:"set if garbage collection is enabled or not" releaseDefault:"true" devDefault:"true"`
SkipFirst bool `help:"if true, skip the first run of GC" releaseDefault:"true" devDefault:"false"`
RunInCore bool `help:"if true, run garbage collection as part of the core" releaseDefault:"false" devDefault:"false"`

View File

@ -27,18 +27,18 @@ var (
type Config struct {
Enabled bool `help:"whether or not graceful exit is enabled on the satellite side." default:"true"`
ChoreBatchSize int `help:"size of the buffer used to batch inserts into the transfer queue." default:"500"`
ChoreInterval time.Duration `help:"how often to run the transfer queue chore." releaseDefault:"30s" devDefault:"10s"`
ChoreBatchSize int `help:"size of the buffer used to batch inserts into the transfer queue." default:"500" testDefault:"10"`
ChoreInterval time.Duration `help:"how often to run the transfer queue chore." releaseDefault:"30s" devDefault:"10s" testDefault:"$TESTINTERVAL"`
EndpointBatchSize int `help:"size of the buffer used to batch transfer queue reads and sends to the storage node." default:"300"`
EndpointBatchSize int `help:"size of the buffer used to batch transfer queue reads and sends to the storage node." default:"300" testDefault:"100"`
MaxFailuresPerPiece int `help:"maximum number of transfer failures per piece." default:"5"`
OverallMaxFailuresPercentage int `help:"maximum percentage of transfer failures per node." default:"10"`
MaxInactiveTimeFrame time.Duration `help:"maximum inactive time frame of transfer activities per node." default:"168h"`
RecvTimeout time.Duration `help:"the minimum duration for receiving a stream from a storage node before timing out" default:"2h"`
MaxOrderLimitSendCount int `help:"maximum number of order limits a satellite sends to a node before marking piece transfer failed" default:"10"`
NodeMinAgeInMonths int `help:"minimum age for a node on the network in order to initiate graceful exit" default:"6"`
MaxInactiveTimeFrame time.Duration `help:"maximum inactive time frame of transfer activities per node." default:"168h" testDefault:"10s"`
RecvTimeout time.Duration `help:"the minimum duration for receiving a stream from a storage node before timing out" default:"2h" testDefault:"1m"`
MaxOrderLimitSendCount int `help:"maximum number of order limits a satellite sends to a node before marking piece transfer failed" default:"10" testDefault:"3"`
NodeMinAgeInMonths int `help:"minimum age for a node on the network in order to initiate graceful exit" default:"6" testDefault:"0"`
AsOfSystemTimeInterval time.Duration `help:"interval for AS OF SYSTEM TIME clause (crdb specific) to read from db at a specific time in the past " default:"-10s"`
AsOfSystemTimeInterval time.Duration `help:"interval for AS OF SYSTEM TIME clause (crdb specific) to read from db at a specific time in the past" default:"-10s"`
TransferQueueBatchSize int `help:"batch size (crdb specific) for deleting and adding items to the transfer queue" default:"1000"`
}

View File

@ -20,9 +20,9 @@ import (
// Config defines values needed by mailservice service.
type Config struct {
SMTPServerAddress string `help:"smtp server address" default:""`
SMTPServerAddress string `help:"smtp server address" default:"" testDefault:"smtp.mail.test:587"`
TemplatePath string `help:"path to email templates source" default:""`
From string `help:"sender email address" default:""`
From string `help:"sender email address" default:"" testDefault:"Labs <storj@mail.test>"`
AuthType string `help:"smtp authentication type" releaseDefault:"login" devDefault:"simulate"`
Login string `help:"plain/login auth user login" default:""`
Password string `help:"plain/login auth user password" default:""`

View File

@ -160,9 +160,9 @@ func (observer *observerContext) Wait() error {
// Config contains configurable values for the metainfo loop.
type Config struct {
CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s"`
CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s" testDefault:"1s"`
RateLimit float64 `help:"rate limit (default is 0 which is unlimited segments per second)" default:"0"`
ListLimit int `help:"how many items to query in a batch" default:"2500"`
ListLimit int `help:"how many items to query in a batch" default:"2500" testDefault:"10000"`
MaxAsOfSystemDuration time.Duration `help:"limits how old can AS OF SYSTEM TIME query be" releaseDefault:"5m" devDefault:"5m"`
}

View File

@ -130,7 +130,7 @@ func (observer *observerContext) Wait() error {
// Config contains configurable values for the segments loop.
type Config struct {
CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s"`
CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s" testDefault:"1s"`
RateLimit float64 `help:"rate limit (default is 0 which is unlimited segments per second)" default:"0"`
ListLimit int `help:"how many items to query in a batch" default:"2500"`
}

View File

@ -94,24 +94,24 @@ func (rs *RSConfig) Set(s string) error {
// RateLimiterConfig is a configuration struct for endpoint rate limiting.
type RateLimiterConfig struct {
Enabled bool `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`
Rate float64 `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100"`
CacheCapacity int `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10"`
Rate float64 `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100" testDefault:"1000"`
CacheCapacity int `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10" testDefault:"100"`
CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
}
// ProjectLimitConfig is a configuration struct for default project limits.
type ProjectLimitConfig struct {
MaxBuckets int `help:"max bucket count for a project." default:"100"`
MaxBuckets int `help:"max bucket count for a project." default:"100" testDefault:"10"`
}
// Config is a configuration struct that is everything you need to start a metainfo.
type Config struct {
DatabaseURL string `help:"the database connection string to use" default:"postgres://"`
MinRemoteSegmentSize memory.Size `default:"1240" help:"minimum remote segment size"`
MinRemoteSegmentSize memory.Size `default:"1240" testDefault:"0" help:"minimum remote segment size"` // TODO: fix tests to work with 1024
MaxInlineSegmentSize memory.Size `default:"4KiB" help:"maximum inline segment size"`
MaxSegmentSize memory.Size `default:"64MiB" help:"maximum segment size"`
MaxMetadataSize memory.Size `default:"2KiB" help:"maximum segment metadata size"`
MaxCommitInterval time.Duration `default:"48h" help:"maximum time allowed to pass between creating and committing a segment"`
MaxCommitInterval time.Duration `default:"48h" testDefault:"1h" help:"maximum time allowed to pass between creating and committing a segment"`
Overlay bool `default:"true" help:"toggle flag if overlay is enabled"`
RS RSConfig `releaseDefault:"29/35/80/110-256B" devDefault:"4/6/8/10-256B" help:"redundancy scheme configuration in the format k/m/o/n-sharesize"`
Loop metaloop.Config `help:"loop configuration"`

View File

@ -23,7 +23,7 @@ var (
// Config contains configurable values for expired segment cleanup.
type Config struct {
Interval time.Duration `help:"the time between each attempt to go through the db and clean up expired segments" releaseDefault:"24h" devDefault:"10s"`
Interval time.Duration `help:"the time between each attempt to go through the db and clean up expired segments" releaseDefault:"24h" devDefault:"10s" testDefault:"$TESTINTERVAL"`
Enabled bool `help:"set if expired segment cleanup is enabled or not" releaseDefault:"true" devDefault:"true"`
ListLimit int `help:"how many expired objects to query in a batch" default:"100"`
}

View File

@ -20,12 +20,12 @@ import (
// Config defines configuration options for Service.
type Config struct {
MaxConcurrency int `help:"maximum number of concurrent requests to storage nodes" default:"100"`
MaxConcurrentPieces int `help:"maximum number of concurrent pieces can be processed" default:"1000000"`
MaxConcurrentPieces int `help:"maximum number of concurrent pieces can be processed" default:"1000000" testDefault:"1000"`
MaxPiecesPerBatch int `help:"maximum number of pieces per batch" default:"5000"`
MaxPiecesPerRequest int `help:"maximum number pieces per single request" default:"1000"`
MaxPiecesPerBatch int `help:"maximum number of pieces per batch" default:"5000" testDefault:"4000"`
MaxPiecesPerRequest int `help:"maximum number pieces per single request" default:"1000" testDefault:"2000"`
DialTimeout time.Duration `help:"timeout for dialing nodes (0 means satellite default)" default:"0"`
DialTimeout time.Duration `help:"timeout for dialing nodes (0 means satellite default)" default:"0" testDefault:"2s"`
FailThreshold time.Duration `help:"threshold for retrying a failed node" releaseDefault:"5m" devDefault:"2s"`
RequestTimeout time.Duration `help:"timeout for a single delete request" releaseDefault:"1m" devDefault:"2s"`
}

View File

@ -33,10 +33,10 @@ var (
// Config is a configuration struct for orders Service.
type Config struct {
EncryptionKeys EncryptionKeys `help:"encryption keys to encrypt info in orders" default:""`
Expiration time.Duration `help:"how long until an order expires" default:"48h"` // 2 days
FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"1000"`
FlushInterval time.Duration `help:"how often to flush the rollups write cache to the database" devDefault:"30s" releaseDefault:"1m"`
NodeStatusLogging bool `hidden:"true" help:"deprecated, log the offline/disqualification status of nodes" default:"false"`
Expiration time.Duration `help:"how long until an order expires" default:"48h" testDefault:"168h"` // default is 2 days
FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"1000" testDefault:"10"`
FlushInterval time.Duration `help:"how often to flush the rollups write cache to the database" devDefault:"30s" releaseDefault:"1m" testDefault:"$TESTINTERVAL"`
NodeStatusLogging bool `hidden:"true" help:"deprecated, log the offline/disqualification status of nodes" default:"false" testDefault:"true"`
OrdersSemaphoreSize int `help:"how many concurrent orders to process at once. zero is unlimited" default:"2"`
}

View File

@ -38,9 +38,9 @@ type NodeSelectionConfig struct {
AuditCount int64 `help:"the number of times a node has been audited to not be considered a New Node" releaseDefault:"100" devDefault:"0"`
NewNodeFraction float64 `help:"the fraction of new nodes allowed per request" releaseDefault:"0.05" devDefault:"1"`
MinimumVersion string `help:"the minimum node software version for node selection queries" default:""`
OnlineWindow time.Duration `help:"the amount of time without seeing a node before its considered offline" default:"4h"`
OnlineWindow time.Duration `help:"the amount of time without seeing a node before its considered offline" default:"4h" testDefault:"1m"`
DistinctIP bool `help:"require distinct IPs when choosing nodes for upload" releaseDefault:"true" devDefault:"false"`
MinimumDiskSpace memory.Size `help:"how much disk space a node at minimum must have to be selected for upload" default:"500.00MB"`
MinimumDiskSpace memory.Size `help:"how much disk space a node at minimum must have to be selected for upload" default:"500.00MB" testDefault:"100.00MB"`
AuditReputationRepairWeight float64 `help:"weight to apply to audit reputation for total repair reputation calculation" default:"1.0"`
AuditReputationUplinkWeight float64 `help:"weight to apply to audit reputation for total uplink reputation calculation" default:"1.0"`
@ -56,7 +56,7 @@ type NodeSelectionConfig struct {
// AuditHistoryConfig is a configuration struct defining time periods and thresholds for penalizing nodes for being offline.
// It is used for downtime suspension and disqualification.
type AuditHistoryConfig struct {
WindowSize time.Duration `help:"The length of time spanning a single audit window" releaseDefault:"12h" devDefault:"5m"`
WindowSize time.Duration `help:"The length of time spanning a single audit window" releaseDefault:"12h" devDefault:"5m" testDefault:"10m"`
TrackingPeriod time.Duration `help:"The length of time to track audit windows for node suspension and disqualification" releaseDefault:"720h" devDefault:"1h"`
GracePeriod time.Duration `help:"The length of time to give suspended SNOs to diagnose and fix issues causing downtime. Afterwards, they will have one tracking period to reach the minimum online score before disqualification" releaseDefault:"168h" devDefault:"1h"`
OfflineThreshold float64 `help:"The point below which a node is punished for offline audits. Determined by calculating the ratio of online/total audits within each window and finding the average across windows within the tracking period." default:"0.6"`

View File

@ -19,8 +19,8 @@ var mon = monkit.Package()
// Config contains configurable values for stray nodes chore.
type Config struct {
EnableDQ bool `help:"whether nodes will be disqualified if they have not been contacted in some time" releaseDefault:"true" devDefault:"true"`
Interval time.Duration `help:"how often to check for and DQ stray nodes" releaseDefault:"168h" devDefault:"5m"`
MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"7200h"`
Interval time.Duration `help:"how often to check for and DQ stray nodes" releaseDefault:"168h" devDefault:"5m" testDefault:"1m"`
MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"7200h" testDefault:"30s"`
Limit int `help:"Max number of nodes to return in a single query. Chore will iterate until rows returned is less than limit" releaseDefault:"1000" devDefault:"1000"`
}

View File

@ -26,7 +26,7 @@ type UploadSelectionDB interface {
// UploadSelectionCacheConfig is a configuration for upload selection cache.
type UploadSelectionCacheConfig struct {
Disabled bool `help:"disable node cache" default:"false"`
Staleness time.Duration `help:"how stale the node selection cache can be" releaseDefault:"3m" devDefault:"5m"`
Staleness time.Duration `help:"how stale the node selection cache can be" releaseDefault:"3m" devDefault:"5m" testDefault:"3m"`
}
// UploadSelectionCache keeps a list of all the storage nodes that are qualified to store data

View File

@ -14,19 +14,19 @@ import (
type Config struct {
Provider string `help:"payments provider to use" default:""`
StripeCoinPayments stripecoinpayments.Config
StorageTBPrice string `help:"price user should pay for storing TB per month" default:"4"`
EgressTBPrice string `help:"price user should pay for each TB of egress" default:"7"`
ObjectPrice string `help:"price user should pay for each object stored in network per month" default:"0"`
StorageTBPrice string `help:"price user should pay for storing TB per month" default:"4" testDefault:"10"`
EgressTBPrice string `help:"price user should pay for each TB of egress" default:"7" testDefault:"45"`
ObjectPrice string `help:"price user should pay for each object stored in network per month" default:"0" testDefault:"0.0000022"`
BonusRate int64 `help:"amount of percents that user will earn as bonus credits by depositing in STORJ tokens" default:"10"`
CouponValue int64 `help:"coupon value in cents" default:"165"`
CouponDuration CouponDuration `help:"duration a new coupon is valid in months/billing cycles. An empty string means the coupon never expires" default:"1"`
CouponValue int64 `help:"coupon value in cents" default:"165" testDefault:"275"`
CouponDuration CouponDuration `help:"duration a new coupon is valid in months/billing cycles. An empty string means the coupon never expires" default:"1" testDefault:"2"`
CouponProjectLimit memory.Size `help:"project limit to which increase to after applying the coupon, 0 B means not changing it from the default" default:"0 B"`
MinCoinPayment int64 `help:"minimum value of coin payments in cents before coupon is applied" default:"1000"`
NodeEgressBandwidthPrice int64 `help:"price node receive for storing TB of egress in cents" default:"2000"`
NodeRepairBandwidthPrice int64 `help:"price node receive for storing TB of repair in cents" default:"1000"`
NodeAuditBandwidthPrice int64 `help:"price node receive for storing TB of audit in cents" default:"1000"`
NodeDiskSpacePrice int64 `help:"price node receive for storing disk space in cents/TB" default:"150"`
PaywallProportion float64 `help:"proportion of users which require a balance to create projects [0-1]" default:"0"`
PaywallProportion float64 `help:"proportion of users which require a balance to create projects [0-1]" default:"0" testDefault:"1"`
}
// CouponDuration is a configuration struct that keeps details about default

View File

@ -45,9 +45,9 @@ type Config struct {
StripeFreeTierCouponID string `help:"stripe free tier coupon ID" default:""`
CoinpaymentsPublicKey string `help:"coinpayments API public key" default:""`
CoinpaymentsPrivateKey string `help:"coinpayments API private key key" default:""`
TransactionUpdateInterval time.Duration `help:"amount of time we wait before running next transaction update loop" default:"2m"`
AccountBalanceUpdateInterval time.Duration `help:"amount of time we wait before running next account balance update loop" default:"2m"`
ConversionRatesCycleInterval time.Duration `help:"amount of time we wait before running next conversion rates update loop" default:"10m"`
TransactionUpdateInterval time.Duration `help:"amount of time we wait before running next transaction update loop" default:"2m" testDefault:"$TESTINTERVAL"`
AccountBalanceUpdateInterval time.Duration `help:"amount of time we wait before running next account balance update loop" default:"2m" testDefault:"$TESTINTERVAL"`
ConversionRatesCycleInterval time.Duration `help:"amount of time we wait before running next conversion rates update loop" default:"10m" testDefault:"$TESTINTERVAL"`
AutoAdvance bool `help:"toogle autoadvance feature for invoice creation" default:"false"`
ListingLimit int `help:"sets the maximum amount of items before we start paging on requests" default:"100" hidden:"true"`
}

View File

@ -15,14 +15,14 @@ import (
// Config contains configurable values for checker.
type Config struct {
Interval time.Duration `help:"how frequently checker should check for bad segments" releaseDefault:"30s" devDefault:"0h0m10s"`
IrreparableInterval time.Duration `help:"how frequently irrepairable checker should check for lost pieces" releaseDefault:"30m" devDefault:"0h0m5s"`
Interval time.Duration `help:"how frequently checker should check for bad segments" releaseDefault:"30s" devDefault:"0h0m10s" testDefault:"$TESTINTERVAL"`
IrreparableInterval time.Duration `help:"how frequently irrepairable checker should check for lost pieces" releaseDefault:"30m" devDefault:"0h0m5s" testDefault:"$TESTINTERVAL"`
ReliabilityCacheStaleness time.Duration `help:"how stale reliable node cache can be" releaseDefault:"5m" devDefault:"5m"`
ReliabilityCacheStaleness time.Duration `help:"how stale reliable node cache can be" releaseDefault:"5m" devDefault:"5m" testDefault:"1m"`
RepairOverrides RepairOverrides `help:"comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)" releaseDefault:"29/80/110-52,29/80/95-52,29/80/130-52" devDefault:""`
// Node failure rate is an estimation based on a 6 hour checker run interval (4 checker iterations per day), a network of about 9200 nodes, and about 2 nodes churning per day.
// This results in `2/9200/4 = 0.00005435` being the probability of any single node going down in the interval of one checker iteration.
NodeFailureRate float64 `help:"the probability of a single node going down within the next checker iteration" default:"0.00005435"`
NodeFailureRate float64 `help:"the probability of a single node going down within the next checker iteration" default:"0.00005435" `
}
// RepairOverride is a configuration struct that contains an override repair

View File

@ -29,12 +29,12 @@ var (
// Config contains configurable values for repairer.
type Config struct {
MaxRepair int `help:"maximum segments that can be repaired concurrently" releaseDefault:"5" devDefault:"1"`
Interval time.Duration `help:"how frequently repairer should try and repair more data" releaseDefault:"5m0s" devDefault:"1m0s"`
Timeout time.Duration `help:"time limit for uploading repaired pieces to new storage nodes" default:"5m0s"`
DownloadTimeout time.Duration `help:"time limit for downloading pieces from a node for repair" default:"5m0s"`
TotalTimeout time.Duration `help:"time limit for an entire repair job, from queue pop to upload completion" default:"45m"`
MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4M"`
MaxRepair int `help:"maximum segments that can be repaired concurrently" releaseDefault:"5" devDefault:"1" testDefault:"10"`
Interval time.Duration `help:"how frequently repairer should try and repair more data" releaseDefault:"5m0s" devDefault:"1m0s" testDefault:"$TESTINTERVAL"`
Timeout time.Duration `help:"time limit for uploading repaired pieces to new storage nodes" default:"5m0s" testDefault:"1m"`
DownloadTimeout time.Duration `help:"time limit for downloading pieces from a node for repair" default:"5m0s" testDefault:"1m"`
TotalTimeout time.Duration `help:"time limit for an entire repair job, from queue pop to upload completion" default:"45m" testDefault:"10m"`
MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4.0 MiB"`
MaxExcessRateOptimalThreshold float64 `help:"ratio applied to the optimal threshold to calculate the excess of the maximum number of repaired pieces to upload" default:"0.05"`
InMemoryRepair bool `help:"whether to download pieces for repair in memory (true) or download to disk (false)" default:"false"`
}

View File

@ -133,6 +133,9 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
# names and addresses of partnered satellites in JSON list format
# console.partnered-satellites: '[["US1","https://us1.storj.io"],["EU1","https://eu1.storj.io"],["AP1","https://ap1.storj.io"]]'
# password hashing cost (0=automatic)
# console.password-cost: 0
# indicates if the overview onboarding step should render with pathways
# console.pathway-overview-enabled: true
@ -253,7 +256,7 @@ contact.external-address: ""
# if true, skip the first run of GC
# garbage-collection.skip-first: true
# interval for AS OF SYSTEM TIME clause (crdb specific) to read from db at a specific time in the past
# interval for AS OF SYSTEM TIME clause (crdb specific) to read from db at a specific time in the past
# graceful-exit.as-of-system-time-interval: -10s
# size of the buffer used to batch inserts into the transfer queue.
@ -626,7 +629,7 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# repairer.interval: 5m0s
# maximum buffer memory (in bytes) to be allocated for read buffers
# repairer.max-buffer-mem: 4.00 MB
# repairer.max-buffer-mem: 4.0 MiB
# ratio applied to the optimal threshold to calculate the excess of the maximum number of repaired pieces to upload
# repairer.max-excess-rate-optimal-threshold: 0.05