satellite/repair: enable declumping by default

This feature flag was disabled by default so that it could be rolled out slowly. It has been enabled
for some time on one production satellite and on test satellites without
any issues. We can now enable it by default in code.

Change-Id: If9c36895bbbea12bd4aefa30cb4df912e1729e4c
This commit is contained in:
Michal Niewrzal 2023-07-17 16:23:20 +02:00 committed by Michał Niewrzał
parent 5272fd8497
commit 47a4d4986d
4 changed files with 10 additions and 5 deletions

View File

@ -24,7 +24,7 @@ type Config struct {
NodeFailureRate float64 `help:"the probability of a single node going down within the next checker iteration" default:"0.00005435" `
RepairQueueInsertBatchSize int `help:"Number of damaged segments to buffer in-memory before flushing to the repair queue" default:"100" `
RepairExcludedCountryCodes []string `help:"list of country codes to treat node from this country as offline " default:"" hidden:"true"`
DoDeclumping bool `help:"Treat pieces on the same network as in need of repair" default:"false"`
DoDeclumping bool `help:"Treat pieces on the same network as in need of repair" default:"true"`
DoPlacementCheck bool `help:"Treat pieces out of segment placement as in need of repair" default:"true"`
}

View File

@ -37,7 +37,7 @@ type Config struct {
ReputationUpdateEnabled bool `help:"whether the audit score of nodes should be updated as a part of repair" default:"false"`
UseRangedLoop bool `help:"whether to enable repair checker observer with ranged loop" default:"true"`
RepairExcludedCountryCodes []string `help:"list of country codes to treat node from this country as offline" default:"" hidden:"true"`
DoDeclumping bool `help:"repair pieces on the same network to other nodes" default:"false"`
DoDeclumping bool `help:"repair pieces on the same network to other nodes" default:"true"`
DoPlacementCheck bool `help:"repair pieces out of segment placement" default:"true"`
}

View File

@ -31,7 +31,12 @@ func TestSegmentRepairPlacement(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 12, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(1, 1, piecesCount, piecesCount),
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(1, 1, piecesCount, piecesCount),
func(log *zap.Logger, index int, config *satellite.Config) {
config.Repairer.DoDeclumping = false
},
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))

View File

@ -92,7 +92,7 @@
# audit.worker-concurrency: 2
# Treat pieces on the same network as in need of repair
# checker.do-declumping: false
# checker.do-declumping: true
# Treat pieces out of segment placement as in need of repair
# checker.do-placement-check: true
@ -938,7 +938,7 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# repairer.dial-timeout: 5s
# repair pieces on the same network to other nodes
# repairer.do-declumping: false
# repairer.do-declumping: true
# repair pieces out of segment placement
# repairer.do-placement-check: true