From f5d717735b502245f7ef3f05626defe21ac6fb19 Mon Sep 17 00:00:00 2001
From: Michal Niewrzal
Date: Wed, 25 Oct 2023 11:51:46 +0200
Subject: [PATCH] satellite/repair: fix checker and repairer logging

This fixes two small logging issues:

* the repair checker was logging empty last net values as clumped
  pieces, even though the main logic no longer classifies them that way
* the repairer summary log was showing an incorrect number of pieces
  removed from the segment, because the removal list contained
  duplicate entries

There was no real issue here; only the log output was affected.

Change-Id: Ifc6a83637d621d628598200cad00cce44fa4cbf9
---
 satellite/repair/checker/observer.go       |  2 +-
 satellite/repair/repairer/segments.go      | 13 +++++++------
 satellite/repair/repairer/segments_test.go |  8 +-------
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/satellite/repair/checker/observer.go b/satellite/repair/checker/observer.go
index cd3ece1c9..a1b5b13b0 100644
--- a/satellite/repair/checker/observer.go
+++ b/satellite/repair/checker/observer.go
@@ -529,7 +529,7 @@ func (cr *clumpingReport) String() string {
 	netCounts := make(map[string]int)
 	for _, lastNet := range cr.lastNets {
 		if lastNet == "" {
-			lastNet = "unknown"
+			continue
 		}
 		netCounts[lastNet]++
 	}
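With this change, clumpingReport.String() skips pieces whose last_net is
empty instead of counting them under an "unknown" bucket, matching the
checker's main classification logic. A minimal standalone sketch of that
aggregation, assuming a hypothetical helper name and output format (the
real method lives on clumpingReport in observer.go):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// clumpedNetsSummary mirrors the counting done by clumpingReport.String()
// after this patch: pieces with an empty last_net are skipped entirely.
// The function name and output format are illustrative only.
func clumpedNetsSummary(lastNets []string) string {
	netCounts := make(map[string]int)
	for _, lastNet := range lastNets {
		if lastNet == "" {
			continue // was: lastNet = "unknown"
		}
		netCounts[lastNet]++
	}
	counts := make([]string, 0, len(netCounts))
	for lastNet, count := range netCounts {
		counts = append(counts, fmt.Sprintf("[%s]: %d", lastNet, count))
	}
	sort.Strings(counts) // map iteration is unordered; sort for stable log output
	return strings.Join(counts, ", ")
}

func main() {
	fmt.Println(clumpedNetsSummary([]string{"10.0.0", "", "10.0.0", "10.1.0"}))
	// prints: [10.0.0]: 2, [10.1.0]: 1
}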
diff --git a/satellite/repair/repairer/segments.go b/satellite/repair/repairer/segments.go
index 19f80fed8..294b928a9 100644
--- a/satellite/repair/repairer/segments.go
+++ b/satellite/repair/repairer/segments.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
+	"golang.org/x/exp/maps"
 
 	"storj.io/common/pb"
 	"storj.io/common/storj"
@@ -586,7 +587,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 	mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair) //mon:locked
 	stats.healthyRatioAfterRepair.Observe(healthyRatioAfterRepair)
 
-	var toRemove metabase.Pieces
+	toRemove := make(map[uint16]metabase.Piece, piecesCheck.Unhealthy.Size())
 	switch {
 	case healthyAfterRepair >= int(segment.Redundancy.OptimalShares):
 		// Repair was fully successful; remove all unhealthy pieces except those in
@@ -599,7 +600,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 				if retrievable && inExcludedCountry {
 					continue
 				}
-				toRemove = append(toRemove, piece)
+				toRemove[piece.Number] = piece
 			}
 		}
 	case healthyAfterRepair > int(segment.Redundancy.RepairShares):
@@ -608,7 +609,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 		// jeopardy.
 		for _, piece := range pieces {
 			if piecesCheck.OutOfPlacement.Contains(int(piece.Number)) {
-				toRemove = append(toRemove, piece)
+				toRemove[piece.Number] = piece
 			}
 		}
 	default:
@@ -620,16 +621,16 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 	// in any case, we want to remove pieces for which we have replacements now.
 	for _, piece := range pieces {
 		if repairedMap[piece.Number] {
-			toRemove = append(toRemove, piece)
+			toRemove[piece.Number] = piece
 		}
 	}
 
 	// add pieces that failed piece hash verification to the removal list
 	for _, outcome := range piecesReport.Failed {
-		toRemove = append(toRemove, outcome.Piece)
+		toRemove[outcome.Piece.Number] = outcome.Piece
 	}
 
-	newPieces, err := segment.Pieces.Update(repairedPieces, toRemove)
+	newPieces, err := segment.Pieces.Update(repairedPieces, maps.Values(toRemove))
 	if err != nil {
 		return false, repairPutError.Wrap(err)
 	}
diff --git a/satellite/repair/repairer/segments_test.go b/satellite/repair/repairer/segments_test.go
index 5ede69d31..685cdbb4c 100644
--- a/satellite/repair/repairer/segments_test.go
+++ b/satellite/repair/repairer/segments_test.go
@@ -298,13 +298,7 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 8, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
-			Satellite: testplanet.Combine(
-				testplanet.ReconfigureRS(1, 2, 4, 4),
-				func(log *zap.Logger, index int, config *satellite.Config) {
-					config.Checker.DoDeclumping = true
-					config.Repairer.DoDeclumping = true
-				},
-			),
+			Satellite: testplanet.ReconfigureRS(1, 2, 4, 4),
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))
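The repairer change replaces the toRemove slice with a map keyed by piece
number, so a piece that qualifies for removal on several grounds (unhealthy,
replaced during repair, failed hash verification) is counted once rather
than once per reason, which is what inflated the summary log. A minimal
sketch of the technique, with a hypothetical piece type standing in for
metabase.Piece:

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

// piece is a hypothetical stand-in for metabase.Piece.
type piece struct {
	Number      uint16
	StorageNode string
}

func main() {
	// Appending to a slice records a piece once per reason it was selected;
	// keying by piece number makes the removal set naturally deduplicated.
	toRemove := make(map[uint16]piece)
	toRemove[3] = piece{Number: 3, StorageNode: "node-a"} // selected as unhealthy
	toRemove[3] = piece{Number: 3, StorageNode: "node-a"} // selected again as replaced; no duplicate
	toRemove[7] = piece{Number: 7, StorageNode: "node-b"} // failed hash verification

	// maps.Values turns the set back into a slice, as passed to
	// segment.Pieces.Update in the patch; its order is unspecified,
	// which is fine for a removal list.
	removed := maps.Values(toRemove)
	fmt.Println("pieces removed:", len(removed)) // 2, not 3
}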