satellite/repair: fix checker and repairer logging

This fixes two small logging issues:
* the repair checker was logging empty last_net values as clumped pieces,
but the main logic no longer classifies them that way
* the repairer summary log was reporting an incorrect number of pieces
removed from the segment, because the removal list contained duplicate
entries

There was no real issue here; both problems affected only the logs.

Change-Id: Ifc6a83637d621d628598200cad00cce44fa4cbf9
Michal Niewrzal 2023-10-25 11:51:46 +02:00 committed by Storj Robot
parent 3ba452b301
commit f5d717735b
3 changed files with 9 additions and 14 deletions


@@ -529,7 +529,7 @@ func (cr *clumpingReport) String() string {
 	netCounts := make(map[string]int)
 	for _, lastNet := range cr.lastNets {
 		if lastNet == "" {
-			lastNet = "unknown"
+			continue
 		}
 		netCounts[lastNet]++
 	}
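
For reference, the effect of this checker change can be summarized in a standalone sketch (countLastNets is an illustrative name, not a function in the checker): pieces with an empty last_net are now skipped entirely instead of being bucketed and reported as clumped.

// countLastNets mirrors the loop in clumpingReport.String after this change:
// entries without a known last_net are skipped rather than counted.
func countLastNets(lastNets []string) map[string]int {
	netCounts := make(map[string]int)
	for _, lastNet := range lastNets {
		if lastNet == "" {
			continue
		}
		netCounts[lastNet]++
	}
	return netCounts
}

For an input like []string{"10.0.0.0", "", "10.0.0.0"}, the old code reported both "10.0.0.0" and an "unknown" bucket; the new code counts only the known net and never emits "unknown".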


@@ -14,6 +14,7 @@ import (
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
+	"golang.org/x/exp/maps"

 	"storj.io/common/pb"
 	"storj.io/common/storj"
@@ -586,7 +587,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 	mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair) //mon:locked
 	stats.healthyRatioAfterRepair.Observe(healthyRatioAfterRepair)

-	var toRemove metabase.Pieces
+	toRemove := make(map[uint16]metabase.Piece, piecesCheck.Unhealthy.Size())
 	switch {
 	case healthyAfterRepair >= int(segment.Redundancy.OptimalShares):
 		// Repair was fully successful; remove all unhealthy pieces except those in
@@ -599,7 +600,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 				if retrievable && inExcludedCountry {
 					continue
 				}
-				toRemove = append(toRemove, piece)
+				toRemove[piece.Number] = piece
 			}
 		}
 	case healthyAfterRepair > int(segment.Redundancy.RepairShares):
@@ -608,7 +609,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 		// jeopardy.
 		for _, piece := range pieces {
 			if piecesCheck.OutOfPlacement.Contains(int(piece.Number)) {
-				toRemove = append(toRemove, piece)
+				toRemove[piece.Number] = piece
 			}
 		}
 	default:
@@ -620,16 +621,16 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 	// in any case, we want to remove pieces for which we have replacements now.
 	for _, piece := range pieces {
 		if repairedMap[piece.Number] {
-			toRemove = append(toRemove, piece)
+			toRemove[piece.Number] = piece
 		}
 	}

 	// add pieces that failed piece hash verification to the removal list
 	for _, outcome := range piecesReport.Failed {
-		toRemove = append(toRemove, outcome.Piece)
+		toRemove[outcome.Piece.Number] = outcome.Piece
 	}

-	newPieces, err := segment.Pieces.Update(repairedPieces, toRemove)
+	newPieces, err := segment.Pieces.Update(repairedPieces, maps.Values(toRemove))
 	if err != nil {
 		return false, repairPutError.Wrap(err)
 	}
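
The corrected summary count comes from keying toRemove by piece number: a piece that qualifies for removal through more than one of the paths above (for example, replaced during repair and also failed piece hash verification) now ends up in the set only once, and maps.Values from golang.org/x/exp/maps converts the set back into a slice for Pieces.Update. A minimal runnable sketch of the idea, using a hypothetical Piece type rather than metabase.Piece:

package main

import "fmt"

// Piece is a stand-in for metabase.Piece, used only for this illustration.
type Piece struct {
	Number      uint16
	StorageNode string
}

func main() {
	replaced := []Piece{{Number: 3, StorageNode: "node-a"}, {Number: 5, StorageNode: "node-b"}}
	failedVerification := []Piece{{Number: 5, StorageNode: "node-b"}} // same piece again

	// Old approach: appending both lists to a slice double-counts piece 5,
	// which is what skewed the "pieces removed" number in the summary log.
	var asSlice []Piece
	asSlice = append(asSlice, replaced...)
	asSlice = append(asSlice, failedVerification...)

	// New approach: keying by piece number keeps one entry per piece.
	asMap := make(map[uint16]Piece)
	for _, p := range replaced {
		asMap[p.Number] = p
	}
	for _, p := range failedVerification {
		asMap[p.Number] = p
	}

	fmt.Println("slice length:", len(asSlice)) // 3
	fmt.Println("map length:", len(asMap))     // 2
}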


@@ -298,13 +298,7 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 8, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
-			Satellite: testplanet.Combine(
-				testplanet.ReconfigureRS(1, 2, 4, 4),
-				func(log *zap.Logger, index int, config *satellite.Config) {
-					config.Checker.DoDeclumping = true
-					config.Repairer.DoDeclumping = true
-				},
-			),
+			Satellite: testplanet.ReconfigureRS(1, 2, 4, 4),
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))