8d5a2a90f2
The current option is to pass a stream id and position as input, but that is not very efficient when we have a long list of segments to repair. This change adds an option to read a whole CSV file and process each entry one by one. If the command has a single argument, it is treated as a CSV file location; if it has two arguments, they are parsed directly as stream id and position.

Change-Id: I1e91cf57a794d81d74af0091c24a2e7d00d1fab9
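The argument dispatch described above could look roughly like the following. This is a minimal sketch, not the change's actual code: the helper names (collectSegments, parseSegment), the segmentKey type, and the assumption that each CSV row is a bare "stream-id,position" pair with the stream id as a UUID are all hypothetical.

package main

import (
	"encoding/csv"
	"fmt"
	"os"
	"strconv"

	"storj.io/common/uuid"
)

// segmentKey identifies a single segment to repair.
type segmentKey struct {
	streamID uuid.UUID
	position uint64
}

// collectSegments interprets the command-line arguments: one argument is
// read as a CSV file with a "stream-id,position" record per line, two
// arguments are parsed directly as stream id and position.
func collectSegments(args []string) ([]segmentKey, error) {
	switch len(args) {
	case 1:
		file, err := os.Open(args[0])
		if err != nil {
			return nil, err
		}
		defer func() { _ = file.Close() }()

		records, err := csv.NewReader(file).ReadAll()
		if err != nil {
			return nil, err
		}

		keys := make([]segmentKey, 0, len(records))
		for _, record := range records {
			if len(record) != 2 {
				return nil, fmt.Errorf("expected 2 fields per record, got %d", len(record))
			}
			key, err := parseSegment(record[0], record[1])
			if err != nil {
				return nil, err
			}
			keys = append(keys, key)
		}
		return keys, nil
	case 2:
		key, err := parseSegment(args[0], args[1])
		if err != nil {
			return nil, err
		}
		return []segmentKey{key}, nil
	default:
		return nil, fmt.Errorf("expected 1 or 2 arguments, got %d", len(args))
	}
}

// parseSegment converts a textual stream id and position into a segmentKey.
func parseSegment(streamIDArg, positionArg string) (segmentKey, error) {
	streamID, err := uuid.FromString(streamIDArg)
	if err != nil {
		return segmentKey{}, err
	}
	position, err := strconv.ParseUint(positionArg, 10, 64)
	if err != nil {
		return segmentKey{}, err
	}
	return segmentKey{streamID: streamID, position: position}, nil
}

With a shape like this, batch repair reduces to looping over the returned keys and invoking the existing single-segment repair path for each one.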
75 lines
2.4 KiB
Go
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"go.uber.org/zap/zaptest/observer"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
)

func TestRepairSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 20, UplinkCount: 1,
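		// reconfigure Reed-Solomon: 2 needed, 4 repair threshold, 6 success, 8 total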
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.ReconfigureRS(2, 4, 6, 8),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

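		// upload a single object so the satellite has exactly one segment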
		expectedData := testrand.Bytes(20 * memory.KiB)
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket", "object", expectedData)
		require.NoError(t, err)

		segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
		require.NoError(t, err)
		require.Len(t, segments, 1)

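		// repair the segment directly with the satellite's repairer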
		repairSegment(ctx, zaptest.NewLogger(t), satellite.Repairer, satellite.Metabase.DB, segments[0])

		data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "bucket", "object")
		require.NoError(t, err)
		require.Equal(t, expectedData, data)

		segmentsAfter, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
		require.NoError(t, err)
		require.Len(t, segmentsAfter, 1)

		// verify that no nodes from before the repair remain, as we are replacing all of them
		require.NotEqual(t, segments[0].Pieces, segmentsAfter[0].Pieces)
		oldNodes := map[storj.NodeID]struct{}{}
		for _, piece := range segments[0].Pieces {
			oldNodes[piece.StorageNode] = struct{}{}
		}

		for _, piece := range segmentsAfter[0].Pieces {
			_, found := oldNodes[piece.StorageNode]
			require.False(t, found)
		}

		// delete all pieces
		for _, node := range planet.StorageNodes {
			err := node.Storage2.Store.DeleteSatelliteBlobs(ctx, planet.Satellites[0].ID())
			require.NoError(t, err)
		}

		// we cannot download the segment, so repair is not possible
		observedZapCore, observedLogs := observer.New(zap.ErrorLevel)
		observedLogger := zap.New(observedZapCore)
		repairSegment(ctx, observedLogger, satellite.Repairer, satellite.Metabase.DB, segments[0])
		require.Contains(t, observedLogs.All()[observedLogs.Len()-1].Message, "download failed")

		// TODO add more detailed tests
	})
}