satellite/audit: fix sanity check for verify-piece-hashes command

The VerifyPieceHashes method has a sanity check for the number of
pieces to be removed from the pointer after the audit that verifies the
piece hashes.

This sanity check failed when we executed the command on the production
satellites, because the Verify method removes the Fails and
PendingAudits nodes from the audit report if piece_hashes_verified = false.
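
For illustration, a minimal sketch of the kind of sanity check described
above; the identifiers here are hypothetical, not the actual satellite code:

	report, err := verifier.Verify(ctx, path, nil)
	if err != nil {
		return err
	}

	// The command expects the audit report to flag every piece whose
	// hash it has determined to be unverifiable.
	toRemove := len(report.Fails) + len(report.PendingAudits)
	if toRemove != expectedBadPieces {
		// Before this fix, Verify stripped Fails and PendingAudits for
		// segments with piece_hashes_verified = false, so toRemove was
		// always 0 here and the check tripped on production satellites.
		return Error.New("expected %d pieces to remove, found %d", expectedBadPieces, toRemove)
	}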

A new temporary UsedToVerifyPieceHashes flag is added to
audits.Verifier. It is set to true only by the verify-piece-hashes
command. If the flag is true, the Verify method always includes the
Fails and PendingAudits nodes in the report.

A test case is added to cover this scenario.

Change-Id: I2c7cb6b12029d52b2fc565365eee0826c3de6ee8
Kaloyan Raev 2020-10-07 17:17:48 +03:00
parent 4280142b24
commit e7f2ec7ddf
3 changed files with 60 additions and 1 deletion


@@ -177,6 +177,8 @@ func verifyPieceHashes(ctx context.Context) (err error) {
 	return runVerifierCmd(ctx, func(verifier *audit.Verifier) error {
 		var total, fixed int
 
+		verifier.UsedToVerifyPieceHashes = true
+
 		scanner := bufio.NewScanner(os.Stdin)
 		for scanner.Scan() {
 			key, err := hex.DecodeString(scanner.Text())
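
For context, the command reads hex-encoded segment keys from stdin, one per
line. A hypothetical invocation, assuming the subcommand is registered as
verify-piece-hashes on the satellite binary:

	satellite verify-piece-hashes < segment-keys.txt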


@@ -67,7 +67,10 @@ type Verifier struct {
 	minDownloadTimeout time.Duration
 
 	OnTestingCheckSegmentAlteredHook func()
-	OnTestingVerifyMockFunc          func() (Report, error)
+
+	// Temporary fields for the verify-piece-hashes command
+	OnTestingVerifyMockFunc func() (Report, error)
+	UsedToVerifyPieceHashes bool
 }
 
 // NewVerifier creates a Verifier.
@@ -107,6 +110,9 @@ func (verifier *Verifier) Verify(ctx context.Context, path storj.Path, skip map[
 	}
 
 	defer func() {
+		if verifier.UsedToVerifyPieceHashes {
+			return
+		}
 		// if piece hashes have not been verified for this segment, do not mark nodes as failing audit
 		if !pointer.PieceHashesVerified {
 			report.PendingAudits = nil
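
A minimal sketch of what this early return changes, assuming a segment with
piece_hashes_verified = false and one node holding a bad piece (illustrative
only, not part of the commit):

	verifier.UsedToVerifyPieceHashes = false
	report, _ := verifier.Verify(ctx, path, nil)
	// report.Fails and report.PendingAudits are nil: the defer strips
	// them because the segment's piece hashes are not verified.

	verifier.UsedToVerifyPieceHashes = true
	report, _ = verifier.Verify(ctx, path, nil)
	// report.Fails now lists the failing node: the defer returns early
	// and leaves the report intact.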


@@ -1056,3 +1056,54 @@ func TestVerifyPieceHashes(t *testing.T) {
 		}
 	})
 }
+
+func TestVerifierMissingPieceHashesNotVerified_UsedToVerifyPieceHashes(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		satellite := planet.Satellites[0]
+		audits := satellite.Audit
+
+		audits.Worker.Loop.Pause()
+		audits.Chore.Loop.Pause()
+
+		ul := planet.Uplinks[0]
+		testData := testrand.Bytes(8 * memory.KiB)
+
+		err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
+		require.NoError(t, err)
+
+		audits.Chore.Loop.TriggerWait()
+		queue := audits.Queues.Fetch()
+		path, err := queue.Next()
+		require.NoError(t, err)
+
+		pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
+		require.NoError(t, err)
+
+		// update pointer to have PieceHashesVerified false
+		err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(path))
+		require.NoError(t, err)
+		pointer.PieceHashesVerified = false
+		err = satellite.Metainfo.Service.Put(ctx, metabase.SegmentKey(path), pointer)
+		require.NoError(t, err)
+
+		// delete the piece from the first node
+		origNumPieces := len(pointer.GetRemote().GetRemotePieces())
+		piece := pointer.GetRemote().GetRemotePieces()[0]
+		pieceID := pointer.GetRemote().RootPieceId.Derive(piece.NodeId, piece.PieceNum)
+		node := planet.FindNode(piece.NodeId)
+		err = node.Storage2.Store.Delete(ctx, satellite.ID(), pieceID)
+		require.NoError(t, err)
+
+		audits.Verifier.UsedToVerifyPieceHashes = true
+
+		report, err := audits.Verifier.Verify(ctx, path, nil)
+		require.NoError(t, err)
+
+		assert.Len(t, report.Successes, origNumPieces-1)
+
+		// expect a failed audit
+		assert.Len(t, report.Fails, 1)
+		assert.Len(t, report.Offlines, 0)
+		assert.Len(t, report.PendingAudits, 0)
+	})
+}
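
To run just this test, assuming the usual package layout with the verifier
tests under satellite/audit:

	go test ./satellite/audit -run TestVerifierMissingPieceHashesNotVerified_UsedToVerifyPieceHashes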