2019-06-11 21:14:40 +01:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package audit_test
|
|
|
|
|
|
|
|
import (
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2019-06-19 10:02:25 +01:00
|
|
|
"github.com/stretchr/testify/assert"
|
2019-06-11 21:14:40 +01:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
|
|
|
|
"storj.io/storj/internal/memory"
|
|
|
|
"storj.io/storj/internal/testcontext"
|
|
|
|
"storj.io/storj/internal/testplanet"
|
2019-06-26 11:38:51 +01:00
|
|
|
"storj.io/storj/internal/testrand"
|
2019-06-11 21:14:40 +01:00
|
|
|
"storj.io/storj/pkg/peertls/tlsopts"
|
|
|
|
"storj.io/storj/pkg/pkcrypto"
|
|
|
|
"storj.io/storj/pkg/storj"
|
|
|
|
"storj.io/storj/pkg/transport"
|
2019-07-28 06:55:36 +01:00
|
|
|
"storj.io/storj/satellite/audit"
|
2019-06-11 21:14:40 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestReverifySuccess(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-01 15:02:00 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
2019-06-11 21:14:40 +01:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// This is a bulky test but all it's doing is:
|
|
|
|
// - uploads random data
|
|
|
|
// - uses the cursor to get a stripe
|
|
|
|
// - creates one pending audit for a node holding a piece for that stripe
|
|
|
|
// - the actual share is downloaded to make sure ExpectedShareHash is correct
|
|
|
|
// - calls reverify on that same stripe
|
|
|
|
// - expects one storage node to be marked as a success in the audit report
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
2019-07-01 15:02:00 +01:00
|
|
|
testData := testrand.Bytes(8 * memory.KiB)
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
orders := satellite.Orders.Service
|
|
|
|
containment := satellite.DB.Containment()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
projects, err := satellite.DB.Console().Projects().GetAll(ctx)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))
|
2019-09-11 23:37:01 +01:00
|
|
|
shareSize := pointer.GetRemote().GetRedundancy().GetErasureShareSize()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
pieces := pointer.GetRemote().GetRemotePieces()
|
|
|
|
rootPieceID := pointer.GetRemote().RootPieceId
|
2019-07-11 21:51:40 +01:00
|
|
|
limit, privateKey, err := orders.CreateAuditOrderLimit(ctx, bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
share, err := audits.Verifier.GetShare(ctx, limit, privateKey, randomIndex, shareSize, int(pieces[0].PieceNum))
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: pieces[0].NodeId,
|
|
|
|
PieceID: rootPieceID,
|
2019-09-11 23:37:01 +01:00
|
|
|
StripeIndex: randomIndex,
|
2019-06-11 21:14:40 +01:00
|
|
|
ShareSize: shareSize,
|
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(share.Data),
|
|
|
|
ReverifyCount: 0,
|
2019-09-11 23:37:01 +01:00
|
|
|
Path: path,
|
2019-06-11 21:14:40 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
err = containment.IncrementPending(ctx, pending)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
report, err := audits.Verifier.Reverify(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Fails, 0)
|
|
|
|
require.Len(t, report.Offlines, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Successes, 1)
|
|
|
|
require.Equal(t, report.Successes[0], pieces[0].NodeId)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestReverifyFailMissingShare(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-01 15:02:00 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
2019-06-11 21:14:40 +01:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// - uploads random data
|
|
|
|
// - uses the cursor to get a stripe
|
|
|
|
// - creates one pending audit for a node holding a piece for that stripe
|
|
|
|
// - the actual share is downloaded to make sure ExpectedShareHash is correct
|
|
|
|
// - delete piece from node
|
|
|
|
// - calls reverify on that same stripe
|
|
|
|
// - expects one storage node to be marked as a fail in the audit report
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
2019-07-01 15:02:00 +01:00
|
|
|
testData := testrand.Bytes(8 * memory.KiB)
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
orders := satellite.Orders.Service
|
|
|
|
containment := satellite.DB.Containment()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
projects, err := satellite.DB.Console().Projects().GetAll(ctx)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))
|
2019-09-11 23:37:01 +01:00
|
|
|
shareSize := pointer.GetRemote().GetRedundancy().GetErasureShareSize()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
pieces := pointer.GetRemote().GetRemotePieces()
|
|
|
|
rootPieceID := pointer.GetRemote().RootPieceId
|
2019-07-11 21:51:40 +01:00
|
|
|
limit, privateKey, err := orders.CreateAuditOrderLimit(ctx, bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
share, err := audits.Verifier.GetShare(ctx, limit, privateKey, randomIndex, shareSize, int(pieces[0].PieceNum))
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: pieces[0].NodeId,
|
|
|
|
PieceID: rootPieceID,
|
2019-09-11 23:37:01 +01:00
|
|
|
StripeIndex: randomIndex,
|
2019-06-11 21:14:40 +01:00
|
|
|
ShareSize: shareSize,
|
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(share.Data),
|
|
|
|
ReverifyCount: 0,
|
2019-09-11 23:37:01 +01:00
|
|
|
Path: path,
|
2019-06-11 21:14:40 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
err = containment.IncrementPending(ctx, pending)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// delete the piece from the first node
|
2019-09-11 23:37:01 +01:00
|
|
|
piece := pointer.GetRemote().GetRemotePieces()[0]
|
|
|
|
pieceID := pointer.GetRemote().RootPieceId.Derive(piece.NodeId, piece.PieceNum)
|
2019-07-03 17:53:15 +01:00
|
|
|
node := getStorageNode(planet, piece.NodeId)
|
2019-09-11 23:37:01 +01:00
|
|
|
err = node.Storage2.Store.Delete(ctx, satellite.ID(), pieceID)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
report, err := audits.Verifier.Reverify(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Successes, 0)
|
|
|
|
require.Len(t, report.Offlines, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Fails, 1)
|
|
|
|
require.Equal(t, report.Fails[0], pieces[0].NodeId)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestReverifyFailBadData(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-01 15:02:00 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
2019-06-11 21:14:40 +01:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// - uploads random data
|
|
|
|
// - uses the cursor to get a stripe
|
|
|
|
// - creates a pending audit for a node holding a piece for that stripe
|
|
|
|
// - makes ExpectedShareHash have random data
|
|
|
|
// - calls reverify on that same stripe
|
|
|
|
// - expects one storage node to be marked as a fail in the audit report
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
2019-07-01 15:02:00 +01:00
|
|
|
testData := testrand.Bytes(8 * memory.KiB)
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
pieces := pointer.GetRemote().GetRemotePieces()
|
|
|
|
rootPieceID := pointer.GetRemote().RootPieceId
|
|
|
|
redundancy := pointer.GetRemote().GetRedundancy()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: pieces[0].NodeId,
|
|
|
|
PieceID: rootPieceID,
|
2019-09-11 23:37:01 +01:00
|
|
|
StripeIndex: randomIndex,
|
2019-06-11 21:14:40 +01:00
|
|
|
ShareSize: redundancy.ErasureShareSize,
|
2019-07-01 15:02:00 +01:00
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(nil),
|
2019-06-11 21:14:40 +01:00
|
|
|
ReverifyCount: 0,
|
2019-09-11 23:37:01 +01:00
|
|
|
Path: path,
|
2019-06-11 21:14:40 +01:00
|
|
|
}
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err = satellite.DB.Containment().IncrementPending(ctx, pending)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-07-18 19:08:15 +01:00
|
|
|
nodeID := pieces[0].NodeId
|
2019-09-11 23:37:01 +01:00
|
|
|
report, err := audits.Verifier.Reverify(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Successes, 0)
|
|
|
|
require.Len(t, report.Offlines, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Fails, 1)
|
2019-07-18 19:08:15 +01:00
|
|
|
require.Equal(t, report.Fails[0], nodeID)
|
2019-06-11 21:14:40 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestReverifyOffline(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-01 15:02:00 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
2019-06-11 21:14:40 +01:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// - uploads random data
|
|
|
|
// - uses the cursor to get a stripe
|
|
|
|
// - creates pending audits for one node holding a piece for that stripe
|
|
|
|
// - stop the node that has the pending audit
|
|
|
|
// - calls reverify on that same stripe
|
|
|
|
// - expects one storage node to be marked as offline in the audit report
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
2019-07-01 15:02:00 +01:00
|
|
|
testData := testrand.Bytes(8 * memory.KiB)
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path, err := queue.Next()
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pieces := pointer.GetRemote().GetRemotePieces()
|
|
|
|
rootPieceID := pointer.GetRemote().RootPieceId
|
|
|
|
redundancy := pointer.GetRemote().GetRedundancy()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: pieces[0].NodeId,
|
|
|
|
PieceID: rootPieceID,
|
2019-09-11 23:37:01 +01:00
|
|
|
StripeIndex: randomIndex,
|
2019-06-11 21:14:40 +01:00
|
|
|
ShareSize: redundancy.ErasureShareSize,
|
2019-06-26 11:38:51 +01:00
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(testrand.Bytes(10)),
|
2019-06-11 21:14:40 +01:00
|
|
|
ReverifyCount: 0,
|
2019-09-11 23:37:01 +01:00
|
|
|
Path: path,
|
2019-06-11 21:14:40 +01:00
|
|
|
}
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err = satellite.DB.Containment().IncrementPending(ctx, pending)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = stopStorageNode(ctx, planet, pieces[0].NodeId)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
report, err := audits.Verifier.Reverify(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Successes, 0)
|
|
|
|
require.Len(t, report.Fails, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Offlines, 1)
|
|
|
|
require.Equal(t, report.Offlines[0], pieces[0].NodeId)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestReverifyOfflineDialTimeout(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-01 15:02:00 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
2019-06-11 21:14:40 +01:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// - uploads random data
|
|
|
|
// - uses the cursor to get a stripe
|
|
|
|
// - creates pending audit for one node holding a piece for that stripe
|
|
|
|
// - uses a slow transport client so that dial timeout will happen (an offline case)
|
|
|
|
// - calls reverify on that same stripe
|
|
|
|
// - expects one storage node to be marked as offline in the audit report
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
2019-07-01 15:02:00 +01:00
|
|
|
testData := testrand.Bytes(8 * memory.KiB)
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
network := &transport.SimulatedNetwork{
|
|
|
|
DialLatency: 200 * time.Second,
|
|
|
|
BytesPerSecond: 1 * memory.KiB,
|
|
|
|
}
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
tlsOpts, err := tlsopts.NewOptions(satellite.Identity, tlsopts.Config{}, nil)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
newTransport := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
|
|
|
|
Dial: 20 * time.Millisecond,
|
|
|
|
})
|
|
|
|
|
|
|
|
slowClient := network.NewClient(newTransport)
|
|
|
|
require.NotNil(t, slowClient)
|
|
|
|
|
|
|
|
// This config value will create a very short timeframe allowed for receiving
|
|
|
|
// data from storage nodes. This will cause context to cancel and start
|
|
|
|
// downloading from new nodes.
|
|
|
|
minBytesPerSecond := 100 * memory.KiB
|
|
|
|
|
2019-07-01 15:02:00 +01:00
|
|
|
verifier := audit.NewVerifier(
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite.Log.Named("verifier"),
|
|
|
|
satellite.Metainfo.Service,
|
2019-06-11 21:14:40 +01:00
|
|
|
slowClient,
|
2019-09-11 23:37:01 +01:00
|
|
|
satellite.Overlay.Service,
|
|
|
|
satellite.DB.Containment(),
|
|
|
|
satellite.Orders.Service,
|
|
|
|
satellite.Identity,
|
2019-06-11 21:14:40 +01:00
|
|
|
minBytesPerSecond,
|
|
|
|
5*time.Second)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
pieces := pointer.GetRemote().GetRemotePieces()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
rootPieceID := pointer.GetRemote().RootPieceId
|
|
|
|
redundancy := pointer.GetRemote().GetRedundancy()
|
2019-06-11 21:14:40 +01:00
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: pieces[0].NodeId,
|
|
|
|
PieceID: rootPieceID,
|
2019-09-11 23:37:01 +01:00
|
|
|
StripeIndex: randomIndex,
|
2019-06-11 21:14:40 +01:00
|
|
|
ShareSize: redundancy.ErasureShareSize,
|
2019-07-01 15:02:00 +01:00
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(nil),
|
2019-06-11 21:14:40 +01:00
|
|
|
ReverifyCount: 0,
|
2019-09-11 23:37:01 +01:00
|
|
|
Path: path,
|
2019-06-11 21:14:40 +01:00
|
|
|
}
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
err = satellite.DB.Containment().IncrementPending(ctx, pending)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-09-11 23:37:01 +01:00
|
|
|
report, err := verifier.Reverify(ctx, path)
|
2019-06-11 21:14:40 +01:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Successes, 0)
|
|
|
|
require.Len(t, report.Fails, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Offlines, 1)
|
|
|
|
require.Equal(t, report.Offlines[0], pending.NodeID)
|
|
|
|
})
|
|
|
|
}
|
2019-06-19 10:02:25 +01:00
|
|
|
|
|
|
|
// TestReverifyDeletedSegment checks that reverifying a segment that was
// deleted after containment returns ErrSegmentDeleted and removes the node
// from containment.
func TestReverifyDeletedSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// - uploads random data
		// - gets a path from the audit queue
		// - creates one pending audit for a node holding a piece for that segment
		// - deletes the file
		// - calls reverify on that same stripe
		// - expects reverification to pass successfully and the storage node to be not in containment mode

		satellite := planet.Satellites[0]
		audits := satellite.Audit
		queue := audits.Queue

		// Pause the worker so it cannot consume the queued path before we do.
		audits.Worker.Loop.Pause()

		ul := planet.Uplinks[0]
		testData := testrand.Bytes(8 * memory.KiB)

		err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
		require.NoError(t, err)

		// Run the audit chore once so the uploaded segment lands in the queue.
		audits.Chore.Loop.TriggerWait()
		path, err := queue.Next()
		require.NoError(t, err)

		pointer, err := satellite.Metainfo.Service.Get(ctx, path)
		require.NoError(t, err)

		randomIndex, err := audit.GetRandomStripe(ctx, pointer)
		require.NoError(t, err)

		nodeID := pointer.GetRemote().GetRemotePieces()[0].NodeId
		// The expected hash is irrelevant: the segment is deleted before
		// reverify, so the audit should bail out before comparing shares.
		pending := &audit.PendingAudit{
			NodeID:            nodeID,
			PieceID:           pointer.GetRemote().RootPieceId,
			StripeIndex:       randomIndex,
			ShareSize:         pointer.GetRemote().GetRedundancy().GetErasureShareSize(),
			ExpectedShareHash: pkcrypto.SHA256Hash(nil),
			ReverifyCount:     0,
			Path:              path,
		}

		containment := satellite.DB.Containment()
		err = containment.IncrementPending(ctx, pending)
		require.NoError(t, err)

		// delete the file
		err = ul.Delete(ctx, satellite, "testbucket", "test/path")
		require.NoError(t, err)

		// Reverify must surface the deletion as ErrSegmentDeleted with an
		// empty report rather than penalizing the node.
		report, err := audits.Verifier.Reverify(ctx, path)
		require.True(t, audit.ErrSegmentDeleted.Has(err))
		assert.Empty(t, report)

		// The node must have been released from containment.
		_, err = containment.Get(ctx, nodeID)
		require.True(t, audit.ErrContainedNotFound.Has(err))
	})
}
|
|
|
|
|
|
|
|
// TestReverifyModifiedSegment checks that reverifying a segment that was
// re-uploaded (so its pointer changed) yields an empty report and removes the
// node from containment.
func TestReverifyModifiedSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// - uploads random data
		// - uses the cursor to get a stripe
		// - creates one pending audit for a node holding a piece for that stripe
		// - re-uploads the file
		// - calls reverify on that same stripe
		// - expects reverification to pass successfully and the storage node to be not in containment mode

		satellite := planet.Satellites[0]
		audits := satellite.Audit
		queue := audits.Queue

		// Pause the worker so it cannot consume the queued path before we do.
		audits.Worker.Loop.Pause()

		ul := planet.Uplinks[0]
		testData := testrand.Bytes(8 * memory.KiB)

		err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
		require.NoError(t, err)

		// Run the audit chore once so the uploaded segment lands in the queue.
		audits.Chore.Loop.TriggerWait()
		path, err := queue.Next()
		require.NoError(t, err)

		pointer, err := satellite.Metainfo.Service.Get(ctx, path)
		require.NoError(t, err)

		randomIndex, err := audit.GetRandomStripe(ctx, pointer)
		require.NoError(t, err)

		nodeID := pointer.GetRemote().GetRemotePieces()[0].NodeId
		// The expected hash is irrelevant: the segment is replaced before
		// reverify, so the stale pending audit should be discarded.
		pending := &audit.PendingAudit{
			NodeID:            nodeID,
			PieceID:           pointer.GetRemote().RootPieceId,
			StripeIndex:       randomIndex,
			ShareSize:         pointer.GetRemote().GetRedundancy().GetErasureShareSize(),
			ExpectedShareHash: pkcrypto.SHA256Hash(nil),
			ReverifyCount:     0,
			Path:              path,
		}

		containment := satellite.DB.Containment()

		err = containment.IncrementPending(ctx, pending)
		require.NoError(t, err)

		// replace the file
		err = ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
		require.NoError(t, err)

		// The pending audit no longer matches the (new) pointer, so the report
		// must be empty and no error returned.
		report, err := audits.Verifier.Reverify(ctx, path)
		require.NoError(t, err)
		assert.Empty(t, report)

		// The node must have been released from containment.
		_, err = containment.Get(ctx, nodeID)
		require.True(t, audit.ErrContainedNotFound.Has(err))
	})
}
|
2019-09-19 00:45:15 +01:00
|
|
|
|
|
|
|
func TestReverifyDifferentShare(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
|
|
|
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
// - uploads random data to two files
|
|
|
|
// - get a random stripe to audit from file 1
|
|
|
|
// - creates one pending audit for a node holding a piece for that stripe
|
|
|
|
// - the actual share is downloaded to make sure ExpectedShareHash is correct
|
|
|
|
// - delete piece for file 1 from the selected node
|
|
|
|
// - calls reverify on some stripe from file 2
|
|
|
|
// - expects one storage node to be marked as a fail in the audit report
|
|
|
|
// - (if file 2 is used during reverify, the node will pass the audit and the test should fail)
|
|
|
|
|
|
|
|
satellite := planet.Satellites[0]
|
|
|
|
audits := satellite.Audit
|
|
|
|
queue := audits.Queue
|
|
|
|
|
|
|
|
audits.Worker.Loop.Pause()
|
|
|
|
|
|
|
|
ul := planet.Uplinks[0]
|
|
|
|
testData1 := testrand.Bytes(8 * memory.KiB)
|
|
|
|
testData2 := testrand.Bytes(8 * memory.KiB)
|
|
|
|
|
|
|
|
err := ul.Upload(ctx, satellite, "testbucket", "test/path1", testData1)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = ul.Upload(ctx, satellite, "testbucket", "test/path2", testData2)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
audits.Chore.Loop.TriggerWait()
|
|
|
|
path1, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
path2, err := queue.Next()
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, path1, path2)
|
|
|
|
|
|
|
|
pointer1, err := satellite.Metainfo.Service.Get(ctx, path1)
|
|
|
|
require.NoError(t, err)
|
|
|
|
pointer2, err := satellite.Metainfo.Service.Get(ctx, path2)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// find a node that contains a piece for both files
|
|
|
|
// save that node ID and the piece number associated with it for pointer1
|
|
|
|
var selectedNode storj.NodeID
|
|
|
|
var selectedPieceNum int32
|
|
|
|
p1Nodes := make(map[storj.NodeID]int32)
|
|
|
|
for _, piece := range pointer1.GetRemote().GetRemotePieces() {
|
|
|
|
p1Nodes[piece.NodeId] = piece.PieceNum
|
|
|
|
}
|
|
|
|
for _, piece := range pointer2.GetRemote().GetRemotePieces() {
|
|
|
|
pieceNum, ok := p1Nodes[piece.NodeId]
|
|
|
|
if ok {
|
|
|
|
selectedNode = piece.NodeId
|
|
|
|
selectedPieceNum = pieceNum
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.NotEqual(t, selectedNode, storj.NodeID{})
|
|
|
|
|
|
|
|
randomIndex, err := audit.GetRandomStripe(ctx, pointer1)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
orders := satellite.Orders.Service
|
|
|
|
containment := satellite.DB.Containment()
|
|
|
|
|
|
|
|
projects, err := satellite.DB.Console().Projects().GetAll(ctx)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))
|
|
|
|
shareSize := pointer1.GetRemote().GetRedundancy().GetErasureShareSize()
|
|
|
|
|
|
|
|
rootPieceID := pointer1.GetRemote().RootPieceId
|
|
|
|
limit, privateKey, err := orders.CreateAuditOrderLimit(ctx, bucketID, selectedNode, selectedPieceNum, rootPieceID, shareSize)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
share, err := audits.Verifier.GetShare(ctx, limit, privateKey, randomIndex, shareSize, int(selectedPieceNum))
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
pending := &audit.PendingAudit{
|
|
|
|
NodeID: selectedNode,
|
|
|
|
PieceID: rootPieceID,
|
|
|
|
StripeIndex: randomIndex,
|
|
|
|
ShareSize: shareSize,
|
|
|
|
ExpectedShareHash: pkcrypto.SHA256Hash(share.Data),
|
|
|
|
ReverifyCount: 0,
|
|
|
|
Path: path1,
|
|
|
|
}
|
|
|
|
|
|
|
|
err = containment.IncrementPending(ctx, pending)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// delete the piece for pointer1 from the selected node
|
|
|
|
pieceID := pointer1.GetRemote().RootPieceId.Derive(selectedNode, selectedPieceNum)
|
|
|
|
node := getStorageNode(planet, selectedNode)
|
|
|
|
err = node.Storage2.Store.Delete(ctx, satellite.ID(), pieceID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// reverify with path 2. Since the selected node was put in containment for path1,
|
|
|
|
// it should be audited for path1 and fail
|
|
|
|
report, err := audits.Verifier.Reverify(ctx, path2)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, report.Successes, 0)
|
|
|
|
require.Len(t, report.Offlines, 0)
|
|
|
|
require.Len(t, report.PendingAudits, 0)
|
|
|
|
require.Len(t, report.Fails, 1)
|
|
|
|
require.Equal(t, report.Fails[0], selectedNode)
|
|
|
|
})
|
|
|
|
}
|