satellite/audit: teach Reporter about piecewise audits
The Reporter is responsible for processing results from auditing operations, logging the results, disqualifying nodes that reached the maximum reverification count, and passing the results on to the reputation system.

In this commit, we extend the Reporter so that it knows how to process the results of piecewise reverification audits. We also change most reporter-related tests so that reverifications happen as piecewise reverification audits, exercising the new code.

Note that piecewise reverification audits are not yet being done outside of tests. In a later commit, we will switch from doing segmentwise reverifications to piecewise reverifications, as part of the audit-scaling effort.

Refs: https://github.com/storj/storj/issues/5230
Change-Id: I9438164ce1ea4d9a1790d18d0e1046a8eb04d8e9
parent 231c783698
commit 1854351da6
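Before the diff, a minimal caller-side sketch of the Reporter API as it looks after this change. It is assembled from the types shown in the diff below (audit.Report, audit.ReverificationJob, audit.PieceLocator, audit.Reporter); the function name recordExample and its parameters are hypothetical and are not part of the commit.

    package example

    import (
        "context"

        "storj.io/common/storj"
        "storj.io/storj/satellite/audit"
    )

    // recordExample (hypothetical) records one successful node and one piecewise
    // reverification job. RecordAudits no longer returns a (Report, error) pair:
    // records that still cannot be saved after maxRetries are logged at ERROR
    // level and then dropped.
    func recordExample(ctx context.Context, reporter audit.Reporter, succeeded storj.NodeID, contained storj.NodeID) {
        job := &audit.ReverificationJob{
            Locator: audit.PieceLocator{NodeID: contained},
        }
        report := audit.Report{
            Successes: storj.NodeIDList{succeeded},
            // PieceAudits is the piecewise counterpart of PendingAudits and is
            // temporary until segmentwise reverification is retired.
            PieceAudits: []*audit.ReverificationJob{job},
        }
        reporter.RecordAudits(ctx, report)
    }

For the piecewise path the interface also gains ReportReverificationNeeded and RecordReverificationResult, shown in the reporter.go hunks below.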
@@ -56,10 +56,9 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
 
 		require.Nil(t, dossier.Disqualified)
 
-		_, err = satellitePeer.Audit.Reporter.RecordAudits(ctx, audit.Report{
+		satellitePeer.Audit.Reporter.RecordAudits(ctx, audit.Report{
 			Successes: storj.NodeIDList{nodeID},
 		})
-		require.NoError(t, err)
 
 		reputationInfo, err := satellitePeer.Reputation.Service.Get(ctx, nodeID)
 		require.NoError(t, err)
@@ -70,8 +69,7 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
 		// failed audits.
 		iterations := 1
 		for ; ; iterations++ {
-			_, err := satellitePeer.Audit.Reporter.RecordAudits(ctx, report)
-			require.NoError(t, err)
+			satellitePeer.Audit.Reporter.RecordAudits(ctx, report)
 
 			reputationInfo, err := satellitePeer.Reputation.Service.Get(ctx, nodeID)
 			require.NoError(t, err)
@@ -5,6 +5,7 @@ package audit
 
 import (
 	"context"
+	"strings"
 
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
@@ -20,6 +21,7 @@ import (
 type reporter struct {
 	log         *zap.Logger
 	reputations *reputation.Service
+	overlay     *overlay.Service
 	containment Containment
 	// newContainment is temporary, and will replace containment
 	newContainment NewContainment
@@ -29,7 +31,9 @@ type reporter struct {
 
 // Reporter records audit reports in the overlay and database.
 type Reporter interface {
-	RecordAudits(ctx context.Context, req Report) (_ Report, err error)
+	RecordAudits(ctx context.Context, req Report)
+	ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error)
+	RecordReverificationResult(ctx context.Context, pendingJob *ReverificationJob, outcome Outcome, reputation overlay.ReputationStatus) (err error)
 }
 
 // Report contains audit result.
@@ -38,19 +42,22 @@ type Reporter interface {
 // succeeded, failed, were offline, have pending audits, or failed for unknown
 // reasons and their current reputation status.
 type Report struct {
-	Successes       storj.NodeIDList
-	Fails           storj.NodeIDList
-	Offlines        storj.NodeIDList
-	PendingAudits   []*PendingAudit
+	Successes     storj.NodeIDList
+	Fails         storj.NodeIDList
+	Offlines      storj.NodeIDList
+	PendingAudits []*PendingAudit
+	// PieceAudits is temporary and will replace PendingAudits.
+	PieceAudits     []*ReverificationJob
 	Unknown         storj.NodeIDList
 	NodesReputation map[storj.NodeID]overlay.ReputationStatus
 }
 
 // NewReporter instantiates a reporter.
-func NewReporter(log *zap.Logger, reputations *reputation.Service, containment Containment, newContainment NewContainment, maxRetries int, maxReverifyCount int32) Reporter {
+func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, containment Containment, newContainment NewContainment, maxRetries int, maxReverifyCount int32) Reporter {
 	return &reporter{
 		log:            log,
 		reputations:    reputations,
+		overlay:        overlay,
 		containment:    containment,
 		newContainment: newContainment,
 		maxRetries:     maxRetries,
@@ -58,17 +65,18 @@ func NewReporter(log *zap.Logger, reputations *reputation.Service, containment C
 	}
 }
 
-// RecordAudits saves audit results to overlay. When no error, it returns
-// nil for both return values, otherwise it returns the report with the fields
-// set to the values which have been saved and the error.
-func (reporter *reporter) RecordAudits(ctx context.Context, req Report) (_ Report, err error) {
-	defer mon.Task()(&ctx)(&err)
+// RecordAudits saves audit results, applying reputation changes as appropriate.
+// If some records can not be updated after a number of attempts, the failures
+// are logged at level ERROR, but are otherwise thrown away.
+func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
+	defer mon.Task()(&ctx)(nil)
 
 	successes := req.Successes
 	fails := req.Fails
 	unknowns := req.Unknown
 	offlines := req.Offlines
 	pendingAudits := req.PendingAudits
+	pieceAudits := req.PieceAudits
 
 	reporter.log.Debug("Reporting audits",
 		zap.Int("successes", len(successes)),
@@ -76,44 +84,43 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) (_ Repor
 		zap.Int("unknowns", len(unknowns)),
 		zap.Int("offlines", len(offlines)),
 		zap.Int("pending", len(pendingAudits)),
+		zap.Int("piece-pending", len(pieceAudits)),
 	)
 
-	var errlist errs.Group
 	nodesReputation := req.NodesReputation
 
-	tries := 0
-	for tries <= reporter.maxRetries {
-		if len(successes) == 0 && len(fails) == 0 && len(unknowns) == 0 && len(offlines) == 0 && len(pendingAudits) == 0 {
-			return Report{}, nil
-		}
-
-		errlist = errs.Group{}
+	reportFailures := func(tries int, resultType string, err error, nodes storj.NodeIDList, pending []*PendingAudit, pieces []*ReverificationJob) {
+		if err == nil || tries < reporter.maxRetries {
+			// don't need to report anything until the last time through
+			return
+		}
+		reporter.log.Error("failed to update reputation information with audit results",
+			zap.String("result type", resultType),
+			zap.Error(err),
+			zap.String("node IDs", strings.Join(nodes.Strings(), ", ")),
+			zap.Any("pending segment audits", pending),
+			zap.Any("pending piece audits", pieces))
+	}
+
+	var err error
+	for tries := 0; tries <= reporter.maxRetries; tries++ {
+		if len(successes) == 0 && len(fails) == 0 && len(unknowns) == 0 && len(offlines) == 0 && len(pendingAudits) == 0 && len(pieceAudits) == 0 {
+			return
+		}
 
 		successes, err = reporter.recordAuditStatus(ctx, successes, nodesReputation, reputation.AuditSuccess)
-		errlist.Add(err)
+		reportFailures(tries, "successful", err, successes, nil, nil)
 		fails, err = reporter.recordAuditStatus(ctx, fails, nodesReputation, reputation.AuditFailure)
-		errlist.Add(err)
+		reportFailures(tries, "failed", err, fails, nil, nil)
 		unknowns, err = reporter.recordAuditStatus(ctx, unknowns, nodesReputation, reputation.AuditUnknown)
-		errlist.Add(err)
+		reportFailures(tries, "unknown", err, unknowns, nil, nil)
 		offlines, err = reporter.recordAuditStatus(ctx, offlines, nodesReputation, reputation.AuditOffline)
-		errlist.Add(err)
+		reportFailures(tries, "offline", err, offlines, nil, nil)
 		pendingAudits, err = reporter.recordPendingAudits(ctx, pendingAudits, nodesReputation)
-		errlist.Add(err)
-
-		tries++
+		reportFailures(tries, "pending", err, nil, pendingAudits, nil)
+		pieceAudits, err = reporter.recordPendingPieceAudits(ctx, pieceAudits, nodesReputation)
+		reportFailures(tries, "pending", err, nil, nil, pieceAudits)
 	}
-
-	err = errlist.Err()
-	if tries >= reporter.maxRetries && err != nil {
-		return Report{
-			Successes:     successes,
-			Fails:         fails,
-			Offlines:      offlines,
-			Unknown:       unknowns,
-			PendingAudits: pendingAudits,
-		}, errs.Combine(Error.New("some nodes failed to be updated in overlay"), err)
-	}
-	return Report{}, nil
 }
 
 func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.NodeIDList, nodesReputation map[storj.NodeID]overlay.ReputationStatus, auditOutcome reputation.AuditType) (failed storj.NodeIDList, err error) {
@@ -133,6 +140,59 @@ func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.N
 	return failed, errors.Err()
 }
 
+// recordPendingPieceAudits updates the containment status of nodes with pending piece audits.
+// This function is temporary and will replace recordPendingAudits later in this commit chain.
+func (reporter *reporter) recordPendingPieceAudits(ctx context.Context, pendingAudits []*ReverificationJob, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failed []*ReverificationJob, err error) {
+	defer mon.Task()(&ctx)(&err)
+	var errlist errs.Group
+
+	for _, pendingAudit := range pendingAudits {
+		logger := reporter.log.With(
+			zap.Stringer("Node ID", pendingAudit.Locator.NodeID),
+			zap.Stringer("Stream ID", pendingAudit.Locator.StreamID),
+			zap.Uint64("Position", pendingAudit.Locator.Position.Encode()),
+			zap.Int("Piece Num", pendingAudit.Locator.PieceNum))
+
+		if pendingAudit.ReverifyCount < int(reporter.maxReverifyCount) {
+			err := reporter.ReportReverificationNeeded(ctx, &pendingAudit.Locator)
+			if err != nil {
+				failed = append(failed, pendingAudit)
+				errlist.Add(err)
+				continue
+			}
+			logger.Info("reverification queued")
+			continue
+		}
+		// record failure -- max reverify count reached
+		logger.Info("max reverify count reached (audit failed)")
+		err = reporter.reputations.ApplyAudit(ctx, pendingAudit.Locator.NodeID, nodesReputation[pendingAudit.Locator.NodeID], reputation.AuditFailure)
+		if err != nil {
+			logger.Info("failed to update reputation information", zap.Error(err))
+			errlist.Add(err)
+			failed = append(failed, pendingAudit)
+			continue
+		}
+		_, stillContained, err := reporter.newContainment.Delete(ctx, &pendingAudit.Locator)
+		if err != nil {
+			if !ErrContainedNotFound.Has(err) {
+				errlist.Add(err)
+			}
+			continue
+		}
+		if !stillContained {
+			err = reporter.overlay.SetNodeContained(ctx, pendingAudit.Locator.NodeID, false)
+			if err != nil {
+				logger.Error("failed to mark node as not contained", zap.Error(err))
+			}
+		}
+	}
+
+	if len(failed) > 0 {
+		return failed, errs.Combine(Error.New("failed to record some pending audits"), errlist.Err())
+	}
+	return nil, nil
+}
+
 // recordPendingAudits updates the containment status of nodes with pending audits.
 func (reporter *reporter) recordPendingAudits(ctx context.Context, pendingAudits []*PendingAudit, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failed []*PendingAudit, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -175,3 +235,68 @@ func (reporter *reporter) recordPendingAudits(ctx context.Context, pendingAudits
 	}
 	return nil, nil
 }
+
+func (reporter *reporter) ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	err = reporter.newContainment.Insert(ctx, piece)
+	if err != nil {
+		return Error.New("failed to queue reverification audit for node: %w", err)
+	}
+
+	err = reporter.overlay.SetNodeContained(ctx, piece.NodeID, true)
+	if err != nil {
+		return Error.New("failed to update contained status: %w", err)
+	}
+	return nil
+}
+
+func (reporter *reporter) RecordReverificationResult(ctx context.Context, pendingJob *ReverificationJob, outcome Outcome, reputation overlay.ReputationStatus) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	keepInQueue := true
+	report := Report{
+		NodesReputation: map[storj.NodeID]overlay.ReputationStatus{
+			pendingJob.Locator.NodeID: reputation,
+		},
+	}
+	switch outcome {
+	case OutcomeNotPerformed:
+	case OutcomeNotNecessary:
+		keepInQueue = false
+	case OutcomeSuccess:
+		report.Successes = append(report.Successes, pendingJob.Locator.NodeID)
+		keepInQueue = false
+	case OutcomeFailure:
+		report.Fails = append(report.Fails, pendingJob.Locator.NodeID)
+		keepInQueue = false
+	case OutcomeTimedOut:
+		// This will get re-added to the reverification queue, but that is idempotent
+		// and fine. We do need to add it to PendingAudits in order to get the
+		// maxReverifyCount check.
+		report.PieceAudits = append(report.PieceAudits, pendingJob)
+	case OutcomeUnknownError:
+		report.Unknown = append(report.Unknown, pendingJob.Locator.NodeID)
+		keepInQueue = false
+	case OutcomeNodeOffline:
+		report.Offlines = append(report.Offlines, pendingJob.Locator.NodeID)
+	}
+	var errList errs.Group
+
+	// apply any necessary reputation changes
+	reporter.RecordAudits(ctx, report)
+
+	// remove from reverifications queue if appropriate
+	if !keepInQueue {
+		_, stillContained, err := reporter.newContainment.Delete(ctx, &pendingJob.Locator)
+		if err != nil {
+			if !ErrContainedNotFound.Has(err) {
+				errList.Add(err)
+			}
+		} else if !stillContained {
+			err = reporter.overlay.SetNodeContained(ctx, pendingJob.Locator.NodeID, false)
+			errList.Add(err)
+		}
+	}
+	return errList.Err()
+}
@@ -11,11 +11,8 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 
-	"storj.io/common/memory"
-	"storj.io/common/pkcrypto"
 	"storj.io/common/storj"
 	"storj.io/common/testcontext"
-	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/audit"
@@ -32,24 +29,20 @@ func TestReportPendingAudits(t *testing.T) {
 
 		nodeID := planet.StorageNodes[0].ID()
 
-		pending := audit.PendingAudit{
-			NodeID:            nodeID,
-			PieceID:           storj.NewPieceID(),
-			StripeIndex:       1,
-			ShareSize:         1 * memory.KiB.Int32(),
-			ExpectedShareHash: pkcrypto.SHA256Hash([]byte("test")),
+		pending := audit.ReverificationJob{
+			Locator: audit.PieceLocator{
+				NodeID: nodeID,
+			},
 		}
 
-		report := audit.Report{PendingAudits: []*audit.PendingAudit{&pending}}
-		containment := satellite.DB.Containment()
+		report := audit.Report{PieceAudits: []*audit.ReverificationJob{&pending}}
+		containment := satellite.DB.NewContainment()
 
-		failed, err := audits.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
-		assert.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		pa, err := containment.Get(ctx, nodeID)
 		require.NoError(t, err)
-		assert.Equal(t, pending, *pa)
+		assert.Equal(t, pending.Locator, pa.Locator)
 	})
 }
 
@@ -66,9 +59,7 @@ func TestRecordAuditsAtLeastOnce(t *testing.T) {
 		report := audit.Report{Successes: []storj.NodeID{nodeID}}
 
 		// expect RecordAudits to try recording at least once (maxRetries is set to 0)
-		failed, err := audits.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
-		require.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		service := satellite.Reputation.Service
 		node, err := service.Get(ctx, nodeID)
@@ -103,22 +94,16 @@ func TestRecordAuditsCorrectOutcome(t *testing.T) {
 			Successes: []storj.NodeID{goodNode},
 			Fails:     []storj.NodeID{dqNode},
 			Unknown:   []storj.NodeID{suspendedNode},
-			PendingAudits: []*audit.PendingAudit{
+			PieceAudits: []*audit.ReverificationJob{
 				{
-					NodeID:            pendingNode,
-					PieceID:           testrand.PieceID(),
-					StripeIndex:       0,
-					ShareSize:         10,
-					ExpectedShareHash: []byte{},
-					ReverifyCount:     0,
+					Locator:       audit.PieceLocator{NodeID: pendingNode},
+					ReverifyCount: 0,
 				},
 			},
 			Offlines: []storj.NodeID{offlineNode},
 		}
 
-		failed, err := audits.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
-		require.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		overlay := satellite.Overlay.Service
 		node, err := overlay.Get(ctx, goodNode)
@@ -158,9 +143,7 @@ func TestSuspensionTimeNotResetBySuccessiveAudit(t *testing.T) {
 
 		suspendedNode := planet.StorageNodes[0].ID()
 
-		failed, err := audits.Reporter.RecordAudits(ctx, audit.Report{Unknown: []storj.NodeID{suspendedNode}})
-		require.NoError(t, err)
-		require.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, audit.Report{Unknown: []storj.NodeID{suspendedNode}})
 
 		overlay := satellite.Overlay.Service
 
@@ -171,9 +154,7 @@ func TestSuspensionTimeNotResetBySuccessiveAudit(t *testing.T) {
 
 		suspendedAt := node.UnknownAuditSuspended
 
-		failed, err = audits.Reporter.RecordAudits(ctx, audit.Report{Unknown: []storj.NodeID{suspendedNode}})
-		require.NoError(t, err)
-		require.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, audit.Report{Unknown: []storj.NodeID{suspendedNode}})
 
 		node, err = overlay.Get(ctx, suspendedNode)
 		require.NoError(t, err)
@@ -205,9 +186,7 @@ func TestGracefullyExitedNotUpdated(t *testing.T) {
 		report := audit.Report{
 			Successes: storj.NodeIDList{successNode.ID(), failedNode.ID(), containedNode.ID(), unknownNode.ID(), offlineNode.ID()},
 		}
-		failed, err := audits.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
-		assert.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		// mark each node as having gracefully exited
 		for _, node := range nodeList {
@@ -221,23 +200,19 @@ func TestGracefullyExitedNotUpdated(t *testing.T) {
 			require.NoError(t, err)
 		}
 
-		pending := audit.PendingAudit{
-			NodeID:            containedNode.ID(),
-			PieceID:           storj.NewPieceID(),
-			StripeIndex:       1,
-			ShareSize:         1 * memory.KiB.Int32(),
-			ExpectedShareHash: pkcrypto.SHA256Hash([]byte("test")),
+		pending := audit.ReverificationJob{
+			Locator: audit.PieceLocator{
+				NodeID: containedNode.ID(),
+			},
 		}
 		report = audit.Report{
-			Successes:     storj.NodeIDList{successNode.ID()},
-			Fails:         storj.NodeIDList{failedNode.ID()},
-			Offlines:      storj.NodeIDList{offlineNode.ID()},
-			PendingAudits: []*audit.PendingAudit{&pending},
-			Unknown:       storj.NodeIDList{unknownNode.ID()},
+			Successes:   storj.NodeIDList{successNode.ID()},
+			Fails:       storj.NodeIDList{failedNode.ID()},
+			Offlines:    storj.NodeIDList{offlineNode.ID()},
+			PieceAudits: []*audit.ReverificationJob{&pending},
+			Unknown:     storj.NodeIDList{unknownNode.ID()},
 		}
-		failed, err = audits.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
-		assert.Zero(t, failed)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		// since every node has gracefully exit, reputation, dq, and suspension should remain at default values
 		for _, node := range nodeList {
@@ -260,8 +235,7 @@ func TestReportOfflineAudits(t *testing.T) {
 		audits.Worker.Loop.Pause()
 		reputationService := satellite.Core.Reputation.Service
 
-		_, err := audits.Reporter.RecordAudits(ctx, audit.Report{Offlines: storj.NodeIDList{node.ID()}})
-		require.NoError(t, err)
+		audits.Reporter.RecordAudits(ctx, audit.Report{Offlines: storj.NodeIDList{node.ID()}})
 
 		info, err := reputationService.Get(ctx, node.ID())
 		require.NoError(t, err)
@@ -1089,8 +1089,7 @@ func TestReverifySlowDownload(t *testing.T) {
 		assert.Len(t, report.Unknown, 0)
 		assert.Equal(t, report.PendingAudits[0].NodeID, slowNode)
 
-		_, err = audits.Reporter.RecordAudits(ctx, report)
-		assert.NoError(t, err)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		_, err = containment.Get(ctx, slowNode)
 		assert.NoError(t, err)
@@ -1275,8 +1274,7 @@ func TestMaxReverifyCount(t *testing.T) {
 		assert.Len(t, report.Unknown, 0)
 		assert.Equal(t, report.PendingAudits[0].NodeID, slowNode)
 
-		_, err = audits.Reporter.RecordAudits(ctx, report)
-		assert.NoError(t, err)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		_, err = containment.Get(ctx, slowNode)
 		assert.NoError(t, err)
@@ -1292,8 +1290,7 @@ func TestMaxReverifyCount(t *testing.T) {
 		assert.Len(t, report.Unknown, 0)
 		assert.Equal(t, report.PendingAudits[0].NodeID, slowNode)
 
-		_, err = audits.Reporter.RecordAudits(ctx, report)
-		assert.NoError(t, err)
+		audits.Reporter.RecordAudits(ctx, report)
 
 		_, err = containment.Get(ctx, slowNode)
 		assert.True(t, audit.ErrContainedNotFound.Has(err))
@@ -121,11 +121,7 @@ func (worker *Worker) work(ctx context.Context, segment Segment) (err error) {
 		errlist.Add(err)
 	}
 
-	// TODO(moby) we need to decide if we want to do something with nodes that the reporter failed to update
-	_, err = worker.reporter.RecordAudits(ctx, report)
-	if err != nil {
-		errlist.Add(err)
-	}
+	worker.reporter.RecordAudits(ctx, report)
 
 	if err != nil {
 		if metabase.ErrSegmentNotFound.Has(err) {
@@ -161,11 +157,7 @@ func (worker *Worker) work(ctx context.Context, segment Segment) (err error) {
 		errlist.Add(err)
 	}
 
-	// TODO(moby) we need to decide if we want to do something with nodes that the reporter failed to update
-	_, err = worker.reporter.RecordAudits(ctx, report)
-	if err != nil {
-		errlist.Add(err)
-	}
+	worker.reporter.RecordAudits(ctx, report)
 
 	return errlist.Err()
 }
@@ -427,6 +427,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
 
 		peer.Audit.Reporter = audit.NewReporter(log.Named("audit:reporter"),
 			peer.Reputation.Service,
+			peer.Overlay.Service,
 			peer.DB.Containment(),
 			peer.DB.NewContainment(),
 			config.MaxRetriesStatDB,
@@ -453,11 +453,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
 		report.Unknown = append(report.Unknown, outcome.Piece.StorageNode)
 	}
 	if repairer.reputationUpdateEnabled {
-		_, reportErr := repairer.reporter.RecordAudits(ctx, report)
-		if reportErr != nil {
-			// failed updates should not affect repair, therefore we will not return the error
-			repairer.log.Debug("failed to record audit", zap.Error(reportErr))
-		}
+		repairer.reporter.RecordAudits(ctx, report)
 	}
 
 	// Upload the repaired pieces
@@ -219,6 +219,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
 	peer.Audit.Reporter = audit.NewReporter(
 		log.Named("reporter"),
 		peer.Reputation,
+		peer.Overlay,
 		containmentDB,
 		newContainmentDB,
 		config.Audit.MaxRetriesStatDB,
@@ -186,8 +186,7 @@ func TestAuditSuspendExceedGracePeriod(t *testing.T) {
 			NodesReputation: nodesStatus,
 		}
 		auditService := planet.Satellites[0].Audit
-		_, err := auditService.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
+		auditService.Reporter.RecordAudits(ctx, report)
 
 		// success and offline nodes should not be disqualified
 		// fail and unknown nodes should be disqualified
@@ -253,8 +252,7 @@ func TestAuditSuspendDQDisabled(t *testing.T) {
 			NodesReputation: nodesStatus,
 		}
 		auditService := planet.Satellites[0].Audit
-		_, err := auditService.Reporter.RecordAudits(ctx, report)
-		require.NoError(t, err)
+		auditService.Reporter.RecordAudits(ctx, report)
 
 		// successful node should not be suspended or disqualified
 		n, err := oc.Get(ctx, successNodeID)
@@ -23,10 +23,14 @@ type containment struct {
 	db *satelliteDB
 }
 
 var _ audit.Containment = &containment{}
 
+type newContainment struct {
+	reverifyQueue audit.ReverifyQueue
+}
+
+var _ audit.NewContainment = &newContainment{}
 
 // Get gets the pending audit by node id.
 func (containment *containment) Get(ctx context.Context, id pb.NodeID) (_ *audit.PendingAudit, err error) {
 	defer mon.Task()(&ctx)(&err)