satellite/audit: delete now-unused code

Now that we are doing scalable piecewise reverifications, the code for
handling the old way of doing things (containment, pending audits,
reporting, testing) can be removed.

Refs: https://github.com/storj/storj/issues/5230
Change-Id: Ief1a75f423eff682e8f3d57804e343b3409a6631
paul cannon 2022-11-23 09:14:27 -06:00 committed by Storj Robot
parent a66503b444
commit 0342ca1aa6
17 changed files with 43 additions and 491 deletions
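For orientation, here is a minimal sketch of the API change this commit completes, reconstructed from the diff below. The struct bodies are stubs for illustration (ReverificationJob and PieceLocator are real types defined elsewhere in the audit package), and NewContainment shows only the methods visible in this excerpt:

package audit

import (
	"context"

	"storj.io/common/pb"
)

// Illustrative stubs; the real types live elsewhere in the audit package.
type (
	PendingAudit      struct{} // per-node segment audit retry state; deleted by this commit
	ReverificationJob struct{} // per-piece retry state used by the reverification queue
	PieceLocator      struct{}
)

// Containment is the old interface removed here: at most one pending
// segment audit per contained node, stored in segment_pending_audits.
type Containment interface {
	Get(ctx context.Context, nodeID pb.NodeID) (*PendingAudit, error)
	IncrementPending(ctx context.Context, pendingAudit *PendingAudit) error
	Delete(ctx context.Context, nodeID pb.NodeID) (bool, error)
}

// NewContainment survives this commit; it is backed by the reverification
// queue and is renamed to Containment later in the commit chain.
type NewContainment interface {
	Get(ctx context.Context, nodeID pb.NodeID) (*ReverificationJob, error)
	Insert(ctx context.Context, job *PieceLocator) error
	// further methods elided in this excerpt
}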

View File

@@ -91,7 +91,6 @@ func cmdFetchPieces(cmd *cobra.Command, args []string) (err error) {
db.OverlayCache(),
db.NodeEvents(),
db.Reputation(),
- db.Containment(),
db.NewContainment(),
rollupsWriteCache,
version.Build,

View File

@@ -71,7 +71,6 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
db.OverlayCache(),
db.NodeEvents(),
db.Reputation(),
- db.Containment(),
db.NewContainment(),
rollupsWriteCache,
version.Build,

View File

@@ -700,7 +700,7 @@ func (planet *Planet) newRepairer(ctx context.Context, index int, identity *iden
rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})
- return satellite.NewRepairer(log, identity, metabaseDB, revocationDB, db.RepairQueue(), db.Buckets(), db.OverlayCache(), db.NodeEvents(), db.Reputation(), db.Containment(), db.NewContainment(), rollupsWriteCache, versionInfo, &config, nil)
+ return satellite.NewRepairer(log, identity, metabaseDB, revocationDB, db.RepairQueue(), db.Buckets(), db.OverlayCache(), db.NodeEvents(), db.Reputation(), db.NewContainment(), rollupsWriteCache, versionInfo, &config, nil)
}
type rollupsWriteCacheCloser struct {

View File

@@ -9,9 +9,6 @@ import (
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/metabase"
)
var (
@@ -25,34 +22,15 @@ var (
ErrContainDelete = errs.Class("unable to delete pending audit")
)
- // PendingAudit contains info needed for retrying an audit for a contained node.
- type PendingAudit struct {
- NodeID storj.NodeID
- PieceID storj.PieceID
- StripeIndex int32
- ShareSize int32
- ExpectedShareHash []byte
- ReverifyCount int32
- StreamID uuid.UUID
- Position metabase.SegmentPosition
- }
- // Containment holds information about pending audits for contained nodes.
- //
- // architecture: Database
- type Containment interface {
- Get(ctx context.Context, nodeID pb.NodeID) (*PendingAudit, error)
- IncrementPending(ctx context.Context, pendingAudit *PendingAudit) error
- Delete(ctx context.Context, nodeID pb.NodeID) (bool, error)
- }
// NewContainment holds information about pending audits for contained nodes.
//
// It will exist side by side with Containment for a few commits in this
// commit chain, to allow the change in reverifications to be made over
// several smaller commits.
//
- // Later in the commit chain, NewContainment will replace Containment.
+ // Later in the commit chain, NewContainment will be renamed to Containment.
//
// architecture: Database
type NewContainment interface {
Get(ctx context.Context, nodeID pb.NodeID) (*ReverificationJob, error)
Insert(ctx context.Context, job *PieceLocator) error

View File

@@ -1,142 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package audit_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/pkcrypto"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
)
func TestContainIncrementAndGet(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 2,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
containment := planet.Satellites[0].DB.Containment()
input := &audit.PendingAudit{
NodeID: planet.StorageNodes[0].ID(),
ExpectedShareHash: pkcrypto.SHA256Hash(testrand.Bytes(10)),
}
err := containment.IncrementPending(ctx, input)
require.NoError(t, err)
output, err := containment.Get(ctx, input.NodeID)
require.NoError(t, err)
assert.Equal(t, input, output)
nodeID1 := planet.StorageNodes[1].ID()
_, err = containment.Get(ctx, nodeID1)
require.Error(t, err, audit.ErrContainedNotFound.New("%v", nodeID1))
assert.True(t, audit.ErrContainedNotFound.Has(err))
})
}
func TestContainIncrementPendingEntryExists(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
containment := planet.Satellites[0].DB.Containment()
info1 := &audit.PendingAudit{
NodeID: planet.StorageNodes[0].ID(),
ExpectedShareHash: pkcrypto.SHA256Hash(testrand.Bytes(10)),
}
err := containment.IncrementPending(ctx, info1)
require.NoError(t, err)
// expect reverify count for an entry to be 0 after first IncrementPending call
pending, err := containment.Get(ctx, info1.NodeID)
require.NoError(t, err)
assert.EqualValues(t, 0, pending.ReverifyCount)
// expect reverify count to be 1 after second IncrementPending call
err = containment.IncrementPending(ctx, info1)
require.NoError(t, err)
pending, err = containment.Get(ctx, info1.NodeID)
require.NoError(t, err)
assert.EqualValues(t, 1, pending.ReverifyCount)
})
}
func TestContainDelete(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
containment := planet.Satellites[0].DB.Containment()
cache := planet.Satellites[0].DB.OverlayCache()
info1 := &audit.PendingAudit{
NodeID: planet.StorageNodes[0].ID(),
ExpectedShareHash: pkcrypto.SHA256Hash(testrand.Bytes(10)),
}
err := containment.IncrementPending(ctx, info1)
require.NoError(t, err)
// delete the node from containment db
isDeleted, err := containment.Delete(ctx, info1.NodeID)
require.NoError(t, err)
assert.True(t, isDeleted)
// check contained flag set to false
node, err := cache.Get(ctx, info1.NodeID)
require.NoError(t, err)
assert.False(t, node.Contained)
// get pending audit that doesn't exist
_, err = containment.Get(ctx, info1.NodeID)
assert.Error(t, err, audit.ErrContainedNotFound.New("%v", info1.NodeID))
assert.True(t, audit.ErrContainedNotFound.Has(err))
// delete pending audit that doesn't exist
isDeleted, err = containment.Delete(ctx, info1.NodeID)
require.NoError(t, err)
assert.False(t, isDeleted)
})
}
// UpdateStats used to remove nodes from containment. It doesn't anymore.
// This is a sanity check.
func TestContainUpdateStats(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
containment := planet.Satellites[0].DB.Containment()
cache := planet.Satellites[0].DB.OverlayCache()
info1 := &audit.PendingAudit{
NodeID: planet.StorageNodes[0].ID(),
ExpectedShareHash: pkcrypto.SHA256Hash(testrand.Bytes(10)),
}
err := containment.IncrementPending(ctx, info1)
require.NoError(t, err)
// update node stats
err = planet.Satellites[0].Reputation.Service.ApplyAudit(ctx, info1.NodeID, overlay.ReputationStatus{}, reputation.AuditSuccess)
require.NoError(t, err)
// check contained flag set to false
node, err := cache.Get(ctx, info1.NodeID)
require.NoError(t, err)
assert.False(t, node.Contained)
// get pending audit
_, err = containment.Get(ctx, info1.NodeID)
require.NoError(t, err)
})
}

View File

@@ -61,7 +61,6 @@ func reformVerifierWithMockConnector(t testing.TB, sat *testplanet.Satellite, mo
sat.Metabase.DB,
newDialer,
sat.Overlay.Service,
- sat.DB.Containment(),
sat.DB.NewContainment(),
sat.Orders.Service,
sat.Identity,

View File

@@ -22,8 +22,7 @@ type reporter struct {
log *zap.Logger
reputations *reputation.Service
overlay *overlay.Service
- containment Containment
- // newContainment is temporary, and will replace containment
+ // newContainment will be renamed to containment.
newContainment NewContainment
maxRetries int
maxReverifyCount int32
@@ -42,23 +41,21 @@ type Reporter interface {
// succeeded, failed, were offline, have pending audits, or failed for unknown
// reasons and their current reputation status.
type Report struct {
- Successes storj.NodeIDList
- Fails storj.NodeIDList
- Offlines storj.NodeIDList
- PendingAudits []*PendingAudit
- // PieceAudits is temporary and will replace PendingAudits.
+ Successes storj.NodeIDList
+ Fails storj.NodeIDList
+ Offlines storj.NodeIDList
+ // PieceAudits will be renamed to PendingAudits.
PieceAudits []*ReverificationJob
Unknown storj.NodeIDList
NodesReputation map[storj.NodeID]overlay.ReputationStatus
}
// NewReporter instantiates a reporter.
- func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, containment Containment, newContainment NewContainment, maxRetries int, maxReverifyCount int32) Reporter {
+ func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, newContainment NewContainment, maxRetries int, maxReverifyCount int32) Reporter {
return &reporter{
log: log,
reputations: reputations,
overlay: overlay,
- containment: containment,
newContainment: newContainment,
maxRetries: maxRetries,
maxReverifyCount: maxReverifyCount,
@@ -75,8 +72,7 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
fails := req.Fails
unknowns := req.Unknown
offlines := req.Offlines
- pendingAudits := req.PendingAudits
- pieceAudits := req.PieceAudits
+ pendingAudits := req.PieceAudits
reporter.log.Debug("Reporting audits",
zap.Int("successes", len(successes)),
@@ -84,12 +80,11 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
zap.Int("unknowns", len(unknowns)),
zap.Int("offlines", len(offlines)),
zap.Int("pending", len(pendingAudits)),
zap.Int("piece-pending", len(pieceAudits)),
)
nodesReputation := req.NodesReputation
- reportFailures := func(tries int, resultType string, err error, nodes storj.NodeIDList, pending []*PendingAudit, pieces []*ReverificationJob) {
+ reportFailures := func(tries int, resultType string, err error, nodes storj.NodeIDList, pending []*ReverificationJob) {
if err == nil || tries < reporter.maxRetries {
// don't need to report anything until the last time through
return
@@ -98,26 +93,25 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
zap.String("result type", resultType),
zap.Error(err),
zap.String("node IDs", strings.Join(nodes.Strings(), ", ")),
zap.Any("pending segment audits", pending),
zap.Any("pending piece audits", pieces))
zap.Any("pending segment audits", pending))
}
var err error
for tries := 0; tries <= reporter.maxRetries; tries++ {
- if len(successes) == 0 && len(fails) == 0 && len(unknowns) == 0 && len(offlines) == 0 && len(pendingAudits) == 0 && len(pieceAudits) == 0 {
+ if len(successes) == 0 && len(fails) == 0 && len(unknowns) == 0 && len(offlines) == 0 && len(pendingAudits) == 0 {
return
}
successes, err = reporter.recordAuditStatus(ctx, successes, nodesReputation, reputation.AuditSuccess)
reportFailures(tries, "successful", err, successes, nil, nil)
reportFailures(tries, "successful", err, successes, nil)
fails, err = reporter.recordAuditStatus(ctx, fails, nodesReputation, reputation.AuditFailure)
reportFailures(tries, "failed", err, fails, nil, nil)
reportFailures(tries, "failed", err, fails, nil)
unknowns, err = reporter.recordAuditStatus(ctx, unknowns, nodesReputation, reputation.AuditUnknown)
reportFailures(tries, "unknown", err, unknowns, nil, nil)
reportFailures(tries, "unknown", err, unknowns, nil)
offlines, err = reporter.recordAuditStatus(ctx, offlines, nodesReputation, reputation.AuditOffline)
reportFailures(tries, "offline", err, offlines, nil, nil)
pieceAudits, err = reporter.recordPendingPieceAudits(ctx, pieceAudits, nodesReputation)
reportFailures(tries, "pending", err, nil, nil, pieceAudits)
reportFailures(tries, "offline", err, offlines, nil)
pendingAudits, err = reporter.recordPendingPieceAudits(ctx, pendingAudits, nodesReputation)
reportFailures(tries, "pending", err, nil, pendingAudits)
}
}
@@ -139,7 +133,7 @@ func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.N
}
// recordPendingPieceAudits updates the containment status of nodes with pending piece audits.
- // This function is temporary and will replace recordPendingAudits later in this commit chain.
+ // This function is temporary and will be renamed to recordPendingAudits later in this commit chain.
func (reporter *reporter) recordPendingPieceAudits(ctx context.Context, pendingAudits []*ReverificationJob, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failed []*ReverificationJob, err error) {
defer mon.Task()(&ctx)(&err)
var errlist errs.Group

View File

@@ -273,7 +273,6 @@ func TestReverifyOfflineDialTimeout(t *testing.T) {
satellite.Metabase.DB,
dialer,
satellite.Overlay.Service,
- satellite.DB.Containment(),
satellite.DB.NewContainment(),
satellite.Orders.Service,
satellite.Identity,

View File

@@ -53,13 +53,13 @@ type Share struct {
//
// architecture: Worker
type Verifier struct {
- log *zap.Logger
- metabase *metabase.DB
- orders *orders.Service
- auditor *identity.PeerIdentity
- dialer rpc.Dialer
- overlay *overlay.Service
- containment Containment
+ log *zap.Logger
+ metabase *metabase.DB
+ orders *orders.Service
+ auditor *identity.PeerIdentity
+ dialer rpc.Dialer
+ overlay *overlay.Service
+ // newContainment will be renamed to containment.
newContainment NewContainment
minBytesPerSecond memory.Size
minDownloadTimeout time.Duration
@@ -69,7 +69,7 @@ type Verifier struct {
}
// NewVerifier creates a Verifier.
- func NewVerifier(log *zap.Logger, metabase *metabase.DB, dialer rpc.Dialer, overlay *overlay.Service, containment Containment, newContainment NewContainment, orders *orders.Service, id *identity.FullIdentity, minBytesPerSecond memory.Size, minDownloadTimeout time.Duration) *Verifier {
+ func NewVerifier(log *zap.Logger, metabase *metabase.DB, dialer rpc.Dialer, overlay *overlay.Service, newContainment NewContainment, orders *orders.Service, id *identity.FullIdentity, minBytesPerSecond memory.Size, minDownloadTimeout time.Duration) *Verifier {
return &Verifier{
log: log,
metabase: metabase,
@@ -77,7 +77,6 @@ func NewVerifier(log *zap.Logger, metabase *metabase.DB, dialer rpc.Dialer, over
auditor: id.PeerIdentity(),
dialer: dialer,
overlay: overlay,
- containment: containment,
newContainment: newContainment,
minBytesPerSecond: minBytesPerSecond,
minDownloadTimeout: minDownloadTimeout,
@@ -614,7 +613,7 @@ func recordStats(report Report, totalPieces int, verifyErr error) {
numOffline := len(report.Offlines)
numSuccessful := len(report.Successes)
numFailed := len(report.Fails)
- numContained := len(report.PendingAudits)
+ numContained := len(report.PieceAudits)
numUnknown := len(report.Unknown)
totalAudited := numSuccessful + numFailed + numOffline + numContained

View File

@@ -257,7 +257,6 @@ func TestDownloadSharesDialTimeout(t *testing.T) {
satellite.Metabase.DB,
dialer,
satellite.Overlay.Service,
- satellite.DB.Containment(),
satellite.DB.NewContainment(),
satellite.Orders.Service,
satellite.Identity,
@@ -333,7 +332,6 @@ func TestDownloadSharesDownloadTimeout(t *testing.T) {
satellite.Metabase.DB,
satellite.Dialer,
satellite.Overlay.Service,
- satellite.DB.Containment(),
satellite.DB.NewContainment(),
satellite.Orders.Service,
satellite.Identity,
@@ -392,7 +390,7 @@ func TestVerifierHappyPath(t *testing.T) {
assert.Len(t, report.Successes, len(segment.Pieces))
assert.Len(t, report.Fails, 0)
assert.Len(t, report.Offlines, 0)
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
})
}
@@ -429,7 +427,7 @@ func TestVerifierExpired(t *testing.T) {
assert.Len(t, report.Successes, 0)
assert.Len(t, report.Fails, 0)
assert.Len(t, report.Offlines, 0)
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
})
}
@@ -472,7 +470,7 @@ func TestVerifierOfflineNode(t *testing.T) {
assert.Len(t, report.Successes, len(segment.Pieces)-1)
assert.Len(t, report.Fails, 0)
assert.Len(t, report.Offlines, 1)
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
})
}
@@ -517,7 +515,7 @@ func TestVerifierMissingPiece(t *testing.T) {
assert.Len(t, report.Successes, origNumPieces-1)
assert.Len(t, report.Fails, 1)
assert.Len(t, report.Offlines, 0)
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
})
}
@@ -635,7 +633,6 @@ func TestVerifierDialTimeout(t *testing.T) {
satellite.Metabase.DB,
dialer,
satellite.Overlay.Service,
- satellite.DB.Containment(),
satellite.DB.NewContainment(),
satellite.Orders.Service,
satellite.Identity,
@@ -648,7 +645,7 @@ func TestVerifierDialTimeout(t *testing.T) {
assert.Len(t, report.Successes, 0)
assert.Len(t, report.Fails, 0)
assert.Len(t, report.Offlines, len(segment.Pieces))
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
})
}
@@ -683,7 +680,7 @@ func TestVerifierDeletedSegment(t *testing.T) {
assert.Zero(t, report.Successes)
assert.Zero(t, report.Fails)
assert.Zero(t, report.Offlines)
- assert.Zero(t, report.PendingAudits)
+ assert.Zero(t, report.PieceAudits)
assert.Zero(t, report.Unknown)
})
}
@@ -734,7 +731,7 @@ func TestVerifierModifiedSegment(t *testing.T) {
assert.Zero(t, report.Successes)
assert.Zero(t, report.Fails)
assert.Zero(t, report.Offlines)
- assert.Zero(t, report.PendingAudits)
+ assert.Zero(t, report.PieceAudits)
assert.Zero(t, report.Unknown)
})
}
@@ -772,7 +769,7 @@ func TestVerifierReplacedSegment(t *testing.T) {
assert.Zero(t, report.Successes)
assert.Zero(t, report.Fails)
assert.Zero(t, report.Offlines)
- assert.Zero(t, report.PendingAudits)
+ assert.Zero(t, report.PieceAudits)
assert.Zero(t, report.Unknown)
})
}
@@ -819,7 +816,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
require.Len(t, report.Fails, 1)
assert.Equal(t, report.Fails[0], piece.StorageNode)
assert.Len(t, report.Offlines, 0)
- require.Len(t, report.PendingAudits, 0)
+ require.Len(t, report.PieceAudits, 0)
})
}
@@ -928,7 +925,7 @@ func TestVerifierUnknownError(t *testing.T) {
assert.Len(t, report.Successes, 3)
assert.Len(t, report.Fails, 0)
assert.Len(t, report.Offlines, 0)
- assert.Len(t, report.PendingAudits, 0)
+ assert.Len(t, report.PieceAudits, 0)
require.Len(t, report.Unknown, 1)
assert.Equal(t, report.Unknown[0], badNode.ID())
})

View File

@@ -415,7 +415,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Metainfo.Metabase,
dialer,
peer.Overlay.Service,
- peer.DB.Containment(),
peer.DB.NewContainment(),
peer.Orders.Service,
peer.Identity,
@@ -430,7 +429,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Audit.Reporter = audit.NewReporter(log.Named("audit:reporter"),
peer.Reputation.Service,
peer.Overlay.Service,
- peer.DB.Containment(),
peer.DB.NewContainment(),
config.MaxRetriesStatDB,
int32(config.MaxReverifyCount),

View File

@@ -112,9 +112,8 @@ type DB interface {
OIDC() oidc.DB
// Orders returns database for orders
Orders() orders.DB
+ // NewContainment is temporary and will be renamed to Containment later in the commit chain.
- // Containment returns database for containment
- Containment() audit.Containment
- // NewContainment is temporary and will replace Containment later in the commit chain.
NewContainment() audit.NewContainment
// Buckets returns the database to interact with buckets
Buckets() buckets.DB

View File

@@ -85,7 +85,6 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
overlayCache overlay.DB,
nodeEvents nodeevents.DB,
reputationdb reputation.DB,
- containmentDB audit.Containment,
newContainmentDB audit.NewContainment,
rollupsWriteCache *orders.RollupsWriteCache,
versionInfo version.Info, config *Config, atomicLogLevel *zap.AtomicLevel,
@@ -220,7 +219,6 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
log.Named("reporter"),
peer.Reputation,
peer.Overlay,
- containmentDB,
newContainmentDB,
config.Audit.MaxRetriesStatDB,
int32(config.Audit.MaxReverifyCount))

View File

@@ -4,147 +4,19 @@
package satellitedb
import (
"bytes"
"context"
"database/sql"
"errors"
"go.uber.org/zap"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/satellitedb/dbx"
)
- type containment struct {
- db *satelliteDB
- }
- var _ audit.Containment = &containment{}
// newContainment will be renamed to containment.
type newContainment struct {
reverifyQueue audit.ReverifyQueue
}
var _ audit.NewContainment = &newContainment{}
- // Get gets the pending audit by node id.
- func (containment *containment) Get(ctx context.Context, id pb.NodeID) (_ *audit.PendingAudit, err error) {
- defer mon.Task()(&ctx)(&err)
- if id.IsZero() {
- return nil, audit.ContainError.New("node ID empty")
- }
- pending, err := containment.db.Get_SegmentPendingAudits_By_NodeId(ctx, dbx.SegmentPendingAudits_NodeId(id.Bytes()))
- if err != nil {
- if errors.Is(err, sql.ErrNoRows) {
- return nil, audit.ErrContainedNotFound.New("%v", id)
- }
- return nil, audit.ContainError.Wrap(err)
- }
- return convertDBPending(ctx, pending)
- }
- // IncrementPending creates a new pending audit entry, or increases its reverify count if it already exists.
- func (containment *containment) IncrementPending(ctx context.Context, pendingAudit *audit.PendingAudit) (err error) {
- defer mon.Task()(&ctx)(&err)
- err = containment.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
- existingAudit, err := tx.Get_SegmentPendingAudits_By_NodeId(ctx, dbx.SegmentPendingAudits_NodeId(pendingAudit.NodeID.Bytes()))
- switch {
- case errors.Is(err, sql.ErrNoRows):
- statement := containment.db.Rebind(
- `INSERT INTO segment_pending_audits (
- node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count, stream_id, position
- )
- VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
- )
- _, err = tx.Tx.ExecContext(ctx, statement,
- pendingAudit.NodeID.Bytes(), pendingAudit.PieceID.Bytes(), pendingAudit.StripeIndex,
- pendingAudit.ShareSize, pendingAudit.ExpectedShareHash, pendingAudit.ReverifyCount,
- pendingAudit.StreamID, pendingAudit.Position.Encode())
- if err != nil {
- return err
- }
- case err == nil:
- if !bytes.Equal(existingAudit.ExpectedShareHash, pendingAudit.ExpectedShareHash) {
- containment.db.log.Info("pending audit already exists",
- zap.String("node id", pendingAudit.NodeID.String()),
- zap.String("segment streamid", pendingAudit.StreamID.String()),
- zap.Uint64("segment position", pendingAudit.Position.Encode()),
- )
- return nil
- }
- statement := tx.Rebind(
- `UPDATE segment_pending_audits SET reverify_count = segment_pending_audits.reverify_count + 1
- WHERE segment_pending_audits.node_id=?`,
- )
- _, err = tx.Tx.ExecContext(ctx, statement, pendingAudit.NodeID.Bytes())
- if err != nil {
- return err
- }
- default:
- return err
- }
- return nil
- })
- return audit.ContainError.Wrap(err)
- }
- // Delete deletes the pending audit.
- func (containment *containment) Delete(ctx context.Context, id pb.NodeID) (isDeleted bool, err error) {
- defer mon.Task()(&ctx)(&err)
- if id.IsZero() {
- return false, audit.ContainError.New("node ID empty")
- }
- err = containment.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) (err error) {
- isDeleted, err = tx.Delete_SegmentPendingAudits_By_NodeId(ctx, dbx.SegmentPendingAudits_NodeId(id.Bytes()))
- return err
- })
- return isDeleted, audit.ContainError.Wrap(err)
- }
- func convertDBPending(ctx context.Context, info *dbx.SegmentPendingAudits) (_ *audit.PendingAudit, err error) {
- defer mon.Task()(&ctx)(&err)
- if info == nil {
- return nil, Error.New("missing info")
- }
- nodeID, err := storj.NodeIDFromBytes(info.NodeId)
- if err != nil {
- return nil, audit.ContainError.Wrap(err)
- }
- pieceID, err := storj.PieceIDFromBytes(info.PieceId)
- if err != nil {
- return nil, audit.ContainError.Wrap(err)
- }
- streamID, err := uuid.FromBytes(info.StreamId)
- if err != nil {
- return nil, audit.ContainError.Wrap(err)
- }
- position := metabase.SegmentPositionFromEncoded(info.Position)
- pending := &audit.PendingAudit{
- NodeID: nodeID,
- PieceID: pieceID,
- StripeIndex: int32(info.StripeIndex),
- ShareSize: int32(info.ShareSize),
- ExpectedShareHash: info.ExpectedShareHash,
- ReverifyCount: int32(info.ReverifyCount),
- StreamID: streamID,
- Position: position,
- }
- return pending, nil
- }
// Get gets a pending reverification audit by node id. If there are
// multiple pending reverification audits, an arbitrary one is returned.
// If there are none, an error wrapped by audit.ErrContainedNotFound is

View File

@@ -276,12 +276,7 @@ func (dbc *satelliteDBCollection) Orders() orders.DB {
return &ordersDB{db: db}
}
- // Containment returns database for storing pending audit info.
- func (dbc *satelliteDBCollection) Containment() audit.Containment {
- return &containment{db: dbc.getByName("containment")}
- }
- // NewContainment is temporary and will replace Containment later in the commit chain.
+ // NewContainment will be renamed to Containment later in the commit chain.
func (dbc *satelliteDBCollection) NewContainment() audit.NewContainment {
return &newContainment{reverifyQueue: dbc.ReverifyQueue()}
}

View File

@@ -94,12 +94,6 @@ model segment_pending_audits (
field reverify_count int64 ( updatable )
)
- delete segment_pending_audits ( where segment_pending_audits.node_id = ? )
- read one (
- select segment_pending_audits
- where segment_pending_audits.node_id = ?
- )
//--- auditing ---//
model verification_audits (

View File

@@ -14363,28 +14363,6 @@ func (obj *pgxImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context
}
- func (obj *pgxImpl) Get_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- segment_pending_audits *SegmentPendingAudits, err error) {
- defer mon.Task()(&ctx)(&err)
- var __embed_stmt = __sqlbundle_Literal("SELECT segment_pending_audits.node_id, segment_pending_audits.stream_id, segment_pending_audits.position, segment_pending_audits.piece_id, segment_pending_audits.stripe_index, segment_pending_audits.share_size, segment_pending_audits.expected_share_hash, segment_pending_audits.reverify_count FROM segment_pending_audits WHERE segment_pending_audits.node_id = ?")
- var __values []interface{}
- __values = append(__values, segment_pending_audits_node_id.value())
- var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
- obj.logStmt(__stmt, __values...)
- segment_pending_audits = &SegmentPendingAudits{}
- err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&segment_pending_audits.NodeId, &segment_pending_audits.StreamId, &segment_pending_audits.Position, &segment_pending_audits.PieceId, &segment_pending_audits.StripeIndex, &segment_pending_audits.ShareSize, &segment_pending_audits.ExpectedShareHash, &segment_pending_audits.ReverifyCount)
- if err != nil {
- return (*SegmentPendingAudits)(nil), obj.makeErr(err)
- }
- return segment_pending_audits, nil
- }
func (obj *pgxImpl) First_ReverificationAudits_By_NodeId_OrderBy_Asc_StreamId_Asc_Position(ctx context.Context,
reverification_audits_node_id ReverificationAudits_NodeId_Field) (
reverification_audits *ReverificationAudits, err error) {
@@ -19798,33 +19776,6 @@ func (obj *pgxImpl) Delete_NodeEvent_By_CreatedAt_Less(ctx context.Context,
}
- func (obj *pgxImpl) Delete_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- deleted bool, err error) {
- defer mon.Task()(&ctx)(&err)
- var __embed_stmt = __sqlbundle_Literal("DELETE FROM segment_pending_audits WHERE segment_pending_audits.node_id = ?")
- var __values []interface{}
- __values = append(__values, segment_pending_audits_node_id.value())
- var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
- obj.logStmt(__stmt, __values...)
- __res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
- if err != nil {
- return false, obj.makeErr(err)
- }
- __count, err := __res.RowsAffected()
- if err != nil {
- return false, obj.makeErr(err)
- }
- return __count > 0, nil
- }
func (obj *pgxImpl) Delete_ReverificationAudits_By_NodeId_And_StreamId_And_Position(ctx context.Context,
reverification_audits_node_id ReverificationAudits_NodeId_Field,
reverification_audits_stream_id ReverificationAudits_StreamId_Field,
@@ -22363,28 +22314,6 @@ func (obj *pgxcockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ct
}
- func (obj *pgxcockroachImpl) Get_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- segment_pending_audits *SegmentPendingAudits, err error) {
- defer mon.Task()(&ctx)(&err)
- var __embed_stmt = __sqlbundle_Literal("SELECT segment_pending_audits.node_id, segment_pending_audits.stream_id, segment_pending_audits.position, segment_pending_audits.piece_id, segment_pending_audits.stripe_index, segment_pending_audits.share_size, segment_pending_audits.expected_share_hash, segment_pending_audits.reverify_count FROM segment_pending_audits WHERE segment_pending_audits.node_id = ?")
- var __values []interface{}
- __values = append(__values, segment_pending_audits_node_id.value())
- var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
- obj.logStmt(__stmt, __values...)
- segment_pending_audits = &SegmentPendingAudits{}
- err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&segment_pending_audits.NodeId, &segment_pending_audits.StreamId, &segment_pending_audits.Position, &segment_pending_audits.PieceId, &segment_pending_audits.StripeIndex, &segment_pending_audits.ShareSize, &segment_pending_audits.ExpectedShareHash, &segment_pending_audits.ReverifyCount)
- if err != nil {
- return (*SegmentPendingAudits)(nil), obj.makeErr(err)
- }
- return segment_pending_audits, nil
- }
func (obj *pgxcockroachImpl) First_ReverificationAudits_By_NodeId_OrderBy_Asc_StreamId_Asc_Position(ctx context.Context,
reverification_audits_node_id ReverificationAudits_NodeId_Field) (
reverification_audits *ReverificationAudits, err error) {
@@ -27798,33 +27727,6 @@ func (obj *pgxcockroachImpl) Delete_NodeEvent_By_CreatedAt_Less(ctx context.Cont
}
- func (obj *pgxcockroachImpl) Delete_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- deleted bool, err error) {
- defer mon.Task()(&ctx)(&err)
- var __embed_stmt = __sqlbundle_Literal("DELETE FROM segment_pending_audits WHERE segment_pending_audits.node_id = ?")
- var __values []interface{}
- __values = append(__values, segment_pending_audits_node_id.value())
- var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
- obj.logStmt(__stmt, __values...)
- __res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
- if err != nil {
- return false, obj.makeErr(err)
- }
- __count, err := __res.RowsAffected()
- if err != nil {
- return false, obj.makeErr(err)
- }
- return __count > 0, nil
- }
func (obj *pgxcockroachImpl) Delete_ReverificationAudits_By_NodeId_And_StreamId_And_Position(ctx context.Context,
reverification_audits_node_id ReverificationAudits_NodeId_Field,
reverification_audits_stream_id ReverificationAudits_StreamId_Field,
@@ -29767,16 +29669,6 @@ func (rx *Rx) Delete_ReverificationAudits_By_NodeId_And_StreamId_And_Position(ct
return tx.Delete_ReverificationAudits_By_NodeId_And_StreamId_And_Position(ctx, reverification_audits_node_id, reverification_audits_stream_id, reverification_audits_position)
}
- func (rx *Rx) Delete_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- deleted bool, err error) {
- var tx *Tx
- if tx, err = rx.getTx(ctx); err != nil {
- return
- }
- return tx.Delete_SegmentPendingAudits_By_NodeId(ctx, segment_pending_audits_node_id)
- }
func (rx *Rx) Delete_StorjscanPayment_By_Status(ctx context.Context,
storjscan_payment_status StorjscanPayment_Status_Field) (
count int64, err error) {
@@ -30241,16 +30133,6 @@ func (rx *Rx) Get_ResetPasswordToken_By_Secret(ctx context.Context,
return tx.Get_ResetPasswordToken_By_Secret(ctx, reset_password_token_secret)
}
- func (rx *Rx) Get_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- segment_pending_audits *SegmentPendingAudits, err error) {
- var tx *Tx
- if tx, err = rx.getTx(ctx); err != nil {
- return
- }
- return tx.Get_SegmentPendingAudits_By_NodeId(ctx, segment_pending_audits_node_id)
- }
func (rx *Rx) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
storagenode_paystub_period StoragenodePaystub_Period_Field) (
@@ -31356,10 +31238,6 @@ type Methods interface {
reverification_audits_position ReverificationAudits_Position_Field) (
deleted bool, err error)
- Delete_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- deleted bool, err error)
Delete_StorjscanPayment_By_Status(ctx context.Context,
storjscan_payment_status StorjscanPayment_Status_Field) (
count int64, err error)
@@ -31552,10 +31430,6 @@ type Methods interface {
reset_password_token_secret ResetPasswordToken_Secret_Field) (
reset_password_token *ResetPasswordToken, err error)
- Get_SegmentPendingAudits_By_NodeId(ctx context.Context,
- segment_pending_audits_node_id SegmentPendingAudits_NodeId_Field) (
- segment_pending_audits *SegmentPendingAudits, err error)
Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
storagenode_paystub_period StoragenodePaystub_Period_Field) (