2022-12-09 17:04:05 +00:00
|
|
|
// Copyright (C) 2022 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/spf13/cobra"
|
|
|
|
"github.com/zeebo/errs"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
|
|
|
|
"storj.io/common/uuid"
|
|
|
|
"storj.io/private/process"
|
|
|
|
"storj.io/storj/satellite/metabase"
|
|
|
|
"storj.io/storj/satellite/overlay"
|
|
|
|
"storj.io/storj/satellite/satellitedb"
|
|
|
|
)
|
|
|
|
|
2022-12-13 22:14:39 +00:00
|
|
|
func verifySegmentsNodeCheck(cmd *cobra.Command, args []string) error {
|
2022-12-09 17:04:05 +00:00
|
|
|
ctx, _ := process.Ctx(cmd)
|
|
|
|
log := zap.L()
|
|
|
|
|
|
|
|
// open default satellite database
|
|
|
|
db, err := satellitedb.Open(ctx, log.Named("db"), satelliteCfg.Database, satellitedb.Options{
|
|
|
|
ApplicationName: "segment-verify",
|
|
|
|
SaveRollupBatchSize: satelliteCfg.Tally.SaveRollupBatchSize,
|
|
|
|
ReadRollupBatchSize: satelliteCfg.Tally.ReadRollupBatchSize,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errs.New("Error starting master database on satellite: %+v", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
err = errs.Combine(err, db.Close())
|
|
|
|
}()
|
|
|
|
|
|
|
|
// open metabase
|
|
|
|
metabaseDB, err := metabase.Open(ctx, log.Named("metabase"), satelliteCfg.Metainfo.DatabaseURL,
|
|
|
|
satelliteCfg.Config.Metainfo.Metabase("satellite-core"))
|
|
|
|
if err != nil {
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
defer func() { _ = metabaseDB.Close() }()
|
|
|
|
|
|
|
|
// check whether satellite and metabase versions match
|
|
|
|
versionErr := db.CheckVersion(ctx)
|
|
|
|
if versionErr != nil {
|
|
|
|
log.Error("versions skewed", zap.Error(versionErr))
|
|
|
|
return Error.Wrap(versionErr)
|
|
|
|
}
|
|
|
|
|
|
|
|
versionErr = metabaseDB.CheckVersion(ctx)
|
|
|
|
if versionErr != nil {
|
|
|
|
log.Error("versions skewed", zap.Error(versionErr))
|
|
|
|
return Error.Wrap(versionErr)
|
|
|
|
}
|
|
|
|
|
2022-12-13 22:14:39 +00:00
|
|
|
service, err := NewNodeCheckService(log, metabaseDB, db.OverlayCache(), nodeCheckCfg)
|
2022-12-09 17:04:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
defer func() { err = errs.Combine(err, service.Close()) }()
|
|
|
|
|
|
|
|
return service.ProcessAll(ctx)
|
|
|
|
}
|
|
|
|
|
2022-12-13 22:14:39 +00:00
|
|
|
// NodeCheckConfig defines configuration for verifying segment existence.
type NodeCheckConfig struct {
	// BatchSize is how many segments are fetched per metabase listing call.
	BatchSize int `help:"number of segments to process per batch" default:"10000"`
	// DuplicatesLimit is the maximum pieces per segment allowed to share a subnet.
	DuplicatesLimit int `help:"maximum duplicates allowed" default:"3"`
	// UnvettedLimit is the maximum pieces per segment allowed on unvetted nodes.
	UnvettedLimit int `help:"maximum unvetted allowed" default:"9"`
	// IncludeAllNodes makes pieces on disqualified and exited nodes count as well.
	IncludeAllNodes bool `help:"include disqualified and exited nodes in the node check" default:"false"`

	// AsOfSystemInterval is forwarded to segment listing; presumably used for
	// database "AS OF SYSTEM TIME" reads — confirm against metabase behavior.
	AsOfSystemInterval time.Duration `help:"as of system interval" releaseDefault:"-5m" devDefault:"-1us" testDefault:"-1us"`
}
|
|
|
|
|
2022-12-13 22:14:39 +00:00
|
|
|
// NodeCheckOverlayDB contains dependencies from overlay that are needed for the processing.
type NodeCheckOverlayDB interface {
	// IterateAllContactedNodes invokes the callback for overlay nodes; exact
	// filtering ("contacted") is defined by the implementation.
	IterateAllContactedNodes(context.Context, func(context.Context, *overlay.SelectedNode) error) error
	// IterateAllNodeDossiers invokes the callback with the full dossier of
	// every node in the overlay.
	IterateAllNodeDossiers(context.Context, func(context.Context, *overlay.NodeDossier) error) error
}
|
|
|
|
|
2022-12-13 22:14:39 +00:00
|
|
|
// NodeCheckService implements a service for checking duplicate nets being used in a segment.
type NodeCheckService struct {
	log    *zap.Logger
	config NodeCheckConfig

	metabase Metabase
	overlay  NodeCheckOverlayDB

	// lookup tables for nodes
	aliasMap *metabase.NodeAliasMap

	// alias table for lastNet lookups
	netAlias map[string]netAlias
	// NOTE(review): despite the name, this map is keyed by node alias (see
	// init), not by net alias — confirm before renaming.
	nodeInfoByNetAlias map[metabase.NodeAlias]nodeInfo

	// table for converting a node alias to a net alias
	nodeAliasToNetAlias []netAlias

	// scratch is reused across Verify calls to mark subnets already seen
	// within a single segment.
	scratch bitset
}
|
|
|
|
|
|
|
|
// netAlias represents a unique ID for a given subnet.
type netAlias int

// nodeInfo holds the node status flags the check needs per node.
type nodeInfo struct {
	vetted       bool // VettedAt was set on the node's reputation status
	disqualified bool // Disqualified was set on the node's reputation status
	exited       bool // ExitFinishedAt was set on the node's exit status
}
|
|
|
|
|
|
|
|
// NewNodeCheckService returns a new service for verifying segments.
|
|
|
|
func NewNodeCheckService(log *zap.Logger, metabaseDB Metabase, overlay NodeCheckOverlayDB, config NodeCheckConfig) (*NodeCheckService, error) {
|
|
|
|
return &NodeCheckService{
|
2022-12-09 17:04:05 +00:00
|
|
|
log: log,
|
|
|
|
config: config,
|
|
|
|
|
|
|
|
metabase: metabaseDB,
|
|
|
|
overlay: overlay,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes the service.
//
// Currently a no-op — the service holds no resources of its own — but kept
// so callers can uniformly defer Close.
func (service *NodeCheckService) Close() (err error) {
	return nil
}
|
|
|
|
|
|
|
|
// init sets up tables for quick verification.
//
// It loads the latest node alias map from the metabase, then iterates every
// node dossier in the overlay to build:
//   - netAlias: a compact integer ID per distinct LastNet value,
//   - nodeInfoByNetAlias: status flags (vetted/disqualified/exited) keyed by
//     node alias (note: name says net alias, key is node alias),
//   - nodeAliasToNetAlias: dense node-alias -> net-alias translation table,
//
// and finally allocates the scratch bitset sized to the number of distinct nets.
func (service *NodeCheckService) init(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	service.aliasMap, err = service.metabase.LatestNodesAliasMap(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	service.netAlias = make(map[string]netAlias)
	service.nodeInfoByNetAlias = make(map[metabase.NodeAlias]nodeInfo)
	// dense table indexed by node alias; +1 so the maximum alias is in range
	service.nodeAliasToNetAlias = make([]netAlias, service.aliasMap.Max()+1)

	err = service.overlay.IterateAllNodeDossiers(ctx, func(ctx context.Context, node *overlay.NodeDossier) error {
		nodeAlias, ok := service.aliasMap.Alias(node.Id)
		if !ok {
			// some nodes aren't in the metabase
			return nil
		}

		// assign unique ID-s for all nets
		net := node.LastNet
		alias, ok := service.netAlias[net]
		if !ok {
			// first time this net is seen: next sequential ID
			alias = netAlias(len(service.netAlias))
			service.netAlias[net] = alias
		}
		nodeInfo := nodeInfo{
			vetted:       node.Reputation.Status.VettedAt != nil,
			disqualified: node.Reputation.Status.Disqualified != nil,
			exited:       node.ExitStatus.ExitFinishedAt != nil,
		}
		service.nodeInfoByNetAlias[nodeAlias] = nodeInfo
		service.nodeAliasToNetAlias[nodeAlias] = alias
		return nil
	})
	if err != nil {
		return Error.Wrap(err)
	}

	// one bit per distinct net, reused by Verify for each segment
	service.scratch = newBitSet(len(service.netAlias))

	return nil
}
|
|
|
|
|
|
|
|
// ProcessAll processes all segments with the specified batchSize.
//
// Segments are paged from the metabase in (StreamID, Position) cursor order,
// BatchSize at a time, until an empty page signals completion. A Verify
// failure is logged as a finding and processing continues; only listing
// errors abort the run.
func (service *NodeCheckService) ProcessAll(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := service.init(ctx); err != nil {
		return Error.Wrap(err)
	}

	// paging cursor; zero values start from the beginning
	var cursorStreamID uuid.UUID
	var cursorPosition metabase.SegmentPosition

	var progress int64
	for {
		result, err := service.metabase.ListVerifySegments(ctx, metabase.ListVerifySegments{
			CursorStreamID: cursorStreamID,
			CursorPosition: cursorPosition,
			Limit:          service.config.BatchSize,

			AsOfSystemInterval: service.config.AsOfSystemInterval,
		})
		if err != nil {
			return Error.Wrap(err)
		}
		segments := result.Segments

		// All done?
		if len(segments) == 0 {
			return nil
		}

		// advance the cursor past this batch
		last := &segments[len(segments)-1]
		cursorStreamID, cursorPosition = last.StreamID, last.Position

		service.log.Info("processing segments",
			zap.Int64("progress", progress),
			zap.Int("count", len(segments)),
			zap.Stringer("first", segments[0].StreamID),
			zap.Stringer("last", segments[len(segments)-1].StreamID),
		)
		progress += int64(len(segments))

		// Process the data.
		for _, segment := range segments {
			if err := service.Verify(ctx, segment); err != nil {
				// a finding, not a fatal error — log and continue
				service.log.Warn("found",
					zap.Stringer("stream-id", segment.StreamID),
					zap.Uint64("position", segment.Position.Encode()),
					zap.Error(err))
			}
		}
	}
}
|
|
|
|
|
|
|
|
// Verify verifies a single segment.
//
// Over the segment's pieces it tallies:
//   - count: pieces whose net was already used by an earlier piece of the
//     same segment (duplicate-subnet placements, tracked via the scratch bitset);
//   - unvetted: pieces stored on nodes that are not yet vetted.
//
// Pieces on disqualified or exited nodes are skipped unless IncludeAllNodes
// is set; aliases beyond the init-time lookup table are skipped silently.
// When either tally exceeds its configured limit the segment is printed to
// stdout as a tab-separated line. The error return is currently always nil.
func (service *NodeCheckService) Verify(ctx context.Context, segment metabase.VerifySegment) (err error) {
	// intentionally no monitoring for performance
	scratch := service.scratch
	scratch.Clear()

	count := 0
	unvetted := 0
	for _, alias := range segment.AliasPieces {
		if alias.Alias >= metabase.NodeAlias(len(service.nodeAliasToNetAlias)) {
			// node alias unknown at init time; cannot classify, skip
			continue
		}

		nodeInfo := service.nodeInfoByNetAlias[alias.Alias]
		if !service.config.IncludeAllNodes &&
			(nodeInfo.disqualified || nodeInfo.exited) {
			continue
		}

		if !nodeInfo.vetted {
			unvetted++
		}

		netAlias := service.nodeAliasToNetAlias[alias.Alias]
		if scratch.Include(int(netAlias)) {
			// net already seen within this segment — a duplicate placement
			count++
		}
	}
	if count > service.config.DuplicatesLimit || unvetted > service.config.UnvettedLimit {
		fmt.Printf("%s\t%d\t%d\t%d\t%v\t%v\n", segment.StreamID, segment.Position.Encode(), count, unvetted, segment.CreatedAt, segment.RepairedAt)
	}

	return nil
}
|
|
|
|
|
|
|
|
// bitset is a compact set of small non-negative integers, one bit per value.
type bitset []uint32

// newBitSet returns a bitset capable of holding values in [0, size).
func newBitSet(size int) bitset {
	words := (size + 31) / 32
	return make(bitset, words)
}

// offset maps index to the word holding its bit and the mask selecting it.
func (set bitset) offset(index int) (bucket, bitmask uint32) {
	bucket = uint32(index) / 32
	bitmask = uint32(1) << (uint32(index) % 32)
	return bucket, bitmask
}

// Include adds index to the set and reports whether it was already present.
func (set bitset) Include(index int) bool {
	bucket, bitmask := set.offset(index)
	present := set[bucket]&bitmask != 0
	set[bucket] |= bitmask
	return present
}

// Clear removes every member from the set, keeping the backing storage.
func (set bitset) Clear() {
	for i := 0; i < len(set); i++ {
		set[i] = 0
	}
}
|