satellite/gc/bloomfilter: use int64 to count pieces

Piece counts in the DB are stored as int64, and we would like to align bloom
filter processing with this type.

Change-Id: Iaec767e609a40d802077ae057520541805a7c44f
Michal Niewrzal, 2022-09-13 10:37:54 +02:00 (committed by Storj Robot)
parent c210776a36
commit a22e6bdf67
11 changed files with 25 additions and 25 deletions
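
The change itself is mechanical: everywhere the satellite passes per-node piece counts around (the overlay DB interface, the gc bloom filter Config, and the PieceTracker observer), the map value type moves from map[storj.NodeID]int to map[storj.NodeID]int64 so it matches the int64 type used in the database, and the int(row.PieceCount) narrowing on the way out of the DB goes away. As a rough, self-contained sketch of the resulting shape (NodeID is a placeholder for storj.NodeID and sizeFilter is a hypothetical helper for illustration, not the actual service code):

package main

import "fmt"

// NodeID is a simplified stand-in for storj.NodeID so the sketch compiles on its own.
type NodeID [32]byte

// sizeFilter is a hypothetical helper: it picks the piece count used to size a
// node's bloom filter, falling back to the configured InitialPieces estimate when
// the overlay has no count for the node. After this change both values are int64.
func sizeFilter(pieceCounts map[NodeID]int64, node NodeID, initialPieces int64) int64 {
	if count, ok := pieceCounts[node]; ok && count > 0 {
		return count
	}
	return initialPieces
}

func main() {
	// lastPieceCounts mirrors what overlay.DB.AllPieceCounts now returns: map[storj.NodeID]int64.
	lastPieceCounts := map[NodeID]int64{
		{1}: 400000,
	}
	fmt.Println(sizeFilter(lastPieceCounts, NodeID{1}, 400000)) // known node: uses the stored count
	fmt.Println(sizeFilter(lastPieceCounts, NodeID{2}, 400000)) // unknown node: falls back to InitialPieces
}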

go.mod (2 lines changed)

@@ -51,7 +51,7 @@ require (
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
 gopkg.in/segmentio/analytics-go.v3 v3.1.0
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
-storj.io/common v0.0.0-20220902050723-654fd98b53c6
+storj.io/common v0.0.0-20220912074536-0fff01212055
 storj.io/drpc v0.0.32
 storj.io/monkit-jaeger v0.0.0-20220726162929-c3a9898b5bca
 storj.io/private v0.0.0-20220915132359-957cab776577

go.sum (4 lines changed)

@@ -952,8 +952,8 @@ sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3
 storj.io/common v0.0.0-20220708152916-e2f08365ed65/go.mod h1:PdwPrX+QWAm4vgVyka5U13vA0jKk49MpV4tzW4HTaz0=
 storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
 storj.io/common v0.0.0-20220802175255-aae0c09ec9d4/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6 h1:qjerTUC3dl6XzF44LkOEGKFpyz7oDkbtyr/tZUa+CFU=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
+storj.io/common v0.0.0-20220912074536-0fff01212055 h1:snQ1bxkJCCBnWJ37ngBDxPEoKwRGfhkZ9fxSbfFA5YI=
+storj.io/common v0.0.0-20220912074536-0fff01212055/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
 storj.io/drpc v0.0.32 h1:5p5ZwsK/VOgapaCu+oxaPVwO6UwIs+iwdMiD50+R4PI=
 storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
 storj.io/monkit-jaeger v0.0.0-20220726162929-c3a9898b5bca h1:+dSyCu45DLiNoWl/LNZ/kO0MLvSGQwjiM8zwdknwJbg=

View File

@@ -31,7 +31,7 @@ type PieceTracker struct {
 log *zap.Logger
 config Config
 // TODO: should we use int or int64 consistently for piece count (db type is int64)?
-pieceCounts map[storj.NodeID]int
+pieceCounts map[storj.NodeID]int64
 startTime time.Time
 RetainInfos map[storj.NodeID]*RetainInfo
@@ -42,7 +42,7 @@ type PieceTracker struct {
 }
 // NewPieceTracker instantiates a new gc piece tracker to be subscribed to the segments loop.
-func NewPieceTracker(log *zap.Logger, config Config, pieceCounts map[storj.NodeID]int) *PieceTracker {
+func NewPieceTracker(log *zap.Logger, config Config, pieceCounts map[storj.NodeID]int64) *PieceTracker {
 return &PieceTracker{
 log: log,
 config: config,

View File

@@ -31,7 +31,7 @@ type Config struct {
 Enabled bool `help:"set if garbage collection bloom filters is enabled or not" default:"true" testDefault:"false"`
 // value for InitialPieces currently based on average pieces per node
-InitialPieces int `help:"the initial number of pieces expected for a storage node to have, used for creating a filter" releaseDefault:"400000" devDefault:"10"`
+InitialPieces int64 `help:"the initial number of pieces expected for a storage node to have, used for creating a filter" releaseDefault:"400000" devDefault:"10"`
 FalsePositiveRate float64 `help:"the false positive rate used for creating a garbage collection bloom filter" releaseDefault:"0.1" devDefault:"0.1"`
 AccessGrant string `help:"Access Grant which will be used to upload bloom filters to the bucket" default:""`
@@ -94,7 +94,7 @@ func (service *Service) RunOnce(ctx context.Context) (err error) {
 err = nil
 }
 if lastPieceCounts == nil {
-lastPieceCounts = make(map[storj.NodeID]int)
+lastPieceCounts = make(map[storj.NodeID]int64)
 }
 pieceTracker := NewPieceTracker(service.log.Named("gc observer"), service.config, lastPieceCounts)

View File

@@ -26,13 +26,13 @@ func TestDB_PieceCounts(t *testing.T) {
 type TestNode struct {
 ID storj.NodeID
-PieceCount int // TODO: fix to int64
+PieceCount int64
 }
 nodes := make([]TestNode, 100)
 for i := range nodes {
 nodes[i].ID = testrand.NodeID()
-nodes[i].PieceCount = int(math.Pow10(i + 1))
+nodes[i].PieceCount = int64(math.Pow10(i + 1))
 }
 for i, node := range nodes {
@@ -57,7 +57,7 @@ func TestDB_PieceCounts(t *testing.T) {
 // since it will keep the logic slightly clearer.
 // update counts
-counts := make(map[storj.NodeID]int)
+counts := make(map[storj.NodeID]int64)
 for _, node := range nodes {
 counts[node.ID] = node.PieceCount
 }
@@ -89,9 +89,9 @@ func BenchmarkDB_PieceCounts(b *testing.B) {
 overlaydb := db.OverlayCache()
-counts := make(map[storj.NodeID]int)
+counts := make(map[storj.NodeID]int64)
 for i := 0; i < NumberOfNodes; i++ {
-counts[testrand.NodeID()] = testrand.Intn(100000)
+counts[testrand.NodeID()] = testrand.Int63n(100000)
 }
 var i int

View File

@@ -75,9 +75,9 @@ type DB interface {
 UpdateCheckIn(ctx context.Context, node NodeCheckInInfo, timestamp time.Time, config NodeSelectionConfig) (err error)
 // AllPieceCounts returns a map of node IDs to piece counts from the db.
-AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int, err error)
+AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int64, err error)
 // UpdatePieceCounts sets the piece count field for the given node IDs.
-UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int) (err error)
+UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int64) (err error)
 // UpdateExitStatus is used to update a node's graceful exit status.
 UpdateExitStatus(ctx context.Context, request *ExitStatusRequest) (_ *NodeDossier, err error)

View File

@@ -751,7 +751,7 @@ func (cache *overlaycache) TestUnsuspendNodeUnknownAudit(ctx context.Context, no
 // AllPieceCounts returns a map of node IDs to piece counts from the db.
 // NB: a valid, partial piece map can be returned even if node ID parsing error(s) are returned.
-func (cache *overlaycache) AllPieceCounts(ctx context.Context) (_ map[storj.NodeID]int, err error) {
+func (cache *overlaycache) AllPieceCounts(ctx context.Context) (_ map[storj.NodeID]int64, err error) {
 defer mon.Task()(&ctx)(&err)
 // NB: `All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number` selects node
@@ -761,7 +761,7 @@ func (cache *overlaycache) AllPieceCounts(ctx context.Context) (_ map[storj.Node
 return nil, Error.Wrap(err)
 }
-pieceCounts := make(map[storj.NodeID]int)
+pieceCounts := make(map[storj.NodeID]int64)
 nodeIDErrs := errs.Group{}
 for _, row := range rows {
 nodeID, err := storj.NodeIDFromBytes(row.Id)
@@ -769,13 +769,13 @@ func (cache *overlaycache) AllPieceCounts(ctx context.Context) (_ map[storj.Node
 nodeIDErrs.Add(err)
 continue
 }
-pieceCounts[nodeID] = int(row.PieceCount)
+pieceCounts[nodeID] = row.PieceCount
 }
 return pieceCounts, nodeIDErrs.Err()
 }
-func (cache *overlaycache) UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int) (err error) {
+func (cache *overlaycache) UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int64) (err error) {
 defer mon.Task()(&ctx)(&err)
 if len(pieceCounts) == 0 {
 return nil
@@ -791,7 +791,7 @@ func (cache *overlaycache) UpdatePieceCounts(ctx context.Context, pieceCounts ma
 for nodeid, count := range pieceCounts {
 counts = append(counts, NodeCount{
 ID: nodeid,
-Count: int64(count),
+Count: count,
 })
 }
 sort.Slice(counts, func(i, k int) bool {

View File

@@ -10,7 +10,7 @@ require (
 github.com/zeebo/errs v1.3.0
 go.uber.org/zap v1.21.0
 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
-storj.io/common v0.0.0-20220902050723-654fd98b53c6
+storj.io/common v0.0.0-20220912074536-0fff01212055
 storj.io/private v0.0.0-20220915132359-957cab776577
 storj.io/storj v1.63.1
 storj.io/storjscan v0.0.0-20220909003402-28d145ea1272

View File

@@ -1257,8 +1257,8 @@ storj.io/common v0.0.0-20220708152916-e2f08365ed65/go.mod h1:PdwPrX+QWAm4vgVyka5
 storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
 storj.io/common v0.0.0-20220802175255-aae0c09ec9d4/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
 storj.io/common v0.0.0-20220829171748-14b0a3c9565e/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6 h1:qjerTUC3dl6XzF44LkOEGKFpyz7oDkbtyr/tZUa+CFU=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
+storj.io/common v0.0.0-20220912074536-0fff01212055 h1:snQ1bxkJCCBnWJ37ngBDxPEoKwRGfhkZ9fxSbfFA5YI=
+storj.io/common v0.0.0-20220912074536-0fff01212055/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
 storj.io/drpc v0.0.32 h1:5p5ZwsK/VOgapaCu+oxaPVwO6UwIs+iwdMiD50+R4PI=
 storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
 storj.io/monkit-jaeger v0.0.0-20220726162929-c3a9898b5bca h1:+dSyCu45DLiNoWl/LNZ/kO0MLvSGQwjiM8zwdknwJbg=

View File

@@ -10,7 +10,7 @@ require (
 github.com/spf13/pflag v1.0.5
 github.com/stretchr/testify v1.7.0
 go.uber.org/zap v1.17.0
-storj.io/common v0.0.0-20220902050723-654fd98b53c6
+storj.io/common v0.0.0-20220912074536-0fff01212055
 storj.io/gateway-mt v1.18.1-0.20211210081136-cada9a567d31
 storj.io/private v0.0.0-20220915132359-957cab776577
 storj.io/storj v0.12.1-0.20220705102727-0f626a59c103

View File

@@ -1495,8 +1495,8 @@ storj.io/common v0.0.0-20211102144601-401a79f0706a/go.mod h1:a2Kw7Uipu929OFANfWK
 storj.io/common v0.0.0-20220708152916-e2f08365ed65/go.mod h1:PdwPrX+QWAm4vgVyka5U13vA0jKk49MpV4tzW4HTaz0=
 storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
 storj.io/common v0.0.0-20220802175255-aae0c09ec9d4/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6 h1:qjerTUC3dl6XzF44LkOEGKFpyz7oDkbtyr/tZUa+CFU=
-storj.io/common v0.0.0-20220902050723-654fd98b53c6/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
+storj.io/common v0.0.0-20220912074536-0fff01212055 h1:snQ1bxkJCCBnWJ37ngBDxPEoKwRGfhkZ9fxSbfFA5YI=
+storj.io/common v0.0.0-20220912074536-0fff01212055/go.mod h1:+gF7jbVvpjVIVHhK+EJFhfPbudX395lnPq/dKkj/Qys=
 storj.io/dotworld v0.0.0-20210324183515-0d11aeccd840/go.mod h1:KU9YvEgRrMMiWLvH8pzn1UkoCoxggKIPvQxmNdx7aXQ=
 storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
 storj.io/drpc v0.0.24/go.mod h1:ofQUDPQbbIymRDKE0tms48k8bLP5Y+dsI9CbXGv3gko=