// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package tally

import (
	"context"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/sync2"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/metabase"
)

// Error is a standard error class for this package.
var (
	Error = errs.Class("tally")
	mon   = monkit.Package()
)

// Config contains configurable values for the tally service.
type Config struct {
	Interval            time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s" testDefault:"$TESTINTERVAL"`
	SaveRollupBatchSize int           `help:"how large of batches SaveRollup should process at a time" default:"1000"`
	ReadRollupBatchSize int           `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
	UseObjectsLoop      bool          `help:"flag to switch between calculating bucket tallies using objects loop or custom query" default:"false"`

	ListLimit          int           `help:"how many objects to query in a batch" default:"2500"`
	AsOfSystemInterval time.Duration `help:"as of system interval" releaseDefault:"-5m" devDefault:"-1us" testDefault:"-1us"`
}

// Service is the tally service for data stored on each storage node.
//
// architecture: Chore
type Service struct {
	log    *zap.Logger
	config Config
	Loop   *sync2.Cycle

	metabase                *metabase.DB
	bucketsDB               buckets.DB
	liveAccounting          accounting.Cache
	storagenodeAccountingDB accounting.StoragenodeAccounting
	projectAccountingDB     accounting.ProjectAccounting
	nowFn                   func() time.Time
}

// New creates a new tally Service.
func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, liveAccounting accounting.Cache, metabase *metabase.DB, bucketsDB buckets.DB, config Config) *Service {
	return &Service{
		log:    log,
		config: config,
		Loop:   sync2.NewCycle(config.Interval),

		metabase:                metabase,
		bucketsDB:               bucketsDB,
		liveAccounting:          liveAccounting,
		storagenodeAccountingDB: sdb,
		projectAccountingDB:     pdb,
		nowFn:                   time.Now,
	}
}

// Run the tally service loop.
func (service *Service) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	return service.Loop.Run(ctx, func(ctx context.Context) error {
		err := service.Tally(ctx)
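		// Log the error but don't return it, so a failing tally run doesn't stop the cycle.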
		if err != nil {
			service.log.Error("tally failed", zap.Error(err))
		}
		return nil
	})
}

// Close stops the service and releases any resources.
func (service *Service) Close() error {
	service.Loop.Close()
	return nil
}

// SetNow allows tests to have the Service act as if the current time is whatever
// they want. This avoids races and sleeping, making tests more reliable and efficient.
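//
// For example, a test might pin the clock (illustrative only):
//
//	service.SetNow(func() time.Time { return time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) })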
func (service *Service) SetNow(now func() time.Time) {
	service.nowFn = now
}

// Tally calculates data-at-rest usage once.
//
// How live accounting is calculated:
//
// At the beginning of the tally iteration, we get a map containing the current
// project totals from the cache: initialLiveTotals (our current estimate of
// the project totals). At the end of the tally iteration, we have the totals
// from what we saw during the metainfo loop.
//
// However, data uploaded during the loop may or may not have been seen by the
// metainfo loop. For this reason, we also read the live accounting totals
// again at the end of the tally iteration: latestLiveTotals.
//
// The difference between latest and initial indicates how much data was
// uploaded during the metainfo loop and is assigned to delta. However, again,
// we aren't certain how much of that delta is accounted for in the metainfo
// totals. For this reason, we assume that 50% of it is. So to calculate the
// new live accounting totals, we sum the metainfo totals and 50% of the deltas.
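//
// As an illustrative example: if initialLiveTotals is 100 GB, the metainfo
// loop observes 110 GB, and latestLiveTotals is 120 GB, then delta is
// 120 - 100 = 20 GB and the new live total becomes 110 + 20/2 = 120 GB
// (an adjustment of 120 - 120 = 0 to the cache in this particular case).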
func (service *Service) Tally(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	// updateLiveAccountingTotals stays a no-op unless the initial call to
	// liveAccounting.GetAllProjectTotals succeeds.
	updateLiveAccountingTotals := func(_ map[uuid.UUID]accounting.Usage) {}

	initialLiveTotals, err := service.liveAccounting.GetAllProjectTotals(ctx)
	if err != nil {
		service.log.Error(
			"tally won't update the live accounting storage usages of the projects in this cycle",
			zap.Error(err),
		)
	} else {
		updateLiveAccountingTotals = func(tallyProjectTotals map[uuid.UUID]accounting.Usage) {
			latestLiveTotals, err := service.liveAccounting.GetAllProjectTotals(ctx)
			if err != nil {
				service.log.Error(
					"tally isn't updating the live accounting storage usages of the projects in this cycle",
					zap.Error(err),
				)
				return
			}

			// Empty projects are not returned by the metainfo observer. If a project exists
			// in live accounting but not in the tally projects, we would never update it in
			// live accounting. Thus, we add such projects and set their totals to 0.
			for projectID := range latestLiveTotals {
				if _, ok := tallyProjectTotals[projectID]; !ok {
					tallyProjectTotals[projectID] = accounting.Usage{}
				}
			}

			for projectID, tallyTotal := range tallyProjectTotals {
				delta := latestLiveTotals[projectID].Storage - initialLiveTotals[projectID].Storage
				if delta < 0 {
					delta = 0
				}

				// Read the method documentation for why the increase passed to this method
				// is calculated this way.
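				// In short: the target total is tallyTotal.Storage + delta/2, so the
				// increase is that target minus the cached latestLiveTotals value.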
				err = service.liveAccounting.AddProjectStorageUsage(ctx, projectID, -latestLiveTotals[projectID].Storage+tallyTotal.Storage+(delta/2))
				if err != nil {
					if accounting.ErrSystemOrNetError.Has(err) {
						service.log.Error(
							"tally isn't updating the live accounting storage usages of the projects in this cycle",
							zap.Error(err),
						)
						return
					}

					service.log.Error(
						"tally isn't updating the live accounting storage usage of the project in this cycle",
						zap.String("projectID", projectID.String()),
						zap.Error(err),
					)
				}

				// increment is the difference between the latest tally collector total
				// and the cached live accounting segment total.
				increment := tallyTotal.Segments - latestLiveTotals[projectID].Segments

				err = service.liveAccounting.UpdateProjectSegmentUsage(ctx, projectID, increment)
				if err != nil {
					if accounting.ErrSystemOrNetError.Has(err) {
						service.log.Error(
							"tally isn't updating the live accounting segment usages of the projects in this cycle",
							zap.Error(err),
						)
						return
					}

					service.log.Error(
						"tally isn't updating the live accounting segment usage of the project in this cycle",
						zap.String("projectID", projectID.String()),
						zap.Error(err),
					)
				}
			}
		}
	}

	// add up all buckets
	collector := NewBucketTallyCollector(service.log.Named("observer"), service.nowFn(), service.metabase, service.bucketsDB, service.config)
	err = collector.Run(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	finishTime := service.nowFn()

	// save the new results
	var errAtRest error
	if len(collector.Bucket) > 0 {
		// record bucket tallies to DB
		err = service.projectAccountingDB.SaveTallies(ctx, finishTime, collector.Bucket)
		if err != nil {
			errAtRest = Error.New("ProjectAccounting.SaveTallies failed: %v", err)
		}

		updateLiveAccountingTotals(projectTotalsFromBuckets(collector.Bucket))
	}

	if len(collector.Bucket) > 0 {
		var total accounting.BucketTally
		// TODO: for now we don't have access to inline/remote stats per bucket,
		// but that may change in the future. To get those stats back we would
		// most probably need to add inline/remote information to objects in
		// metabase. We haven't decided yet whether that is really needed.
		for _, bucket := range collector.Bucket {
			monAccounting.IntVal("bucket_objects").Observe(bucket.ObjectCount) //mon:locked
			monAccounting.IntVal("bucket_segments").Observe(bucket.Segments()) //mon:locked
			// monAccounting.IntVal("bucket_inline_segments").Observe(bucket.InlineSegments) //mon:locked
			// monAccounting.IntVal("bucket_remote_segments").Observe(bucket.RemoteSegments) //mon:locked

			monAccounting.IntVal("bucket_bytes").Observe(bucket.Bytes()) //mon:locked
			// monAccounting.IntVal("bucket_inline_bytes").Observe(bucket.InlineBytes) //mon:locked
			// monAccounting.IntVal("bucket_remote_bytes").Observe(bucket.RemoteBytes) //mon:locked
			total.Combine(bucket)
		}
		monAccounting.IntVal("total_objects").Observe(total.ObjectCount)   //mon:locked
		monAccounting.IntVal("total_segments").Observe(total.Segments())   //mon:locked
		monAccounting.IntVal("total_bytes").Observe(total.Bytes())         //mon:locked
		monAccounting.IntVal("total_pending_objects").Observe(total.PendingObjectCount)
	}

	// Return an error if something went wrong.
	return errAtRest
}

var objectFunc = mon.Task()

// BucketTallyCollector collects and adds up tallies for buckets.
type BucketTallyCollector struct {
	Now    time.Time
	Log    *zap.Logger
	Bucket map[metabase.BucketLocation]*accounting.BucketTally

	metabase  *metabase.DB
	bucketsDB buckets.DB
	config    Config
}

// NewBucketTallyCollector returns a collector that adds up totals for buckets.
// The now argument controls when the collector considers objects to be expired.
func NewBucketTallyCollector(log *zap.Logger, now time.Time, db *metabase.DB, bucketsDB buckets.DB, config Config) *BucketTallyCollector {
	return &BucketTallyCollector{
		Now:    now,
		Log:    log,
		Bucket: make(map[metabase.BucketLocation]*accounting.BucketTally),

		metabase:  db,
		bucketsDB: bucketsDB,
		config:    config,
	}
}

// Run runs collecting bucket tallies.
func (observer *BucketTallyCollector) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	startingTime, err := observer.metabase.Now(ctx)
	if err != nil {
		return err
	}

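	// By default (UseObjectsLoop=false) tallies are collected per bucket with a
	// custom query; otherwise we fall back to iterating over all objects.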
	if !observer.config.UseObjectsLoop {
		return observer.fillBucketTallies(ctx, startingTime)
	}

	return observer.metabase.IterateLoopObjects(ctx, metabase.IterateLoopObjects{
		BatchSize:          observer.config.ListLimit,
		AsOfSystemTime:     startingTime,
		AsOfSystemInterval: observer.config.AsOfSystemInterval,
	}, func(ctx context.Context, it metabase.LoopObjectsIterator) (err error) {
		var entry metabase.LoopObjectEntry
		for it.Next(ctx, &entry) {
			err = observer.object(ctx, entry)
			if err != nil {
				return err
			}
		}
		return nil
	})
}

// fillBucketTallies collects all bucket tallies and fills observer's buckets map with results.
func (observer *BucketTallyCollector) fillBucketTallies(ctx context.Context, startingTime time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)

	var lastBucketLocation metabase.BucketLocation
	var bucketLocationsSize int

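	// Page through bucket locations in batches of ListLimit, tallying each page,
	// and stop once a page comes back smaller than the limit.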
	for {
		err := observer.bucketsDB.IterateBucketLocations(ctx, lastBucketLocation.ProjectID, lastBucketLocation.BucketName, observer.config.ListLimit, func(bucketLocations []metabase.BucketLocation) (err error) {
			if len(bucketLocations) < 1 {
				return nil
			}

			tallies, err := observer.metabase.CollectBucketTallies(ctx, metabase.CollectBucketTallies{
				From:               bucketLocations[0],
				To:                 bucketLocations[len(bucketLocations)-1],
				AsOfSystemTime:     startingTime,
				AsOfSystemInterval: observer.config.AsOfSystemInterval,
				Now:                observer.Now,
			})
			if err != nil {
				return err
			}

			for _, tally := range tallies {
				bucket := observer.ensureBucket(metabase.ObjectLocation{
					ProjectID:  tally.ProjectID,
					BucketName: tally.BucketName,
				})
				bucket.TotalSegments = tally.TotalSegments
				bucket.TotalBytes = tally.TotalBytes
				bucket.MetadataSize = tally.MetadataSize
				bucket.ObjectCount = tally.ObjectCount
			}

			bucketLocationsSize = len(bucketLocations)

			lastBucketLocation = bucketLocations[len(bucketLocations)-1]
			return nil
		})
		if err != nil {
			return err
		}

		if bucketLocationsSize < observer.config.ListLimit {
			break
		}
	}

	return nil
}

// ensureBucket returns the bucket tally corresponding to the passed in object location.
func (observer *BucketTallyCollector) ensureBucket(location metabase.ObjectLocation) *accounting.BucketTally {
	bucketLocation := location.Bucket()
	bucket, exists := observer.Bucket[bucketLocation]
	if !exists {
		bucket = &accounting.BucketTally{}
		bucket.BucketLocation = bucketLocation
		observer.Bucket[bucketLocation] = bucket
	}

	return bucket
}

// object is called once for each object.
func (observer *BucketTallyCollector) object(ctx context.Context, object metabase.LoopObjectEntry) error {
	defer objectFunc(&ctx)(nil)

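	// Expired objects are skipped so they don't count toward the tallies.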
	if object.Expired(observer.Now) {
		return nil
	}

	bucket := observer.ensureBucket(object.ObjectStream.Location())
	bucket.TotalSegments += int64(object.SegmentCount)
	bucket.TotalBytes += object.TotalEncryptedSize
	bucket.MetadataSize += int64(object.EncryptedMetadataSize)
	bucket.ObjectCount++
	if object.Status == metabase.Pending {
		bucket.PendingObjectCount++
	}

	return nil
}

func projectTotalsFromBuckets(buckets map[metabase.BucketLocation]*accounting.BucketTally) map[uuid.UUID]accounting.Usage {
	projectTallyTotals := make(map[uuid.UUID]accounting.Usage)
	for _, bucket := range buckets {
		projectUsage := projectTallyTotals[bucket.ProjectID]
		projectUsage.Storage += bucket.TotalBytes
		projectUsage.Segments += bucket.TotalSegments
		projectTallyTotals[bucket.ProjectID] = projectUsage
	}
	return projectTallyTotals
}

// using custom name to avoid breaking monitoring.
var monAccounting = monkit.ScopeNamed("storj.io/storj/satellite/accounting")