2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-11-08 16:18:28 +00:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
2018-11-14 01:22:18 +00:00
|
|
|
package tally
|
2018-11-08 16:18:28 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
2019-11-08 20:40:39 +00:00
|
|
|
"github.com/spacemonkeygo/monkit/v3"
|
2019-02-01 18:50:12 +00:00
|
|
|
"github.com/zeebo/errs"
|
2018-11-08 16:18:28 +00:00
|
|
|
"go.uber.org/zap"
|
2018-12-07 09:59:31 +00:00
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/storj"
|
|
|
|
"storj.io/common/sync2"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2019-07-28 06:55:36 +01:00
|
|
|
"storj.io/storj/satellite/accounting"
|
2021-04-21 13:42:57 +01:00
|
|
|
"storj.io/storj/satellite/metabase"
|
2021-04-22 10:07:18 +01:00
|
|
|
"storj.io/storj/satellite/metabase/metaloop"
|
2018-11-08 16:18:28 +00:00
|
|
|
)
|
|
|
|
|
2019-10-04 20:09:52 +01:00
|
|
|
// Error is a standard error class for this package.
var (
	Error = errs.Class("tally")
	// mon is the package-level monkit scope used for task tracing and metrics.
	mon = monkit.Package()
)
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Config contains configurable values for the tally service.
type Config struct {
	// Interval controls how frequently the tally cycle runs; the test default
	// is substituted via $TESTINTERVAL by the test environment.
	Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s" testDefault:"$TESTINTERVAL"`
	// SaveRollupBatchSize bounds the batch size used when saving rollups.
	SaveRollupBatchSize int `help:"how large of batches SaveRollup should process at a time" default:"1000"`
	// ReadRollupBatchSize bounds the batch size used when reading bandwidth rollups.
	ReadRollupBatchSize int `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
}
|
|
|
|
|
2020-12-05 16:01:42 +00:00
|
|
|
// Service is the tally service for data stored on each storage node.
//
// architecture: Chore
type Service struct {
	log *zap.Logger
	// Loop drives the periodic execution of the tally; exported so callers can
	// pause or trigger cycles (e.g. in tests).
	Loop *sync2.Cycle

	// metainfoLoop is joined each cycle to walk all objects/segments.
	metainfoLoop *metaloop.Service
	// liveAccounting caches per-project storage usage totals.
	liveAccounting accounting.Cache
	// storagenodeAccountingDB persists per-node tallies.
	storagenodeAccountingDB accounting.StoragenodeAccounting
	// projectAccountingDB persists per-bucket tallies.
	projectAccountingDB accounting.ProjectAccounting
	// nowFn returns the current time; replaceable via SetNow for tests.
	nowFn func() time.Time
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// New creates a new tally Service.
|
2021-03-23 12:14:38 +00:00
|
|
|
func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, liveAccounting accounting.Cache, metainfoLoop *metaloop.Service, interval time.Duration) *Service {
|
2019-04-04 16:20:59 +01:00
|
|
|
return &Service{
|
2019-10-07 21:55:20 +01:00
|
|
|
log: log,
|
2020-01-30 13:06:43 +00:00
|
|
|
Loop: sync2.NewCycle(interval),
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
metainfoLoop: metainfoLoop,
|
|
|
|
liveAccounting: liveAccounting,
|
2019-05-10 20:05:42 +01:00
|
|
|
storagenodeAccountingDB: sdb,
|
|
|
|
projectAccountingDB: pdb,
|
2020-04-10 18:35:58 +01:00
|
|
|
nowFn: time.Now,
|
2018-12-05 14:03:23 +00:00
|
|
|
}
|
2018-11-08 16:18:28 +00:00
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Run the tally service loop.
|
2019-10-07 21:55:20 +01:00
|
|
|
func (service *Service) Run(ctx context.Context) (err error) {
|
2018-11-08 16:18:28 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-03-18 10:55:06 +00:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
return service.Loop.Run(ctx, func(ctx context.Context) error {
|
|
|
|
err := service.Tally(ctx)
|
2019-09-09 17:48:24 +01:00
|
|
|
if err != nil {
|
2019-10-07 21:55:20 +01:00
|
|
|
service.log.Error("tally failed", zap.Error(err))
|
2018-11-08 16:18:28 +00:00
|
|
|
}
|
2019-09-09 17:48:24 +01:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close stops the service and releases any resources.
func (service *Service) Close() error {
	// Closing the cycle stops the periodic loop, causing Run to return.
	service.Loop.Close()
	return nil
}
|
|
|
|
|
2020-04-10 18:35:58 +01:00
|
|
|
// SetNow allows tests to have the Service act as if the current time is whatever
// they want. This avoids races and sleeping, making tests more reliable and efficient.
func (service *Service) SetNow(now func() time.Time) {
	// Not synchronized; intended to be called before Run starts.
	service.nowFn = now
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Tally calculates data-at-rest usage once.
|
2020-11-03 14:04:24 +00:00
|
|
|
//
|
|
|
|
// How live accounting is calculated:
|
|
|
|
//
|
|
|
|
// At the beginning of the tally iteration, we get a map containing the current
|
|
|
|
// project totals from the cache- initialLiveTotals (our current estimation of
|
|
|
|
// the project totals). At the end of the tally iteration, we have the totals
|
|
|
|
// from what we saw during the metainfo loop.
|
|
|
|
//
|
|
|
|
// However, data which was uploaded during the loop may or may not have been
|
|
|
|
// seen in the metainfo loop. For this reason, we also read the live accounting
|
|
|
|
// totals again at the end of the tally iteration- latestLiveTotals.
|
|
|
|
//
|
|
|
|
// The difference between latest and initial indicates how much data was
|
|
|
|
// uploaded during the metainfo loop and is assigned to delta. However, again,
|
|
|
|
// we aren't certain how much of the delta is accounted for in the metainfo
|
|
|
|
// totals. For the reason we make an assumption that 50% of the data is
|
|
|
|
// accounted for. So to calculate the new live accounting totals, we sum the
|
|
|
|
// metainfo totals and 50% of the deltas.
|
2019-10-07 21:55:20 +01:00
|
|
|
func (service *Service) Tally(ctx context.Context) (err error) {
|
2019-06-04 12:36:27 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-10-07 21:55:20 +01:00
|
|
|
|
2020-12-23 11:08:08 +00:00
|
|
|
// No-op unless that there isn't an error getting the
|
|
|
|
// liveAccounting.GetAllProjectTotals
|
|
|
|
updateLiveAccountingTotals := func(_ map[uuid.UUID]int64) {}
|
|
|
|
|
2019-10-31 17:27:38 +00:00
|
|
|
initialLiveTotals, err := service.liveAccounting.GetAllProjectTotals(ctx)
|
2019-10-16 17:50:29 +01:00
|
|
|
if err != nil {
|
2020-12-23 11:08:08 +00:00
|
|
|
service.log.Error(
|
|
|
|
"tally won't update the live accounting storage usages of the projects in this cycle",
|
|
|
|
zap.Error(err),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
updateLiveAccountingTotals = func(tallyProjectTotals map[uuid.UUID]int64) {
|
|
|
|
latestLiveTotals, err := service.liveAccounting.GetAllProjectTotals(ctx)
|
|
|
|
if err != nil {
|
|
|
|
service.log.Error(
|
|
|
|
"tally isn't updating the live accounting storage usages of the projects in this cycle",
|
|
|
|
zap.Error(err),
|
|
|
|
)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// empty projects are not returned by the metainfo observer. If a project exists
|
|
|
|
// in live accounting, but not in tally projects, we would not update it in live accounting.
|
|
|
|
// Thus, we add them and set the total to 0.
|
|
|
|
for projectID := range latestLiveTotals {
|
|
|
|
if _, ok := tallyProjectTotals[projectID]; !ok {
|
|
|
|
tallyProjectTotals[projectID] = 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for projectID, tallyTotal := range tallyProjectTotals {
|
|
|
|
delta := latestLiveTotals[projectID] - initialLiveTotals[projectID]
|
|
|
|
if delta < 0 {
|
|
|
|
delta = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// read the method documentation why the increase passed to this method
|
|
|
|
// is calculated in this way
|
|
|
|
err = service.liveAccounting.AddProjectStorageUsage(ctx, projectID, -latestLiveTotals[projectID]+tallyTotal+(delta/2))
|
|
|
|
if err != nil {
|
|
|
|
if accounting.ErrSystemOrNetError.Has(err) {
|
|
|
|
service.log.Error(
|
|
|
|
"tally isn't updating the live accounting storage usages of the projects in this cycle",
|
|
|
|
zap.Error(err),
|
|
|
|
)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
service.log.Error(
|
|
|
|
"tally isn't updating the live accounting storage usage of the project in this cycle",
|
|
|
|
zap.Error(err),
|
|
|
|
zap.String("projectID", projectID.String()),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-10-16 17:50:29 +01:00
|
|
|
}
|
2020-12-23 11:08:08 +00:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// Fetch when the last tally happened so we can roughly calculate the byte-hours.
|
|
|
|
lastTime, err := service.storagenodeAccountingDB.LastTimestamp(ctx, accounting.LastAtRestTally)
|
2019-02-01 18:50:12 +00:00
|
|
|
if err != nil {
|
2019-10-07 21:55:20 +01:00
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
|
|
|
if lastTime.IsZero() {
|
2020-04-10 18:35:58 +01:00
|
|
|
lastTime = service.nowFn()
|
2019-10-07 21:55:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// add up all nodes and buckets
|
2020-04-10 18:35:58 +01:00
|
|
|
observer := NewObserver(service.log.Named("observer"), service.nowFn())
|
2019-10-07 21:55:20 +01:00
|
|
|
err = service.metainfoLoop.Join(ctx, observer)
|
|
|
|
if err != nil {
|
|
|
|
return Error.Wrap(err)
|
|
|
|
}
|
2020-04-10 18:35:58 +01:00
|
|
|
finishTime := service.nowFn()
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
// calculate byte hours, not just bytes
|
|
|
|
hours := time.Since(lastTime).Hours()
|
2021-01-25 20:07:24 +00:00
|
|
|
var totalSum float64
|
|
|
|
for id, pieceSize := range observer.Node {
|
|
|
|
totalSum += pieceSize
|
|
|
|
observer.Node[id] = pieceSize * hours
|
2019-10-07 21:55:20 +01:00
|
|
|
}
|
2021-01-25 20:07:24 +00:00
|
|
|
mon.IntVal("nodetallies.totalsum").Observe(int64(totalSum)) //mon:locked
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
// save the new results
|
|
|
|
var errAtRest, errBucketInfo error
|
|
|
|
if len(observer.Node) > 0 {
|
|
|
|
err = service.storagenodeAccountingDB.SaveTallies(ctx, finishTime, observer.Node)
|
|
|
|
if err != nil {
|
|
|
|
errAtRest = errs.New("StorageNodeAccounting.SaveTallies failed: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(observer.Bucket) > 0 {
|
2019-10-31 17:27:38 +00:00
|
|
|
// record bucket tallies to DB
|
2019-10-07 21:55:20 +01:00
|
|
|
err = service.projectAccountingDB.SaveTallies(ctx, finishTime, observer.Bucket)
|
|
|
|
if err != nil {
|
|
|
|
errAtRest = errs.New("ProjectAccounting.SaveTallies failed: %v", err)
|
2019-04-01 14:42:17 +01:00
|
|
|
}
|
2019-10-31 17:27:38 +00:00
|
|
|
|
2020-12-23 11:08:08 +00:00
|
|
|
updateLiveAccountingTotals(projectTotalsFromBuckets(observer.Bucket))
|
2019-10-07 21:55:20 +01:00
|
|
|
}
|
2019-06-13 17:58:40 +01:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// report bucket metrics
|
|
|
|
if len(observer.Bucket) > 0 {
|
|
|
|
var total accounting.BucketTally
|
|
|
|
for _, bucket := range observer.Bucket {
|
2020-12-02 22:17:59 +00:00
|
|
|
monAccounting.IntVal("bucket_objects").Observe(bucket.ObjectCount) //mon:locked
|
|
|
|
monAccounting.IntVal("bucket_segments").Observe(bucket.Segments()) //mon:locked
|
|
|
|
monAccounting.IntVal("bucket_inline_segments").Observe(bucket.InlineSegments) //mon:locked
|
|
|
|
monAccounting.IntVal("bucket_remote_segments").Observe(bucket.RemoteSegments) //mon:locked
|
|
|
|
|
|
|
|
monAccounting.IntVal("bucket_bytes").Observe(bucket.Bytes()) //mon:locked
|
|
|
|
monAccounting.IntVal("bucket_inline_bytes").Observe(bucket.InlineBytes) //mon:locked
|
|
|
|
monAccounting.IntVal("bucket_remote_bytes").Observe(bucket.RemoteBytes) //mon:locked
|
2019-10-07 21:55:20 +01:00
|
|
|
total.Combine(bucket)
|
2019-02-01 18:50:12 +00:00
|
|
|
}
|
2020-12-02 22:17:59 +00:00
|
|
|
monAccounting.IntVal("total_objects").Observe(total.ObjectCount) //mon:locked
|
2019-10-15 18:00:14 +01:00
|
|
|
|
2020-12-02 22:17:59 +00:00
|
|
|
monAccounting.IntVal("total_segments").Observe(total.Segments()) //mon:locked
|
|
|
|
monAccounting.IntVal("total_inline_segments").Observe(total.InlineSegments) //mon:locked
|
|
|
|
monAccounting.IntVal("total_remote_segments").Observe(total.RemoteSegments) //mon:locked
|
2019-10-15 18:00:14 +01:00
|
|
|
|
2020-12-02 22:17:59 +00:00
|
|
|
monAccounting.IntVal("total_bytes").Observe(total.Bytes()) //mon:locked
|
|
|
|
monAccounting.IntVal("total_inline_bytes").Observe(total.InlineBytes) //mon:locked
|
|
|
|
monAccounting.IntVal("total_remote_bytes").Observe(total.RemoteBytes) //mon:locked
|
2019-02-01 18:50:12 +00:00
|
|
|
}
|
2019-09-09 17:48:24 +01:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// return errors if something went wrong.
|
2019-04-04 16:20:59 +01:00
|
|
|
return errs.Combine(errAtRest, errBucketInfo)
|
2019-02-01 18:50:12 +00:00
|
|
|
}
|
|
|
|
|
2021-03-23 12:14:38 +00:00
|
|
|
// Compile-time check that Observer satisfies the metainfo loop observer interface.
var _ metaloop.Observer = (*Observer)(nil)

// Observer observes metainfo and adds up tallies for nodes and buckets.
type Observer struct {
	// Now is the fixed reference time used to decide whether objects and
	// segments are expired during this loop iteration.
	Now time.Time
	Log *zap.Logger
	// Node accumulates per-node piece sizes in bytes; Tally later multiplies
	// these by elapsed hours to produce byte-hours.
	Node map[storj.NodeID]float64
	// Bucket accumulates one tally per bucket location.
	Bucket map[metabase.BucketLocation]*accounting.BucketTally
}
|
2019-02-26 15:17:51 +00:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// NewObserver returns an metainfo loop observer that adds up totals for buckets and nodes.
|
2020-04-10 18:35:58 +01:00
|
|
|
// The now argument controls when the observer considers pointers to be expired.
|
|
|
|
func NewObserver(log *zap.Logger, now time.Time) *Observer {
|
2019-10-07 21:55:20 +01:00
|
|
|
return &Observer{
|
2020-04-10 18:35:58 +01:00
|
|
|
Now: now,
|
2019-10-07 21:55:20 +01:00
|
|
|
Log: log,
|
|
|
|
Node: make(map[storj.NodeID]float64),
|
2020-08-31 11:14:20 +01:00
|
|
|
Bucket: make(map[metabase.BucketLocation]*accounting.BucketTally),
|
2018-12-18 17:18:42 +00:00
|
|
|
}
|
2019-10-07 21:55:20 +01:00
|
|
|
}
|
2019-02-26 15:17:51 +00:00
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// ensureBucket returns bucket corresponding to the passed in path.
|
2020-10-27 06:59:14 +00:00
|
|
|
func (observer *Observer) ensureBucket(ctx context.Context, location metabase.ObjectLocation) *accounting.BucketTally {
|
2020-09-02 08:16:58 +01:00
|
|
|
bucketLocation := location.Bucket()
|
2020-08-31 11:14:20 +01:00
|
|
|
bucket, exists := observer.Bucket[bucketLocation]
|
2019-10-07 21:55:20 +01:00
|
|
|
if !exists {
|
|
|
|
bucket = &accounting.BucketTally{}
|
2020-08-31 11:14:20 +01:00
|
|
|
bucket.BucketLocation = bucketLocation
|
|
|
|
observer.Bucket[bucketLocation] = bucket
|
2018-12-18 17:18:42 +00:00
|
|
|
}
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
return bucket
|
|
|
|
}
|
|
|
|
|
2021-04-01 11:56:39 +01:00
|
|
|
// LoopStarted is called at each start of a loop.
func (observer *Observer) LoopStarted(context.Context, metaloop.LoopInfo) (err error) {
	// No per-loop setup is needed; totals simply accumulate in the maps.
	return nil
}
|
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// Object is called for each object once.
|
2021-03-23 12:14:38 +00:00
|
|
|
func (observer *Observer) Object(ctx context.Context, object *metaloop.Object) (err error) {
|
2021-06-15 00:21:20 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
if object.Expired(observer.Now) {
|
2020-04-10 18:35:58 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-03-01 17:01:49 +00:00
|
|
|
bucket := observer.ensureBucket(ctx, object.ObjectStream.Location())
|
|
|
|
bucket.MetadataSize += int64(object.EncryptedMetadataSize)
|
2019-10-07 21:55:20 +01:00
|
|
|
bucket.ObjectCount++
|
2020-10-27 06:59:14 +00:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// InlineSegment is called for each inline segment.
|
2021-03-23 12:14:38 +00:00
|
|
|
func (observer *Observer) InlineSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
|
2021-06-15 00:21:20 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
if segment.Expired(observer.Now) {
|
2020-04-10 18:35:58 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
bucket := observer.ensureBucket(ctx, segment.Location.Object())
|
2019-10-07 21:55:20 +01:00
|
|
|
bucket.InlineSegments++
|
2021-03-02 12:58:23 +00:00
|
|
|
bucket.InlineBytes += int64(segment.EncryptedSize)
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
return nil
|
2019-02-01 18:50:12 +00:00
|
|
|
}
|
2019-10-04 20:09:52 +01:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
// RemoteSegment is called for each remote segment.
|
2021-03-23 12:14:38 +00:00
|
|
|
func (observer *Observer) RemoteSegment(ctx context.Context, segment *metaloop.Segment) (err error) {
|
2021-06-15 00:21:20 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
if segment.Expired(observer.Now) {
|
2020-04-10 18:35:58 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
bucket := observer.ensureBucket(ctx, segment.Location.Object())
|
2019-10-07 21:55:20 +01:00
|
|
|
bucket.RemoteSegments++
|
2021-03-02 12:58:23 +00:00
|
|
|
bucket.RemoteBytes += int64(segment.EncryptedSize)
|
2019-10-07 21:55:20 +01:00
|
|
|
|
|
|
|
// add node info
|
2020-10-27 06:59:14 +00:00
|
|
|
minimumRequired := segment.Redundancy.RequiredShares
|
2019-10-07 21:55:20 +01:00
|
|
|
|
2020-10-27 06:59:14 +00:00
|
|
|
if minimumRequired <= 0 {
|
|
|
|
observer.Log.Error("failed sanity check", zap.ByteString("key", segment.Location.Encode()))
|
2019-10-07 21:55:20 +01:00
|
|
|
return nil
|
2019-10-04 20:09:52 +01:00
|
|
|
}
|
|
|
|
|
2021-03-02 12:58:23 +00:00
|
|
|
pieceSize := float64(segment.EncryptedSize / int32(minimumRequired)) // TODO: Add this as a method to RedundancyScheme
|
2020-10-27 06:59:14 +00:00
|
|
|
|
|
|
|
for _, piece := range segment.Pieces {
|
|
|
|
observer.Node[piece.StorageNode] += pieceSize
|
2019-10-04 20:09:52 +01:00
|
|
|
}
|
2020-10-27 06:59:14 +00:00
|
|
|
|
2019-10-07 21:55:20 +01:00
|
|
|
return nil
|
2019-10-04 20:09:52 +01:00
|
|
|
}
|
|
|
|
|
2020-08-31 11:14:20 +01:00
|
|
|
func projectTotalsFromBuckets(buckets map[metabase.BucketLocation]*accounting.BucketTally) map[uuid.UUID]int64 {
|
2019-10-31 17:27:38 +00:00
|
|
|
projectTallyTotals := make(map[uuid.UUID]int64)
|
|
|
|
for _, bucket := range buckets {
|
|
|
|
projectTallyTotals[bucket.ProjectID] += (bucket.InlineBytes + bucket.RemoteBytes)
|
|
|
|
}
|
|
|
|
return projectTallyTotals
|
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// monAccounting is a separate monkit scope for bucket/total metrics,
// using custom name to avoid breaking monitoring.
var monAccounting = monkit.ScopeNamed("storj.io/storj/satellite/accounting")
|