Compare commits

8 commits: main...gui-prebui
Author | SHA1 | Date
--- | --- | ---
 | f75ec5ba34 |
 | cb65ebe81c |
 | e1f8434a03 |
 | 433493a935 |
 | 8f1d4a6506 |
 | e2603461ab |
 | f4297e42d0 |
 | 0ad544731d |
```diff
@@ -6,6 +6,7 @@ storj.io/storj/satellite/accounting."bucket_segments" IntVal
 storj.io/storj/satellite/accounting."total_bytes" IntVal
 storj.io/storj/satellite/accounting."total_objects" IntVal
 storj.io/storj/satellite/accounting."total_segments" IntVal
+storj.io/storj/satellite/accounting/tally."bucket_tally_error" Event
 storj.io/storj/satellite/accounting/tally."nodetallies.totalsum" IntVal
 storj.io/storj/satellite/audit."audit_contained_nodes" IntVal
 storj.io/storj/satellite/audit."audit_contained_nodes_global" Meter
```
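This is the monkit metrics lock file: a metric marked `//mon:locked` in code is expected to have a matching entry here, so the new `bucket_tally_error` event registered in the tally service below also lands in this list.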
```diff
@@ -6,16 +6,16 @@ package version
 import _ "unsafe" // needed for go:linkname
 
 //go:linkname buildTimestamp storj.io/private/version.buildTimestamp
-var buildTimestamp string
+var buildTimestamp string = "1687783565"
 
 //go:linkname buildCommitHash storj.io/private/version.buildCommitHash
-var buildCommitHash string
+var buildCommitHash string = "e1f8434a03290f36202d40a3f887da1e4dc68ee5"
 
 //go:linkname buildVersion storj.io/private/version.buildVersion
-var buildVersion string
+var buildVersion string = "v1.82.1"
 
 //go:linkname buildRelease storj.io/private/version.buildRelease
-var buildRelease string
+var buildRelease string = "true"
 
 // ensure that linter understands that the variables are being used.
 func init() { use(buildTimestamp, buildCommitHash, buildVersion, buildRelease) }
```
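These four variables are normally left empty in source and filled in by the build; this commit pins them to concrete v1.82.1 release values directly in the file. A minimal stand-alone sketch of what the pinned values amount to (plain package variables are used for illustration; the real file wires them into storj.io/private/version via `//go:linkname`):

```go
// Minimal sketch: the pinned build metadata as ordinary package variables.
package main

import "fmt"

var (
	buildTimestamp  = "1687783565" // unix seconds, 2023-06-26
	buildCommitHash = "e1f8434a03290f36202d40a3f887da1e4dc68ee5"
	buildVersion    = "v1.82.1"
	buildRelease    = "true"
)

func main() {
	fmt.Printf("version %s (commit %s, release=%s, built %s)\n",
		buildVersion, buildCommitHash, buildRelease, buildTimestamp)
}
```

The usual alternative (an assumption about the release tooling, not shown in this diff) injects the same values at link time, roughly `go build -ldflags "-X storj.io/private/version.buildVersion=v1.82.1 ..."`; hard-coding them instead bakes one specific release into the source tree.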
```diff
@@ -30,6 +30,7 @@ type Config struct {
 	SaveRollupBatchSize int `help:"how large of batches SaveRollup should process at a time" default:"1000"`
 	ReadRollupBatchSize int `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
 	UseRangedLoop bool `help:"whether to enable node tally with ranged loop" default:"true"`
+	SaveTalliesBatchSize int `help:"how large should be insert into tallies" default:"10000"`
 
 	ListLimit int `help:"how many buckets to query in a batch" default:"2500"`
 	AsOfSystemInterval time.Duration `help:"as of system interval" releaseDefault:"-5m" devDefault:"-1us" testDefault:"-1us"`
```
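The new field follows the repo's config convention: the `help` and `default` struct tags are plain reflect-readable metadata that also surface in the generated satellite-config.yaml.lock entry further down. A minimal sketch of reading such tags (the real binding lives in storj's config machinery; this stand-in only demonstrates the tag format):

```go
// Sketch: reading the `help`/`default` struct tags with reflection.
package main

import (
	"fmt"
	"reflect"
)

type Config struct {
	SaveTalliesBatchSize int `help:"how large should be insert into tallies" default:"10000"`
}

func main() {
	f := reflect.TypeOf(Config{}).Field(0)
	fmt.Printf("%s: default=%q help=%q\n", f.Name, f.Tag.Get("default"), f.Tag.Get("help"))
}
```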
```diff
@@ -75,6 +76,8 @@ func (service *Service) Run(ctx context.Context) (err error) {
 		err := service.Tally(ctx)
 		if err != nil {
 			service.log.Error("tally failed", zap.Error(err))
+
+			mon.Event("bucket_tally_error") //mon:locked
 		}
 		return nil
 	})
```
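For context, a sketch of the monkit pattern behind the added line (the package layout and the `recordFailure` helper are illustrative; the real code calls `mon.Event` directly from Run's error branch):

```go
// Sketch of the monkit event emission used above.
package tally

import "github.com/spacemonkeygo/monkit/v3"

// Package-level scope, as storj packages conventionally declare.
var mon = monkit.Package()

func recordFailure() {
	// Counter-style event; the //mon:locked marker in the real source freezes
	// the metric name, which is why monkit.lock gains a matching entry.
	mon.Event("bucket_tally_error")
}
```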
```diff
@@ -198,21 +201,35 @@ func (service *Service) Tally(ctx context.Context) (err error) {
 	if err != nil {
 		return Error.Wrap(err)
 	}
-	finishTime := service.nowFn()
+
+	if len(collector.Bucket) == 0 {
+		return nil
+	}
 
 	// save the new results
-	var errAtRest error
-	if len(collector.Bucket) > 0 {
+	var errAtRest errs.Group
 	// record bucket tallies to DB
-		err = service.projectAccountingDB.SaveTallies(ctx, finishTime, collector.Bucket)
-		if err != nil {
-			errAtRest = Error.New("ProjectAccounting.SaveTallies failed: %v", err)
+	// TODO we should be able replace map with just slice
+	intervalStart := service.nowFn()
+	buffer := map[metabase.BucketLocation]*accounting.BucketTally{}
+	for location, tally := range collector.Bucket {
+		buffer[location] = tally
+
+		if len(buffer) >= service.config.SaveTalliesBatchSize {
+			// don't stop on error, we would like to store as much as possible
+			errAtRest.Add(service.flushTallies(ctx, intervalStart, buffer))
+
+			for key := range buffer {
+				delete(buffer, key)
+			}
 		}
+	}
 
-		updateLiveAccountingTotals(projectTotalsFromBuckets(collector.Bucket))
-	}
+	errAtRest.Add(service.flushTallies(ctx, intervalStart, buffer))
 
-	if len(collector.Bucket) > 0 {
-		var total accounting.BucketTally
-		// TODO for now we don't have access to inline/remote stats per bucket
-		// but that may change in the future. To get back those stats we would
+	updateLiveAccountingTotals(projectTotalsFromBuckets(collector.Bucket))
+
+	var total accounting.BucketTally
+	// TODO for now we don't have access to inline/remote stats per bucket
+	// but that may change in the future. To get back those stats we would
@@ -233,10 +250,16 @@ func (service *Service) Tally(ctx context.Context) (err error) {
 	monAccounting.IntVal("total_segments").Observe(total.Segments()) //mon:locked
 	monAccounting.IntVal("total_bytes").Observe(total.Bytes()) //mon:locked
 	monAccounting.IntVal("total_pending_objects").Observe(total.PendingObjectCount)
-	}
 
-	// return errors if something went wrong.
-	return errAtRest
+	return errAtRest.Err()
 }
 
+func (service *Service) flushTallies(ctx context.Context, intervalStart time.Time, tallies map[metabase.BucketLocation]*accounting.BucketTally) error {
+	err := service.projectAccountingDB.SaveTallies(ctx, intervalStart, tallies)
+	if err != nil {
+		return Error.New("ProjectAccounting.SaveTallies failed: %v", err)
+	}
+	return nil
+}
+
 // BucketTallyCollector collects and adds up tallies for buckets.
```
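The core of the change: instead of one SaveTallies call over the whole map, tallies are buffered and flushed every SaveTalliesBatchSize entries, and errors are collected in an errs.Group rather than aborting the run. A self-contained sketch of that pattern (generic K/V stand in for metabase.BucketLocation and *accounting.BucketTally; an illustration, not the storj code itself):

```go
// Sketch: buffer items in a map and flush in fixed-size batches,
// collecting per-batch errors instead of stopping at the first failure.
package main

import (
	"fmt"

	"github.com/zeebo/errs"
)

func flushInBatches[K comparable, V any](items map[K]V, batchSize int, flush func(map[K]V) error) error {
	var group errs.Group
	buffer := map[K]V{}
	for k, v := range items {
		buffer[k] = v
		if len(buffer) >= batchSize {
			// don't stop on error: store as much as possible, collect failures
			group.Add(flush(buffer))
			for key := range buffer {
				delete(buffer, key)
			}
		}
	}
	// final flush for the remainder (mirrors the unconditional Add in the diff)
	group.Add(flush(buffer))
	return group.Err()
}

func main() {
	tallies := map[string]int{"a": 1, "b": 2, "c": 3}
	err := flushInBatches(tallies, 2, func(batch map[string]int) error {
		fmt.Println("saving batch of", len(batch), "tallies")
		return nil
	})
	fmt.Println("combined error:", err)
}
```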
```diff
@@ -346,7 +346,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
 			},
 		}
 
-		findTally := func(bucket string, tallies []accounting.BucketTally) accounting.BucketTally {
+		findTally := func(t *testing.T, bucket string, tallies []accounting.BucketTally) accounting.BucketTally {
 			for _, v := range tallies {
 				if v.BucketName == bucket {
 					return v
@@ -378,7 +378,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
 
 			tallies, err := planet.Satellites[0].DB.ProjectAccounting().GetTallies(ctx)
 			require.NoError(t, err)
-			lastTally := findTally(tc.name, tallies)
+			lastTally := findTally(t, tc.name, tallies)
 			require.Equal(t, tc.name, lastTally.BucketName)
 			require.Equal(t, tc.expectedTallyAfterCopy.ObjectCount, lastTally.ObjectCount)
 			require.Equal(t, tc.expectedTallyAfterCopy.TotalBytes, lastTally.TotalBytes)
@@ -392,7 +392,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
 
 			tallies, err = planet.Satellites[0].DB.ProjectAccounting().GetTallies(ctx)
 			require.NoError(t, err)
-			lastTally = findTally(tc.name, tallies)
+			lastTally = findTally(t, tc.name, tallies)
 			require.Equal(t, tc.name, lastTally.BucketName)
 			require.Equal(t, tc.expectedTallyAfterDelete.ObjectCount, lastTally.ObjectCount)
 			require.Equal(t, tc.expectedTallyAfterDelete.TotalBytes, lastTally.TotalBytes)
@@ -402,7 +402,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
 	})
 }
 
-func TestTallyBatchSize(t *testing.T) {
+func TestBucketTallyCollectorListLimit(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
@@ -454,3 +454,58 @@ func TestTallyBatchSize(t *testing.T) {
 		}
 	})
 }
+
+func TestTallySaveTalliesBatchSize(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Metainfo.ProjectLimits.MaxBuckets = 23
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		planet.Satellites[0].Accounting.Tally.Loop.Pause()
+
+		projectID := planet.Uplinks[0].Projects[0].ID
+
+		numberOfBuckets := 23
+		expectedBucketLocations := []metabase.BucketLocation{}
+		for i := 0; i < numberOfBuckets; i++ {
+			data := testrand.Bytes(1*memory.KiB + memory.Size(i))
+			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket"+strconv.Itoa(i), "test", data)
+			require.NoError(t, err)
+
+			expectedBucketLocations = append(expectedBucketLocations, metabase.BucketLocation{
+				ProjectID:  projectID,
+				BucketName: "bucket" + strconv.Itoa(i),
+			})
+		}
+
+		satellite := planet.Satellites[0]
+		for _, batchSize := range []int{1, 2, 3, numberOfBuckets, 29, planet.Satellites[0].Config.Tally.SaveTalliesBatchSize} {
+			config := satellite.Config.Tally
+			config.SaveTalliesBatchSize = batchSize
+
+			tally := tally.New(zaptest.NewLogger(t), satellite.DB.StoragenodeAccounting(), satellite.DB.ProjectAccounting(),
+				satellite.LiveAccounting.Cache, satellite.Metabase.DB, satellite.DB.Buckets(), config)
+
+			// collect and store tallies in DB
+			err := tally.Tally(ctx)
+			require.NoError(t, err)
+
+			// verify we have in DB expected list of tallies
+			tallies, err := satellite.DB.ProjectAccounting().GetTallies(ctx)
+			require.NoError(t, err)
+
+			_, err = satellite.DB.Testing().RawDB().ExecContext(ctx, "DELETE FROM bucket_storage_tallies")
+			require.NoError(t, err)
+
+			bucketLocations := []metabase.BucketLocation{}
+			for _, tally := range tallies {
+				bucketLocations = append(bucketLocations, tally.BucketLocation)
+			}
+
+			require.ElementsMatch(t, expectedBucketLocations, bucketLocations)
+		}
+	})
+}
```
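The batch sizes exercised here deliberately bracket the 23-bucket workload: 1, 2, and 3 force many partial flushes, 23 fills exactly one batch, 29 leaves the remainder to the final flush, and the configured default (10000) takes the single-flush path.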
```diff
@@ -822,14 +822,17 @@ func TestCollectBucketTallies(t *testing.T) {
 	t.Run("invalid bucket name", func(t *testing.T) {
 		defer metabasetest.DeleteAll{}.Check(ctx, t, db)
 
+		projectA := uuid.UUID{1}
+		projectB := uuid.UUID{2}
+
 		metabasetest.CollectBucketTallies{
 			Opts: metabase.CollectBucketTallies{
 				From: metabase.BucketLocation{
-					ProjectID:  testrand.UUID(),
+					ProjectID:  projectA,
 					BucketName: "a\\",
 				},
 				To: metabase.BucketLocation{
-					ProjectID:  testrand.UUID(),
+					ProjectID:  projectB,
 					BucketName: "b\\",
 				},
 			},
```
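Swapping testrand.UUID() for the fixed uuid.UUID{1} and uuid.UUID{2} presumably makes the From/To range deterministic: two random UUIDs can sort in either order, while {1} always precedes {2}.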
```diff
@@ -29,7 +29,7 @@ var (
 type Config struct {
 	Enabled        bool          `help:"whether to run this chore." default:"false"`
 	Interval       time.Duration `help:"How often to run this chore, which is how often unpaid invoices are checked." default:"24h"`
-	GracePeriod    time.Duration `help:"How long to wait between a warning event and freezing an account." default:"720h"`
+	GracePeriod    time.Duration `help:"How long to wait between a warning event and freezing an account." default:"360h"`
 	PriceThreshold int64         `help:"The failed invoice amount (in cents) beyond which an account will not be frozen" default:"10000"`
 }
 
```
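That is, the default grace period between the warning event and the freeze drops from 720h (30 days) to 360h (15 days); the satellite-config.yaml.lock hunk below records the same change.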
```diff
@@ -549,67 +549,55 @@ func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid
 func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, projectID uuid.UUID, partnerNames []string, since, before time.Time) (usages map[string]accounting.ProjectUsage, err error) {
 	defer mon.Task()(&ctx)(&err)
 	since = timeTruncateDown(since)
 
-	storageQuery := db.db.Rebind(`
-		SELECT * FROM (
-			SELECT
-				COALESCE(t.bucket_name, rollups.bucket_name) AS bucket_name,
-				COALESCE(t.interval_start, rollups.interval_start) AS interval_start,
-				COALESCE(t.total_bytes, 0) AS total_bytes,
-				COALESCE(t.inline, 0) AS inline,
-				COALESCE(t.remote, 0) AS remote,
-				COALESCE(t.total_segments_count, 0) AS total_segments_count,
-				COALESCE(t.object_count, 0) AS object_count,
-				m.user_agent,
-				COALESCE(rollups.egress, 0) AS egress
-			FROM
-				bucket_storage_tallies AS t
-			FULL OUTER JOIN (
-				SELECT
-					bucket_name,
-					SUM(settled + inline) AS egress,
-					MIN(interval_start) AS interval_start
-				FROM
-					bucket_bandwidth_rollups
-				WHERE
-					project_id = $1 AND
-					interval_start >= $2 AND
-					interval_start < $3 AND
-					action = $4
-				GROUP BY
-					bucket_name
-			) AS rollups ON
-				t.bucket_name = rollups.bucket_name
-			LEFT JOIN bucket_metainfos AS m ON
-				m.project_id = $1 AND
-				m.name = COALESCE(t.bucket_name, rollups.bucket_name)
-			WHERE
-				(t.project_id IS NULL OR t.project_id = $1) AND
-				COALESCE(t.interval_start, rollups.interval_start) >= $2 AND
-				COALESCE(t.interval_start, rollups.interval_start) < $3
-		) AS q` + db.db.impl.AsOfSystemInterval(-10) + ` ORDER BY bucket_name, interval_start DESC`)
-
-	usages = make(map[string]accounting.ProjectUsage)
-
-	storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], since, before, pb.PieceAction_GET)
+	bucketNames, err := db.getBucketsSinceAndBefore(ctx, projectID, since, before)
 	if err != nil {
 		return nil, err
 	}
-	var prevTallyForBucket = make(map[string]*accounting.BucketStorageTally)
-	var recentBucket string
 
-	for storageTalliesRows.Next() {
-		tally := accounting.BucketStorageTally{}
-		var userAgent []byte
-		var inline, remote, egress int64
-		err = storageTalliesRows.Scan(&tally.BucketName, &tally.IntervalStart, &tally.TotalBytes, &inline, &remote, &tally.TotalSegmentCount, &tally.ObjectCount, &userAgent, &egress)
-		if err != nil {
-			return nil, errs.Combine(err, storageTalliesRows.Close())
+	storageQuery := db.db.Rebind(`
+		SELECT
+			bucket_storage_tallies.interval_start,
+			bucket_storage_tallies.total_bytes,
+			bucket_storage_tallies.inline,
+			bucket_storage_tallies.remote,
+			bucket_storage_tallies.total_segments_count,
+			bucket_storage_tallies.object_count
+		FROM
+			bucket_storage_tallies
+		WHERE
+			bucket_storage_tallies.project_id = ? AND
+			bucket_storage_tallies.bucket_name = ? AND
+			bucket_storage_tallies.interval_start >= ? AND
+			bucket_storage_tallies.interval_start < ?
+		ORDER BY bucket_storage_tallies.interval_start DESC
+	`)
+
+	totalEgressQuery := db.db.Rebind(`
+		SELECT
+			COALESCE(SUM(settled) + SUM(inline), 0)
+		FROM
+			bucket_bandwidth_rollups
+		WHERE
+			project_id = ? AND
+			bucket_name = ? AND
+			interval_start >= ? AND
+			interval_start < ? AND
+			action = ?;
+	`)
+
+	usages = make(map[string]accounting.ProjectUsage)
+
+	for _, bucket := range bucketNames {
+		userAgentRow, err := db.db.Get_BucketMetainfo_UserAgent_By_ProjectId_And_Name(ctx,
+			dbx.BucketMetainfo_ProjectId(projectID[:]),
+			dbx.BucketMetainfo_Name([]byte(bucket)))
+		if err != nil && !errors.Is(err, sql.ErrNoRows) {
+			return nil, err
 		}
 
 		var partner string
-		if userAgent != nil {
-			entries, err := useragent.ParseEntries(userAgent)
+		if userAgentRow != nil && userAgentRow.UserAgent != nil {
+			entries, err := useragent.ParseEntries(userAgentRow.UserAgent)
 			if err != nil {
 				return nil, err
 			}
@@ -623,35 +611,40 @@ func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, proje
 			}
 		}
 		}
 
 		if _, ok := usages[partner]; !ok {
 			usages[partner] = accounting.ProjectUsage{Since: since, Before: before}
 		}
 		usage := usages[partner]
 
+		storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
+		if err != nil {
+			return nil, err
+		}
+
+		var prevTally *accounting.BucketStorageTally
+		for storageTalliesRows.Next() {
+			tally := accounting.BucketStorageTally{}
+
+			var inline, remote int64
+			err = storageTalliesRows.Scan(&tally.IntervalStart, &tally.TotalBytes, &inline, &remote, &tally.TotalSegmentCount, &tally.ObjectCount)
+			if err != nil {
+				return nil, errs.Combine(err, storageTalliesRows.Close())
+			}
 			if tally.TotalBytes == 0 {
 				tally.TotalBytes = inline + remote
 			}
 
-			if tally.BucketName != recentBucket {
-				usage.Egress += egress
-				recentBucket = tally.BucketName
-			}
-
-			if _, ok := prevTallyForBucket[tally.BucketName]; !ok {
-				prevTallyForBucket[tally.BucketName] = &tally
-				usages[partner] = usage
+			if prevTally == nil {
+				prevTally = &tally
 				continue
 			}
 
-			hours := prevTallyForBucket[tally.BucketName].IntervalStart.Sub(tally.IntervalStart).Hours()
+			hours := prevTally.IntervalStart.Sub(tally.IntervalStart).Hours()
 			usage.Storage += memory.Size(tally.TotalBytes).Float64() * hours
 			usage.SegmentCount += float64(tally.TotalSegmentCount) * hours
 			usage.ObjectCount += float64(tally.ObjectCount) * hours
 
-			usages[partner] = usage
-			prevTallyForBucket[tally.BucketName] = &tally
+			prevTally = &tally
 		}
 
 		err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
@@ -659,6 +652,20 @@ func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, proje
 			return nil, err
 		}
 
+		totalEgressRow := db.db.QueryRowContext(ctx, totalEgressQuery, projectID[:], []byte(bucket), since, before, pb.PieceAction_GET)
+		if err != nil {
+			return nil, err
+		}
+
+		var egress int64
+		if err = totalEgressRow.Scan(&egress); err != nil {
+			return nil, err
+		}
+		usage.Egress += egress
+
+		usages[partner] = usage
+	}
 
 	return usages, nil
 }
```
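The per-bucket loop above integrates storage over time: rows arrive newest first (ORDER BY interval_start DESC), and each older tally contributes its counts multiplied by the hours separating it from the previous, newer tally. A self-contained sketch of that accumulation (types simplified from accounting.BucketStorageTally):

```go
// Sketch: byte-hours from a descending series of storage tallies.
package main

import (
	"fmt"
	"time"
)

type tally struct {
	intervalStart time.Time
	totalBytes    int64
}

// byteHours expects tallies ordered by intervalStart descending,
// matching the query's ORDER BY interval_start DESC.
func byteHours(tallies []tally) float64 {
	var total float64
	var prev *tally
	for i := range tallies {
		cur := &tallies[i]
		if prev == nil {
			prev = cur // the newest tally only opens the first interval
			continue
		}
		hours := prev.intervalStart.Sub(cur.intervalStart).Hours()
		total += float64(cur.totalBytes) * hours
		prev = cur
	}
	return total
}

func main() {
	now := time.Now()
	// 100 bytes held for 2 hours -> 200 byte-hours
	fmt.Println(byteHours([]tally{
		{intervalStart: now, totalBytes: 100},
		{intervalStart: now.Add(-2 * time.Hour), totalBytes: 100},
	}))
}
```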
```diff
@@ -207,31 +207,31 @@ func Test_GetProjectTotal(t *testing.T) {
 			require.NoError(t, err)
 
 			const epsilon = 1e-8
-			require.InDelta(t, float64(tallies[0].Bytes()+tallies[1].Bytes()), usage.Storage, epsilon)
-			require.InDelta(t, float64(tallies[0].TotalSegmentCount+tallies[1].TotalSegmentCount), usage.SegmentCount, epsilon)
-			require.InDelta(t, float64(tallies[0].ObjectCount+tallies[1].ObjectCount), usage.ObjectCount, epsilon)
-			require.Equal(t, expectedEgress, usage.Egress)
-			require.Equal(t, tallies[0].IntervalStart, usage.Since)
-			require.Equal(t, tallies[2].IntervalStart.Add(time.Minute), usage.Before)
+			require.InDelta(t, usage.Storage, float64(tallies[0].Bytes()+tallies[1].Bytes()), epsilon)
+			require.InDelta(t, usage.SegmentCount, float64(tallies[0].TotalSegmentCount+tallies[1].TotalSegmentCount), epsilon)
+			require.InDelta(t, usage.ObjectCount, float64(tallies[0].ObjectCount+tallies[1].ObjectCount), epsilon)
+			require.Equal(t, usage.Egress, expectedEgress)
+			require.Equal(t, usage.Since, tallies[0].IntervalStart)
+			require.Equal(t, usage.Before, tallies[2].IntervalStart.Add(time.Minute))
 
 			// Ensure that GetProjectTotal treats the 'before' arg as exclusive
 			usage, err = db.ProjectAccounting().GetProjectTotal(ctx, projectID, tallies[0].IntervalStart, tallies[2].IntervalStart)
 			require.NoError(t, err)
-			require.InDelta(t, float64(tallies[0].Bytes()), usage.Storage, epsilon)
-			require.InDelta(t, float64(tallies[0].TotalSegmentCount), usage.SegmentCount, epsilon)
-			require.InDelta(t, float64(tallies[0].ObjectCount), usage.ObjectCount, epsilon)
-			require.Equal(t, expectedEgress, usage.Egress)
-			require.Equal(t, tallies[0].IntervalStart, usage.Since)
-			require.Equal(t, tallies[2].IntervalStart, usage.Before)
+			require.InDelta(t, usage.Storage, float64(tallies[0].Bytes()), epsilon)
+			require.InDelta(t, usage.SegmentCount, float64(tallies[0].TotalSegmentCount), epsilon)
+			require.InDelta(t, usage.ObjectCount, float64(tallies[0].ObjectCount), epsilon)
+			require.Equal(t, usage.Egress, expectedEgress)
+			require.Equal(t, usage.Since, tallies[0].IntervalStart)
+			require.Equal(t, usage.Before, tallies[2].IntervalStart)
 
 			usage, err = db.ProjectAccounting().GetProjectTotal(ctx, projectID, rollups[0].IntervalStart, rollups[1].IntervalStart)
 			require.NoError(t, err)
 			require.Zero(t, usage.Storage)
 			require.Zero(t, usage.SegmentCount)
 			require.Zero(t, usage.ObjectCount)
-			require.Equal(t, rollups[0].Inline+rollups[0].Settled, usage.Egress)
-			require.Equal(t, rollups[0].IntervalStart, usage.Since)
-			require.Equal(t, rollups[1].IntervalStart, usage.Before)
+			require.Equal(t, usage.Egress, rollups[0].Inline+rollups[0].Settled)
+			require.Equal(t, usage.Since, rollups[0].IntervalStart)
+			require.Equal(t, usage.Before, rollups[1].IntervalStart)
 		},
 	)
 }
```
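Worth noting about these swaps: testify documents the parameter order as require.Equal(t, expected, actual), so reversing the arguments changes only how failure messages label the two values, never whether the assertion passes. A tiny illustration (in a _test.go file; package name is hypothetical):

```go
// Sketch: both argument orders behave identically on pass/fail.
package ordering

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestArgumentOrder(t *testing.T) {
	expected, actual := int64(42), int64(42)
	require.Equal(t, expected, actual) // conventional order
	require.Equal(t, actual, expected) // order used after this change
	require.InDelta(t, float64(actual), float64(expected), 1e-8)
}
```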
scripts/testdata/satellite-config.yaml.lock (vendored, 5 changed lines)

```diff
@@ -2,7 +2,7 @@
 # account-freeze.enabled: false
 
 # How long to wait between a warning event and freezing an account.
-# account-freeze.grace-period: 720h0m0s
+# account-freeze.grace-period: 360h0m0s
 
 # How often to run this chore, which is how often unpaid invoices are checked.
 # account-freeze.interval: 24h0m0s
@@ -1099,6 +1099,9 @@ server.private-address: 127.0.0.1:7778
 # how large of batches SaveRollup should process at a time
 # tally.save-rollup-batch-size: 1000
 
+# how large should be insert into tallies
+# tally.save-tallies-batch-size: 10000
+
 # whether to enable node tally with ranged loop
 # tally.use-ranged-loop: true
```
```diff
@@ -83,9 +83,9 @@ const icon = computed((): string => ObjectType.findIcon(props.itemType));
 const customIconClasses = computed(() => {
     const classes = {};
     if (props.itemType === 'project') {
-        if (props.item['owner']) {
+        if (props.item['role'] === ProjectRole.Owner) {
             classes['project-owner'] = true;
-        } else {
+        } else if (props.item['role'] === ProjectRole.Member) {
             classes['project-member'] = true;
         }
     }
```
```diff
@@ -215,6 +215,7 @@ import { useNotify } from '@/utils/hooks';
 import { useUsersStore } from '@/store/modules/usersStore';
 import { useProjectsStore } from '@/store/modules/projectsStore';
 import { useConfigStore } from '@/store/modules/configStore';
+import { RouteConfig } from '@/types/router';
 
 import VButton from '@/components/common/VButton.vue';
 
@@ -636,6 +637,21 @@ onMounted(async (): Promise<void> => {
     const projectID = projectsStore.state.selectedProject.id;
     if (!projectID) return;
 
+    if (projectsStore.state.selectedProject.ownerId !== usersStore.state.user.id) {
+        await router.replace(configStore.state.config.allProjectsDashboard ? RouteConfig.AllProjectsDashboard : RouteConfig.ProjectDashboard.path);
+        return;
+    }
+
+    projectsStore.$onAction(({ name, after }) => {
+        if (name === 'selectProject') {
+            after((_) => {
+                if (projectsStore.state.selectedProject.ownerId !== usersStore.state.user.id) {
+                    router.replace(RouteConfig.ProjectDashboard.path);
+                }
+            });
+        }
+    });
+
     if (usersStore.state.user.paidTier) {
         isPaidTier.value = true;
     }
```
|
|||||||
border: 1px solid var(--c-yellow-2);
|
border: 1px solid var(--c-yellow-2);
|
||||||
border-radius: 24px;
|
border-radius: 24px;
|
||||||
color: var(--c-yellow-5);
|
color: var(--c-yellow-5);
|
||||||
|
background: var(--c-white);
|
||||||
|
|
||||||
:deep(path) {
|
:deep(path) {
|
||||||
fill: var(--c-yellow-5);
|
fill: var(--c-yellow-5);
|
||||||
|
```diff
@@ -17,7 +17,7 @@
         </div>
 
         <div v-if="isDropdownOpen" v-click-outside="closeDropDown" class="project-item__menu__dropdown">
-            <div class="project-item__menu__dropdown__item" @click.stop="goToProjectEdit">
+            <div v-if="isOwner" class="project-item__menu__dropdown__item" @click.stop="goToProjectEdit">
                 <gear-icon />
                 <p class="project-item__menu__dropdown__item__label">Project settings</p>
             </div>
```
web/storagenode/dist/index.html (vendored, new file, 1 line). The entire prebuilt dashboard page is committed as the following single line:
<!doctype html><html lang="en"><head><meta charset="utf-8"><link rel="apple-touch-icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAL8SURBVHgBxVcxTFNRFL01JvylrRsmNWETWWSybg5I3NTQBCdRFyfrDuoKMgMyOQgdTcDAJmrUrWWCgQIbhJKy0TJQpsc73P/S9/tfP+8/GnqSn1fo77/3nnPufe8nhAR1ETeoy7hJjpj93/ycu0+UuUVuEA5Y2xaifirEVpX/nvknnBFbgpMGUfmIKOkRLRT578oxXy6IJcFCialH0EyaaPoZBy7tEQ3NEY1IKd4/iidHwqYLijLA559cuY6dT0RjBU5AAYm9fiivLFnBKMGBTyeqQ4BXhXDwdqjUiKZkskOzREsbzeeBNRMCEiDgr12uYl1WNbnW/oc2iUys8jrQyyxhHRkM3hdgAMFBHQyGG/GDqyDlsSeS/npQC99jlEBpOnyX2XCF8sGhZLbeMLMZkCDbJ1nYYTfDeMP9fMH5y5vmIKYE8RxUjBXPedDH1Zu6I9QFSzLQxErz4Xn5oNwg+2NSmuv3Lkvz4QlTi8rupDlBmA6tqQLrnYNCvoxSNAOtUEaakwzMv+ALidTP2OlKKiSK75Cs6hy9NYFkjzmG1SBCIuUq0Za8pgydge8R9E+e10qNrGE1ikH5435mo11bQgr4B9LEgVUC0Npm1o+vcuvBxB1NYFsaaeC2XUuW/Xs7msC9Xqa+MMa9jQr1KtXAQoKYHakeskbIhDrVasdTbbVY4s8ZYld/9PWuyeTSHksFBjBFcZ+aH/j/yZk5gcAcgImgIX6MNsKKhKBta1sB2A3HV5pD6iJQIzw/MICwoohc1F6ALBH03XemFYPl+VdzcBNUh6j5gZZEcP341opAAnX/AXl/A0FlrrshgMRR+YUvPPN8CHgAxlqWVYuEdH7V/ZilA6cosFDa53EcmUDKC+7X+IwxHEVhO0DK6aeXH88uHcWQA8xE7Yg69M6xgdWZUEFtNNDyx1s2KnyDIxu22zdZTjgWhANm/vL6clGIsnw3+Fbk94RreS8AMGrBxvwoT0lMPnSNC2JJoAPdgnMBJLjKq5lzAp1C19+OzwFiYzAU5f7eeQAAAABJRU5ErkJggg==" type="image/x-icon"><meta name="apple-mobile-web-app-capable" content="yes"><meta name="apple-mobile-web-app-status-bar-style" content="black"><meta name="viewport" content="width=device-width,initial-scale=1,viewport-fit=cover"><meta name="description" content="Node Dashboard page"><link rel="shortcut icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAL8SURBVHgBxVcxTFNRFL01JvylrRsmNWETWWSybg5I3NTQBCdRFyfrDuoKMgMyOQgdTcDAJmrUrWWCgQIbhJKy0TJQpsc73P/S9/tfP+8/GnqSn1fo77/3nnPufe8nhAR1ETeoy7hJjpj93/ycu0+UuUVuEA5Y2xaifirEVpX/nvknnBFbgpMGUfmIKOkRLRT578oxXy6IJcFCialH0EyaaPoZBy7tEQ3NEY1IKd4/iidHwqYLijLA559cuY6dT0RjBU5AAYm9fiivLFnBKMGBTyeqQ4BXhXDwdqjUiKZkskOzREsbzeeBNRMCEiDgr12uYl1WNbnW/oc2iUys8jrQyyxhHRkM3hdgAMFBHQyGG/GDqyDlsSeS/npQC99jlEBpOnyX2XCF8sGhZLbeMLMZkCDbJ1nYYTfDeMP9fMH5y5vmIKYE8RxUjBXPedDH1Zu6I9QFSzLQxErz4Xn5oNwg+2NSmuv3Lkvz4QlTi8rupDlBmA6tqQLrnYNCvoxSNAOtUEaakwzMv+ALidTP2OlKKiSK75Cs6hy9NYFkjzmG1SBCIuUq0Za8pgydge8R9E+e10qNrGE1ikH5435mo11bQgr4B9LEgVUC0Npm1o+vcuvBxB1NYFsaaeC2XUuW/Xs7msC9Xqa+MMa9jQr1KtXAQoKYHakeskbIhDrVasdTbbVY4s8ZYld/9PWuyeTSHksFBjBFcZ+aH/j/yZk5gcAcgImgIX6MNsKKhKBta1sB2A3HV5pD6iJQIzw/MICwoohc1F6ALBH03XemFYPl+VdzcBNUh6j5gZZEcP341opAAnX/AXl/A0FlrrshgMRR+YUvPPN8CHgAxlqWVYuEdH7V/ZilA6cosFDa53EcmUDKC+7X+IwxHEVhO0DK6aeXH88uHcWQA8xE7Yg69M6xgdWZUEFtNNDyx1s2KnyDIxu22zdZTjgWhANm/vL6clGIsnw3+Fbk94RreS8AMGrBxvwoT0lMPnSNC2JJoAPdgnMBJLjKq5lzAp1C19+OzwFiYzAU5f7eeQAAAABJRU5ErkJggg==" type="image/x-icon"><title>Node Dashboard</title><script defer="defer" src="/static/js/chunk-vendors.2eeef822.js"></script><script defer="defer" src="/static/js/app.c1e19b0f.js"></script><link href="/static/css/app.5e9416d0.css" rel="stylesheet"></head><body><div id="app"></div></body></html>
Other new vendored files under web/storagenode/dist/:

- static/css/app.5e9416d0.css (new file, 1 line)
- static/fonts/font_bold.7810ae44.ttf (binary, new file)
- static/fonts/font_medium.e5f19437.ttf (binary, new file)
- static/fonts/font_regular.a5cd2d13.ttf (binary, new file)
- static/img/404.4cd5d2c6.png (binary image, 29 KiB)
- static/img/404Dark.5c412c23.png (binary image, 29 KiB)
- static/img/BlurredChecks.9caa6897.png (binary image, 22 KiB)
- static/img/BlurredChecksDark.f0134f4e.png (binary image, 127 KiB)
- static/img/EmptyState.9614d68f.png (binary image, 9.1 KiB)
- static/img/EmptyStateLarge.665f645b.png (binary image, 28 KiB)
- static/img/NoData.6e81f145.png (binary image, 23 KiB)
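Together with the gui-prebui branch name, these additions appear to vendor a prebuilt storagenode web dashboard into the repository (a single-line index.html with inlined favicon data URIs, hashed JS/CSS bundles, fonts, and images), so the dashboard can be served without running a frontend build step.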