// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	pgxerrcode "github.com/jackc/pgerrcode"
	"github.com/jackc/pgx/v5"
	"github.com/zeebo/errs"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/useragent"
	"storj.io/common/uuid"
	"storj.io/private/dbutil"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/dbutil/pgutil/pgerrcode"
	"storj.io/private/dbutil/pgxutil"
	"storj.io/private/tagsql"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/satellitedb/dbx"
)

// ensure that ProjectAccounting implements accounting.ProjectAccounting.
var _ accounting.ProjectAccounting = (*ProjectAccounting)(nil)

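// allocatedExpirationInDays is the number of most recent days for which
// allocated (rather than settled) egress is still counted toward usage;
// see GetProjectBandwidth for how the cutoff is applied.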
var allocatedExpirationInDays = 2

// ProjectAccounting implements the accounting/db ProjectAccounting interface.
type ProjectAccounting struct {
	db *satelliteDB
}

// SaveTallies saves the latest bucket info.
func (db *ProjectAccounting) SaveTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[metabase.BucketLocation]*accounting.BucketTally) (err error) {
	defer mon.Task()(&ctx)(&err)
	if len(bucketTallies) == 0 {
		return nil
	}
	var bucketNames, projectIDs [][]byte
	var totalBytes, metadataSizes []int64
	var totalSegments, objectCounts []int64
	for _, info := range bucketTallies {
		bucketNames = append(bucketNames, []byte(info.BucketName))
		projectIDs = append(projectIDs, info.ProjectID[:])
		totalBytes = append(totalBytes, info.TotalBytes)
		totalSegments = append(totalSegments, info.TotalSegments)
		objectCounts = append(objectCounts, info.ObjectCount)
		metadataSizes = append(metadataSizes, info.MetadataSize)
	}
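
	// Insert all tallies in a single statement: each column is passed as an
	// array and unnest() expands the arrays into one row per bucket.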
	_, err = db.db.DB.ExecContext(ctx, db.db.Rebind(`
		INSERT INTO bucket_storage_tallies (
			interval_start,
			bucket_name, project_id,
			total_bytes, inline, remote,
			total_segments_count, remote_segments_count, inline_segments_count,
			object_count, metadata_size)
		SELECT
			$1,
			unnest($2::bytea[]), unnest($3::bytea[]),
			unnest($4::int8[]), $5, $6,
			unnest($7::int8[]), $8, $9,
			unnest($10::int8[]), unnest($11::int8[])`),
		intervalStart,
		pgutil.ByteaArray(bucketNames), pgutil.ByteaArray(projectIDs),
		pgutil.Int8Array(totalBytes), 0, 0,
		pgutil.Int8Array(totalSegments), 0, 0,
		pgutil.Int8Array(objectCounts), pgutil.Int8Array(metadataSizes))

	return Error.Wrap(err)
}

// GetTallies retrieves all tallies ordered by interval start (descending).
func (db *ProjectAccounting) GetTallies(ctx context.Context) (tallies []accounting.BucketTally, err error) {
	defer mon.Task()(&ctx)(&err)

	dbxTallies, err := db.db.All_BucketStorageTally_OrderBy_Desc_IntervalStart(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	for _, dbxTally := range dbxTallies {
		projectID, err := uuid.FromBytes(dbxTally.ProjectId)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		totalBytes := dbxTally.TotalBytes
		if totalBytes == 0 {
			totalBytes = dbxTally.Inline + dbxTally.Remote
		}

		totalSegments := dbxTally.TotalSegmentsCount
		if totalSegments == 0 {
			totalSegments = dbxTally.InlineSegmentsCount + dbxTally.RemoteSegmentsCount
		}

		tallies = append(tallies, accounting.BucketTally{
			BucketLocation: metabase.BucketLocation{
				ProjectID:  projectID,
				BucketName: string(dbxTally.BucketName),
			},
			ObjectCount:   int64(dbxTally.ObjectCount),
			TotalSegments: int64(totalSegments),
			TotalBytes:    int64(totalBytes),
			MetadataSize:  int64(dbxTally.MetadataSize),
		})
	}

	return tallies, nil
}

// CreateStorageTally creates a record in the bucket_storage_tallies accounting table.
func (db *ProjectAccounting) CreateStorageTally(ctx context.Context, tally accounting.BucketStorageTally) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.DB.ExecContext(ctx, db.db.Rebind(`
		INSERT INTO bucket_storage_tallies (
			interval_start,
			bucket_name, project_id,
			total_bytes, inline, remote,
			total_segments_count, remote_segments_count, inline_segments_count,
			object_count, metadata_size)
		VALUES (
			?,
			?, ?,
			?, ?, ?,
			?, ?, ?,
			?, ?
		)`), tally.IntervalStart,
		[]byte(tally.BucketName), tally.ProjectID,
		tally.TotalBytes, 0, 0,
		tally.TotalSegmentCount, 0, 0,
		tally.ObjectCount, tally.MetadataSize,
	)

	return Error.Wrap(err)
}

// GetNonEmptyTallyBucketsInRange returns a list of bucket locations within the given range
// whose most recent tally does not represent empty usage.
func (db *ProjectAccounting) GetNonEmptyTallyBucketsInRange(ctx context.Context, from, to metabase.BucketLocation) (result []metabase.BucketLocation, err error) {
	defer mon.Task()(&ctx)(&err)
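
	// The correlated subquery fetches each bucket's most recent tally; the
	// surrounding NOT 0 IN (...) keeps a bucket only when that latest tally
	// reports a nonzero object count.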
	err = withRows(db.db.QueryContext(ctx, `
		SELECT project_id, name
		FROM bucket_metainfos bm
		WHERE (project_id, name) BETWEEN ($1, $2) AND ($3, $4)
		AND NOT 0 IN (
			SELECT object_count FROM bucket_storage_tallies
			WHERE (project_id, bucket_name) = (bm.project_id, bm.name)
			ORDER BY interval_start DESC
			LIMIT 1
		)
	`, from.ProjectID, []byte(from.BucketName), to.ProjectID, []byte(to.BucketName)),
	)(func(r tagsql.Rows) error {
		for r.Next() {
			loc := metabase.BucketLocation{}
			if err := r.Scan(&loc.ProjectID, &loc.BucketName); err != nil {
				return err
			}
			result = append(result, loc)
		}
		return nil
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	return result, nil
}

// GetProjectSettledBandwidthTotal returns the sum of GET bandwidth usage settled for a projectID in the past time frame.
func (db *ProjectAccounting) GetProjectSettledBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var sum *int64
	query := `SELECT SUM(settled) FROM bucket_bandwidth_rollups WHERE project_id = $1 AND action = $2 AND interval_start >= $3;`
	err = db.db.QueryRow(ctx, db.db.Rebind(query), projectID[:], pb.PieceAction_GET, from.UTC()).Scan(&sum)
	if errors.Is(err, sql.ErrNoRows) || sum == nil {
		return 0, nil
	}

	return *sum, err
}

// GetProjectBandwidth returns the used bandwidth (settled or allocated) for the specified year, month and day.
func (db *ProjectAccounting) GetProjectBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int, asOfSystemInterval time.Duration) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var egress *int64

	startOfMonth := time.Date(year, month, 1, 0, 0, 0, 0, time.UTC)

	var expiredSince time.Time
	if day < allocatedExpirationInDays {
		expiredSince = startOfMonth
	} else {
		expiredSince = time.Date(year, month, day-allocatedExpirationInDays, 0, 0, 0, 0, time.UTC)
	}
	periodEnd := time.Date(year, month+1, 1, 0, 0, 0, 0, time.UTC)
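
	// Days before expiredSince are fully settled, so count their settled
	// egress; more recent days still count allocated egress, minus the dead
	// allocation that is known to never settle.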
	query := `WITH egress AS (
			SELECT
				CASE WHEN interval_day < ?
					THEN egress_settled
					ELSE egress_allocated-egress_dead
				END AS amount
			FROM project_bandwidth_daily_rollups
			WHERE project_id = ? AND interval_day >= ? AND interval_day < ?
		) SELECT sum(amount) FROM egress` + db.db.impl.AsOfSystemInterval(asOfSystemInterval)
	err = db.db.QueryRow(ctx, db.db.Rebind(query), expiredSince, projectID[:], startOfMonth, periodEnd).Scan(&egress)
	if errors.Is(err, sql.ErrNoRows) || egress == nil {
		return 0, nil
	}

	return *egress, err
}

// GetProjectDailyBandwidth returns project bandwidth (allocated and settled) for the specified day.
func (db *ProjectAccounting) GetProjectDailyBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int) (allocated int64, settled, dead int64, err error) {
	defer mon.Task()(&ctx)(&err)

	interval := time.Date(year, month, day, 0, 0, 0, 0, time.UTC)

	query := `SELECT egress_allocated, egress_settled, egress_dead FROM project_bandwidth_daily_rollups WHERE project_id = ? AND interval_day = ?;`
	err = db.db.QueryRow(ctx, db.db.Rebind(query), projectID[:], interval).Scan(&allocated, &settled, &dead)
	if errors.Is(err, sql.ErrNoRows) {
		return 0, 0, 0, nil
	}

	return allocated, settled, dead, err
}

// GetProjectDailyUsageByDateRange returns a project's daily allocated bandwidth, settled bandwidth, and storage usage for the given date range.
func (db *ProjectAccounting) GetProjectDailyUsageByDateRange(ctx context.Context, projectID uuid.UUID, from, to time.Time, crdbInterval time.Duration) (_ *accounting.ProjectDailyUsage, err error) {
	defer mon.Task()(&ctx)(&err)

	now := time.Now()
	nowBeginningOfDay := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)
	fromBeginningOfDay := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, time.UTC)
	toEndOfDay := time.Date(to.Year(), to.Month(), to.Day(), 23, 59, 59, 0, time.UTC)

	allocatedBandwidth := make([]accounting.ProjectUsageByDay, 0)
	settledBandwidth := make([]accounting.ProjectUsageByDay, 0)
	storage := make([]accounting.ProjectUsageByDay, 0)

	err = pgxutil.Conn(ctx, db.db, func(conn *pgx.Conn) error {
		var batch pgx.Batch

		expiredSince := nowBeginningOfDay.Add(time.Duration(-allocatedExpirationInDays) * time.Hour * 24)
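
		// The storage query keeps only the latest tally per bucket per day
		// (DISTINCT ON with interval_start DESC) and then sums those latest
		// tallies across all of the project's buckets.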
		storageQuery := db.db.Rebind(`
			WITH project_usage AS (
				SELECT
					interval_start,
					DATE_TRUNC('day',interval_start) AS interval_day,
					project_id,
					bucket_name,
					total_bytes
				FROM bucket_storage_tallies
				WHERE project_id = $1 AND
					interval_start >= $2 AND
					interval_start <= $3
			)
			-- Sum all buckets usage in the same project.
			SELECT
				interval_day,
				SUM(total_bytes) AS total_bytes
			FROM
				(SELECT
					DISTINCT ON (project_id, bucket_name, interval_day)
					project_id,
					bucket_name,
					total_bytes,
					interval_day,
					interval_start
				FROM project_usage
				ORDER BY project_id, bucket_name, interval_day, interval_start DESC) pu
			` + db.db.impl.AsOfSystemInterval(crdbInterval) + `
			GROUP BY project_id, bucket_name, interval_day
		`)
		batch.Queue(storageQuery, projectID, fromBeginningOfDay, toEndOfDay)
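
		// The bandwidth query mirrors GetProjectBandwidth: fully settled days
		// report settled egress, more recent days report allocated minus dead.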
		batch.Queue(db.db.Rebind(`
			SELECT interval_day, egress_settled,
				CASE WHEN interval_day < $1
					THEN egress_settled
					ELSE egress_allocated-egress_dead
				END AS allocated
			FROM project_bandwidth_daily_rollups
			WHERE project_id = $2 AND (interval_day BETWEEN $3 AND $4)
		`), expiredSince, projectID, fromBeginningOfDay, toEndOfDay)

		results := conn.SendBatch(ctx, &batch)
		defer func() { err = errs.Combine(err, results.Close()) }()

		storageRows, err := results.Query()
		if err != nil {
			if pgerrcode.FromError(err) == pgxerrcode.InvalidCatalogName {
				// This error may happen if the database was created within the last 5 minutes
				// (`AS OF SYSTEM TIME` points to a time before genesis).
				// In that case we can ignore the "database not found" error and return no usage.
				// If the database is really missing, we have more serious problems than getting 0s from here.
				return nil
			}
			return err
		}

		var current time.Time
		var index int

		for storageRows.Next() {
			var day time.Time
			var amount int64

			err = storageRows.Scan(&day, &amount)
			if err != nil {
				storageRows.Close()
				return err
			}

			if len(storage) == 0 {
				current = day
				storage = append(storage, accounting.ProjectUsageByDay{
					Date:  day.UTC(),
					Value: amount,
				})
				continue
			}

			if current == day {
				storage[index].Value += amount
				continue
			}

			current = day
			index++

			storage = append(storage, accounting.ProjectUsageByDay{
				Date:  day.UTC(),
				Value: amount,
			})
		}

		storageRows.Close()
		err = storageRows.Err()
		if err != nil {
			return err
		}

		bandwidthRows, err := results.Query()
		if err != nil {
			return err
		}

		for bandwidthRows.Next() {
			var day time.Time
			var settled int64
			var allocated int64

			err = bandwidthRows.Scan(&day, &settled, &allocated)
			if err != nil {
				bandwidthRows.Close()
				return err
			}

			settledBandwidth = append(settledBandwidth, accounting.ProjectUsageByDay{
				Date:  day.UTC(),
				Value: settled,
			})

			allocatedBandwidth = append(allocatedBandwidth, accounting.ProjectUsageByDay{
				Date:  day.UTC(),
				Value: allocated,
			})
		}

		bandwidthRows.Close()
		err = bandwidthRows.Err()
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, Error.New("unable to get project daily usage: %w", err)
	}

	return &accounting.ProjectDailyUsage{
		StorageUsage:            storage,
		AllocatedBandwidthUsage: allocatedBandwidth,
		SettledBandwidthUsage:   settledBandwidth,
	}, nil
}

// DeleteProjectBandwidthBefore deletes project bandwidth rollups before the given time.
func (db *ProjectAccounting) DeleteProjectBandwidthBefore(ctx context.Context, before time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.DB.ExecContext(ctx, db.db.Rebind("DELETE FROM project_bandwidth_daily_rollups WHERE interval_day < ?"), before)

	return err
}

// UpdateProjectUsageLimit updates project usage limit.
func (db *ProjectAccounting) UpdateProjectUsageLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.Update_Project_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
		dbx.Project_Update_Fields{
			UsageLimit: dbx.Project_UsageLimit(limit.Int64()),
		},
	)

	return err
}

// UpdateProjectBandwidthLimit updates project bandwidth limit.
func (db *ProjectAccounting) UpdateProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.Update_Project_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
		dbx.Project_Update_Fields{
			BandwidthLimit: dbx.Project_BandwidthLimit(limit.Int64()),
		},
	)

	return err
}

// UpdateProjectSegmentLimit updates project segment limit.
func (db *ProjectAccounting) UpdateProjectSegmentLimit(ctx context.Context, projectID uuid.UUID, limit int64) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.Update_Project_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
		dbx.Project_Update_Fields{
			SegmentLimit: dbx.Project_SegmentLimit(limit),
		},
	)

	return err
}

// GetProjectStorageLimit returns project storage usage limit.
func (db *ProjectAccounting) GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error) {
	defer mon.Task()(&ctx)(&err)

	row, err := db.db.Get_Project_UsageLimit_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
	)
	if err != nil {
		return nil, err
	}

	return row.UsageLimit, nil
}

// GetProjectBandwidthLimit returns project bandwidth usage limit.
func (db *ProjectAccounting) GetProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error) {
	defer mon.Task()(&ctx)(&err)

	row, err := db.db.Get_Project_BandwidthLimit_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
	)
	if err != nil {
		return nil, err
	}

	return row.BandwidthLimit, nil
}

// GetProjectObjectsSegments returns the total object and segment counts for a project.
func (db *ProjectAccounting) GetProjectObjectsSegments(ctx context.Context, projectID uuid.UUID) (objectsSegments accounting.ProjectObjectsSegments, err error) {
	defer mon.Task()(&ctx)(&err)

	var latestDate time.Time
	latestDateRow := db.db.QueryRowContext(ctx, db.db.Rebind(`
		SELECT interval_start FROM bucket_storage_tallies bst
		WHERE
			project_id = ?
			AND EXISTS (SELECT 1 FROM bucket_metainfos bm WHERE bm.project_id = bst.project_id)
		ORDER BY interval_start DESC
		LIMIT 1
	`), projectID[:])
	if err = latestDateRow.Scan(&latestDate); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return accounting.ProjectObjectsSegments{}, nil
		}
		return accounting.ProjectObjectsSegments{}, err
	}

	// calculate total segments and objects count.
	storageTalliesRows := db.db.QueryRowContext(ctx, db.db.Rebind(`
		SELECT
			SUM(total_segments_count),
			SUM(object_count)
		FROM
			bucket_storage_tallies
		WHERE
			project_id = ? AND
			interval_start = ?
	`), projectID[:], latestDate)
	if err = storageTalliesRows.Scan(&objectsSegments.SegmentCount, &objectsSegments.ObjectCount); err != nil {
		return accounting.ProjectObjectsSegments{}, err
	}

	return objectsSegments, nil
}

// GetProjectSegmentLimit returns project segment limit.
func (db *ProjectAccounting) GetProjectSegmentLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error) {
	defer mon.Task()(&ctx)(&err)

	row, err := db.db.Get_Project_SegmentLimit_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
	)
	if err != nil {
		return nil, err
	}

	return row.SegmentLimit, nil
}

// GetProjectTotal retrieves project usage for a given period.
func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ *accounting.ProjectUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	usages, err := db.GetProjectTotalByPartner(ctx, projectID, nil, since, before)
	if err != nil {
		return nil, err
	}
	if usage, ok := usages[""]; ok {
		return &usage, nil
	}
	return &accounting.ProjectUsage{Since: since, Before: before}, nil
}

// GetProjectTotalByPartner retrieves project usage for a given period categorized by partner name.
// Unpartnered usage or usage for a partner not present in partnerNames is mapped to the empty string.
func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, projectID uuid.UUID, partnerNames []string, since, before time.Time) (usages map[string]accounting.ProjectUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since)

	storageQuery := db.db.Rebind(`
		SELECT * FROM (
			SELECT
				COALESCE(t.bucket_name, rollups.bucket_name) AS bucket_name,
				COALESCE(t.interval_start, rollups.interval_start) AS interval_start,
				COALESCE(t.total_bytes, 0) AS total_bytes,
				COALESCE(t.inline, 0) AS inline,
				COALESCE(t.remote, 0) AS remote,
				COALESCE(t.total_segments_count, 0) AS total_segments_count,
				COALESCE(t.object_count, 0) AS object_count,
				m.user_agent,
				COALESCE(rollups.egress, 0) AS egress
			FROM
				bucket_storage_tallies AS t
			FULL OUTER JOIN (
				SELECT
					bucket_name,
					SUM(settled + inline) AS egress,
					MIN(interval_start) AS interval_start
				FROM
					bucket_bandwidth_rollups
				WHERE
					project_id = $1 AND
					interval_start >= $2 AND
					interval_start < $3 AND
					action = $4
				GROUP BY
					bucket_name
			) AS rollups ON
				t.bucket_name = rollups.bucket_name
			LEFT JOIN bucket_metainfos AS m ON
				m.project_id = $1 AND
				m.name = COALESCE(t.bucket_name, rollups.bucket_name)
			WHERE
				(t.project_id IS NULL OR t.project_id = $1) AND
				COALESCE(t.interval_start, rollups.interval_start) >= $2 AND
				COALESCE(t.interval_start, rollups.interval_start) < $3
		) AS q` + db.db.impl.AsOfSystemInterval(-10) + ` ORDER BY bucket_name, interval_start DESC`)

	usages = make(map[string]accounting.ProjectUsage)

	storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], since, before, pb.PieceAction_GET)
	if err != nil {
		return nil, err
	}
	var prevTallyForBucket = make(map[string]*accounting.BucketStorageTally)
	var recentBucket string

	for storageTalliesRows.Next() {
		tally := accounting.BucketStorageTally{}
		var userAgent []byte
		var inline, remote, egress int64
		err = storageTalliesRows.Scan(&tally.BucketName, &tally.IntervalStart, &tally.TotalBytes, &inline, &remote, &tally.TotalSegmentCount, &tally.ObjectCount, &userAgent, &egress)
		if err != nil {
			return nil, errs.Combine(err, storageTalliesRows.Close())
		}

		var partner string
		if userAgent != nil {
			entries, err := useragent.ParseEntries(userAgent)
			if err != nil {
				return nil, err
			}

			if len(entries) != 0 {
				for _, iterPartner := range partnerNames {
					if entries[0].Product == iterPartner {
						partner = iterPartner
						break
					}
				}
			}
		}

		if _, ok := usages[partner]; !ok {
			usages[partner] = accounting.ProjectUsage{Since: since, Before: before}
		}
		usage := usages[partner]

		if tally.TotalBytes == 0 {
			tally.TotalBytes = inline + remote
		}

		if tally.BucketName != recentBucket {
			usage.Egress += egress
			recentBucket = tally.BucketName
		}

		if _, ok := prevTallyForBucket[tally.BucketName]; !ok {
			prevTallyForBucket[tally.BucketName] = &tally
			usages[partner] = usage
			continue
		}
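
		// Integrate usage over time: weight this tally by the hours elapsed
		// until the bucket's previous (more recent) tally, yielding byte-hours,
		// segment-hours, and object-hours.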
		hours := prevTallyForBucket[tally.BucketName].IntervalStart.Sub(tally.IntervalStart).Hours()
		usage.Storage += memory.Size(tally.TotalBytes).Float64() * hours
		usage.SegmentCount += float64(tally.TotalSegmentCount) * hours
		usage.ObjectCount += float64(tally.ObjectCount) * hours

		usages[partner] = usage

		prevTallyForBucket[tally.BucketName] = &tally
	}

	err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
	if err != nil {
		return nil, err
	}

	return usages, nil
}

// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period.
func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []accounting.BucketUsageRollup, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since.UTC())
	before = before.UTC()

	buckets, err := db.getBucketsSinceAndBefore(ctx, projectID, since, before)
	if err != nil {
		return nil, err
	}

	var bucketUsageRollups []accounting.BucketUsageRollup
	for _, bucket := range buckets {
		bucketRollup, err := db.getSingleBucketRollup(ctx, projectID, bucket, since, before)
		if err != nil {
			return nil, err
		}

		bucketUsageRollups = append(bucketUsageRollups, *bucketRollup)
	}

	return bucketUsageRollups, nil
}

// GetSingleBucketUsageRollup retrieves the usage rollup for a single bucket of a particular project for a given period.
func (db *ProjectAccounting) GetSingleBucketUsageRollup(ctx context.Context, projectID uuid.UUID, bucket string, since, before time.Time) (_ *accounting.BucketUsageRollup, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since.UTC())
	before = before.UTC()

	bucketRollup, err := db.getSingleBucketRollup(ctx, projectID, bucket, since, before)
	if err != nil {
		return nil, err
	}

	return bucketRollup, nil
}

func (db *ProjectAccounting) getSingleBucketRollup(ctx context.Context, projectID uuid.UUID, bucket string, since, before time.Time) (*accounting.BucketUsageRollup, error) {
	rollupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
		GROUP BY action`)

	// TODO: should be optimized
	storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart

	bucketRollup := &accounting.BucketUsageRollup{
		ProjectID:  projectID,
		BucketName: bucket,
		Since:      since,
		Before:     before,
	}

	// get bucket_bandwidth_rollup
	rollupRows, err := db.db.QueryContext(ctx, rollupsQuery, projectID[:], []byte(bucket), since, before)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, rollupRows.Close()) }()

	// fill egress
	for rollupRows.Next() {
		var action pb.PieceAction
		var settled, inline int64

		err = rollupRows.Scan(&settled, &inline, &action)
		if err != nil {
			return nil, err
		}

		switch action {
		case pb.PieceAction_GET:
			bucketRollup.GetEgress += memory.Size(settled + inline).GB()
		case pb.PieceAction_GET_AUDIT:
			bucketRollup.AuditEgress += memory.Size(settled + inline).GB()
		case pb.PieceAction_GET_REPAIR:
			bucketRollup.RepairEgress += memory.Size(settled + inline).GB()
		default:
			continue
		}
	}
	if err := rollupRows.Err(); err != nil {
		return nil, err
	}

	bucketStorageTallies, err := storageQuery(ctx,
		dbx.BucketStorageTally_ProjectId(projectID[:]),
		dbx.BucketStorageTally_BucketName([]byte(bucket)),
		dbx.BucketStorageTally_IntervalStart(since),
		dbx.BucketStorageTally_IntervalStart(before))

	if err != nil {
		return nil, err
	}

	// fill metadata, objects and stored data;
	// hours are calculated from the previous tallies,
	// so we skip the most recent one
	for i := len(bucketStorageTallies) - 1; i > 0; i-- {
		current := bucketStorageTallies[i]

		hours := bucketStorageTallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()

		if current.TotalBytes > 0 {
			bucketRollup.TotalStoredData += memory.Size(current.TotalBytes).GB() * hours
		} else {
			bucketRollup.TotalStoredData += memory.Size(current.Remote+current.Inline).GB() * hours
		}
		bucketRollup.MetadataSize += memory.Size(current.MetadataSize).GB() * hours
		if current.TotalSegmentsCount > 0 {
			bucketRollup.TotalSegments += float64(current.TotalSegmentsCount) * hours
		} else {
			bucketRollup.TotalSegments += float64(current.RemoteSegmentsCount+current.InlineSegmentsCount) * hours
		}
		bucketRollup.ObjectCount += float64(current.ObjectCount) * hours
	}

	return bucketRollup, nil
}

// prefixIncrement returns the lexicographically lowest byte string which is
// greater than origPrefix and does not have origPrefix as a prefix. If no such
// byte string exists (origPrefix is empty, or origPrefix contains only 0xff
// bytes), returns false for ok.
//
// examples: prefixIncrement([]byte("abc")) -> ([]byte("abd"), true)
//
//	prefixIncrement([]byte("ab\xff\xff")) -> ([]byte("ac"), true)
//	prefixIncrement([]byte("")) -> (nil, false)
//	prefixIncrement([]byte("\x00")) -> ([]byte("\x01"), true)
//	prefixIncrement([]byte("\xff\xff\xff")) -> (nil, false)
func prefixIncrement(origPrefix []byte) (incremented []byte, ok bool) {
	incremented = make([]byte, len(origPrefix))
	copy(incremented, origPrefix)
	i := len(incremented) - 1
	for i >= 0 {
		if incremented[i] != 0xff {
			incremented[i]++
			return incremented[:i+1], true
		}
		i--
	}

	// there is no byte string which is greater than origPrefix and which does
	// not have origPrefix as a prefix.
	return nil, false
}

// prefixMatch creates a SQL expression which
// will evaluate to true if and only if the value of expr starts with the value
// of prefix.
//
// Returns also a slice of arguments that should be passed to the corresponding
// db.Query* or db.Exec* to fill in parameters in the returned SQL expression.
//
// The returned SQL expression needs to be passed through Rebind(), as it uses
// `?` markers instead of `$N`, because we don't know what N we would need to
// use.
func (db *ProjectAccounting) prefixMatch(expr string, prefix []byte) (string, []byte, error) {
	incrementedPrefix, ok := prefixIncrement(prefix)
	switch db.db.impl {
	case dbutil.Postgres:
		if !ok {
			return fmt.Sprintf(`(%s >= ?)`, expr), nil, nil
		}
		return fmt.Sprintf(`(%s >= ? AND %s < ?)`, expr, expr), incrementedPrefix, nil
	case dbutil.Cockroach:
		if !ok {
			return fmt.Sprintf(`(%s >= ?:::BYTEA)`, expr), nil, nil
		}
		return fmt.Sprintf(`(%s >= ?:::BYTEA AND %s < ?:::BYTEA)`, expr, expr), incrementedPrefix, nil
	default:
		return "", nil, errs.New("unhandled database: %v", db.db.driver)
	}
}

// GetBucketTotals retrieves bucket usage totals for a period of time.
func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor accounting.BucketUsageCursor, before time.Time) (_ *accounting.BucketUsagePage, err error) {
	defer mon.Task()(&ctx)(&err)
	bucketPrefix := []byte(cursor.Search)

	if cursor.Limit > 50 {
		cursor.Limit = 50
	}
	if cursor.Page == 0 {
		return nil, errs.New("page can not be 0")
	}

	page := &accounting.BucketUsagePage{
		Search: cursor.Search,
		Limit:  cursor.Limit,
		Offset: uint64((cursor.Page - 1) * cursor.Limit),
	}

	bucketNameRange, incrPrefix, err := db.prefixMatch("name", bucketPrefix)
	if err != nil {
		return nil, err
	}
	countQuery := db.db.Rebind(`SELECT COUNT(name) FROM bucket_metainfos
		WHERE project_id = ? AND ` + bucketNameRange)

	args := []interface{}{
		projectID[:],
		bucketPrefix,
	}
	if incrPrefix != nil {
		args = append(args, incrPrefix)
	}

	countRow := db.db.QueryRowContext(ctx, countQuery, args...)

	err = countRow.Scan(&page.TotalCount)
	if err != nil {
		return nil, err
	}

	if page.TotalCount == 0 {
		return page, nil
	}
	if page.Offset > page.TotalCount-1 {
		return nil, errs.New("page is out of range")
	}

	bucketsQuery := db.db.Rebind(`SELECT name, created_at FROM bucket_metainfos
		WHERE project_id = ? AND ` + bucketNameRange + ` ORDER BY name ASC LIMIT ? OFFSET ?`)

	args = []interface{}{
		projectID[:],
		bucketPrefix,
	}
	if incrPrefix != nil {
		args = append(args, incrPrefix)
	}
	args = append(args, page.Limit, page.Offset)

	bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, args...)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, bucketRows.Close()) }()

	type bucketWithCreationDate struct {
		name      string
		createdAt time.Time
	}

	var buckets []bucketWithCreationDate
	for bucketRows.Next() {
		var bucket string
		var createdAt time.Time
		err = bucketRows.Scan(&bucket, &createdAt)
		if err != nil {
			return nil, err
		}

		buckets = append(buckets, bucketWithCreationDate{
			name:      bucket,
			createdAt: createdAt,
		})
	}
	if err := bucketRows.Err(); err != nil {
		return nil, err
	}

	rollupsQuery := db.db.Rebind(`SELECT COALESCE(SUM(settled) + SUM(inline), 0)
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ? AND action = ?`)

	storageQuery := db.db.Rebind(`SELECT total_bytes, inline, remote, object_count, total_segments_count
		FROM bucket_storage_tallies
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
		ORDER BY interval_start DESC
		LIMIT 1`)

	var bucketUsages []accounting.BucketUsage
	for _, bucket := range buckets {
		bucketUsage := accounting.BucketUsage{
			ProjectID:  projectID,
			BucketName: bucket.name,
			Since:      bucket.createdAt,
			Before:     before,
		}

		// get bucket_bandwidth_rollups
		rollupRow := db.db.QueryRowContext(ctx, rollupsQuery, projectID[:], []byte(bucket.name), bucket.createdAt, before, pb.PieceAction_GET)

		var egress int64
		err = rollupRow.Scan(&egress)
		if err != nil {
			if !errors.Is(err, sql.ErrNoRows) {
				return nil, err
			}
		}

		bucketUsage.Egress = memory.Size(egress).GB()

		storageRow := db.db.QueryRowContext(ctx, storageQuery, projectID[:], []byte(bucket.name), bucket.createdAt, before)

		var tally accounting.BucketStorageTally
		var inline, remote int64
		err = storageRow.Scan(&tally.TotalBytes, &inline, &remote, &tally.ObjectCount, &tally.TotalSegmentCount)
		if err != nil {
			if !errors.Is(err, sql.ErrNoRows) {
				return nil, err
			}
		}

		if tally.TotalBytes == 0 {
			tally.TotalBytes = inline + remote
		}

		// fill storage and object count
		bucketUsage.Storage = memory.Size(tally.Bytes()).GB()
		bucketUsage.SegmentCount = tally.TotalSegmentCount
		bucketUsage.ObjectCount = tally.ObjectCount

		bucketUsages = append(bucketUsages, bucketUsage)
	}
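
	// PageCount is the ceiling of TotalCount / Limit.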
	page.PageCount = uint(page.TotalCount / uint64(cursor.Limit))
	if page.TotalCount%uint64(cursor.Limit) != 0 {
		page.PageCount++
	}

	page.BucketUsages = bucketUsages
	page.CurrentPage = cursor.Page
	return page, nil
}

// ArchiveRollupsBefore archives rollups older than a given time.
func (db *ProjectAccounting) ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (archivedCount int, err error) {
	defer mon.Task()(&ctx)(&err)

	if batchSize <= 0 {
		return 0, nil
	}

	switch db.db.impl {
	case dbutil.Cockroach:

		// We operate one action at a time, because we have an index on `(action, interval_start, project_id)`.
		for action := range pb.PieceAction_name {
			count, err := db.archiveRollupsBeforeByAction(ctx, action, before, batchSize)
			archivedCount += count
			if err != nil {
				return archivedCount, Error.Wrap(err)
			}
		}
		return archivedCount, nil
	case dbutil.Postgres:
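		// Postgres moves everything in one statement: the first CTE deletes the
		// expired rollups and the second inserts them into the archive table.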
		err := db.db.DB.QueryRow(ctx, `
			WITH rollups_to_move AS (
				DELETE FROM bucket_bandwidth_rollups
				WHERE interval_start <= $1
				RETURNING *
			), moved_rollups AS (
				INSERT INTO bucket_bandwidth_rollup_archives(bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
				SELECT bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled FROM rollups_to_move
				RETURNING *
			)
			SELECT count(*) FROM moved_rollups
		`, before).Scan(&archivedCount)
		return archivedCount, Error.Wrap(err)
	default:
		return 0, nil
	}
}

func (db *ProjectAccounting) archiveRollupsBeforeByAction(ctx context.Context, action int32, before time.Time, batchSize int) (archivedCount int, err error) {
	defer mon.Task()(&ctx)(&err)
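
	// Move rollups in batches of batchSize until a batch comes back short,
	// which means no matching rows remain.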
	for {
		var rowCount int
		err := db.db.QueryRow(ctx, `
			WITH rollups_to_move AS (
				DELETE FROM bucket_bandwidth_rollups
				WHERE action = $1 AND interval_start <= $2
				LIMIT $3 RETURNING *
			), moved_rollups AS (
				INSERT INTO bucket_bandwidth_rollup_archives(bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
				SELECT bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled FROM rollups_to_move
				RETURNING *
			)
			SELECT count(*) FROM moved_rollups
		`, int(action), before, batchSize).Scan(&rowCount)
		if err != nil {
			return archivedCount, Error.Wrap(err)
		}
		archivedCount += rowCount

		if rowCount < batchSize {
			return archivedCount, nil
		}
	}
}

// getBucketsSinceAndBefore lists distinct bucket names for a project within a specific timeframe.
func (db *ProjectAccounting) getBucketsSinceAndBefore(ctx context.Context, projectID uuid.UUID, since, before time.Time) (buckets []string, err error) {
	defer mon.Task()(&ctx)(&err)

	queryFormat := `SELECT DISTINCT bucket_name
		FROM %s
		WHERE project_id = ?
		AND interval_start >= ?
		AND interval_start < ?`

	bucketMap := make(map[string]struct{})
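
	// Union bucket names from both the storage tallies and the bandwidth
	// rollups, using the map to deduplicate names that appear in both tables.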
	for _, tableName := range []string{"bucket_storage_tallies", "bucket_bandwidth_rollups"} {
		query := db.db.Rebind(fmt.Sprintf(queryFormat, tableName))

		rows, err := db.db.QueryContext(ctx, query, projectID[:], since, before)
		if err != nil {
			return nil, err
		}

		for rows.Next() {
			var bucket string
			err = rows.Scan(&bucket)
			if err != nil {
				return nil, errs.Combine(err, rows.Close())
			}
			bucketMap[bucket] = struct{}{}
		}

		err = errs.Combine(rows.Err(), rows.Close())
		if err != nil {
			return nil, err
		}
	}

	for bucket := range bucketMap {
		buckets = append(buckets, bucket)
	}

	return buckets, nil
}

// timeTruncateDown truncates down to the start of the hour to stay in sync with the orders endpoint.
func timeTruncateDown(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}

// GetProjectLimits returns the current project limits for storage, bandwidth, segments, and rate.
func (db *ProjectAccounting) GetProjectLimits(ctx context.Context, projectID uuid.UUID) (_ accounting.ProjectLimits, err error) {
	defer mon.Task()(&ctx)(&err)

	row, err := db.db.Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_Project_RateLimit_Project_BurstLimit_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
	)
	if err != nil {
		return accounting.ProjectLimits{}, err
	}

	return accounting.ProjectLimits{
		Usage:     row.UsageLimit,
		Bandwidth: row.BandwidthLimit,
		Segments:  row.SegmentLimit,

		RateLimit:  row.RateLimit,
		BurstLimit: row.BurstLimit,
	}, nil
}

// GetRollupsSince retrieves all bandwidth rollup records since a given time.
func (db *ProjectAccounting) GetRollupsSince(ctx context.Context, since time.Time) (bwRollups []orders.BucketBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}

	var cursor *dbx.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
	for {
		dbxRollups, next, err := db.db.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx,
			dbx.BucketBandwidthRollup_IntervalStart(since),
			pageLimit, cursor)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		cursor = next
		for _, dbxRollup := range dbxRollups {
			projectID, err := uuid.FromBytes(dbxRollup.ProjectId)
			if err != nil {
				return nil, err
			}
			bwRollups = append(bwRollups, orders.BucketBandwidthRollup{
				ProjectID:  projectID,
				BucketName: string(dbxRollup.BucketName),
				Action:     pb.PieceAction(dbxRollup.Action),
				Inline:     int64(dbxRollup.Inline),
				Allocated:  int64(dbxRollup.Allocated),
				Settled:    int64(dbxRollup.Settled),
			})
		}
		if cursor == nil {
			return bwRollups, nil
		}
	}
}

// GetArchivedRollupsSince retrieves all archived rollup records since a given time.
func (db *ProjectAccounting) GetArchivedRollupsSince(ctx context.Context, since time.Time) (bwRollups []orders.BucketBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	pageLimit := db.db.opts.ReadRollupBatchSize
	if pageLimit <= 0 {
		pageLimit = 10000
	}

	var cursor *dbx.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
	for {
		dbxRollups, next, err := db.db.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx,
			dbx.BucketBandwidthRollupArchive_IntervalStart(since),
			pageLimit, cursor)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		cursor = next
		for _, dbxRollup := range dbxRollups {
			projectID, err := uuid.FromBytes(dbxRollup.ProjectId)
			if err != nil {
				return nil, err
			}
			bwRollups = append(bwRollups, orders.BucketBandwidthRollup{
				ProjectID:  projectID,
				BucketName: string(dbxRollup.BucketName),
				Action:     pb.PieceAction(dbxRollup.Action),
				Inline:     int64(dbxRollup.Inline),
				Allocated:  int64(dbxRollup.Allocated),
				Settled:    int64(dbxRollup.Settled),
			})
		}
		if cursor == nil {
			return bwRollups, nil
		}
	}
}