// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	"github.com/lib/pq"
	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/storj/private/dbutil"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/satellitedb/dbx"
)

// ensure that ProjectAccounting implements accounting.ProjectAccounting.
var _ accounting.ProjectAccounting = (*ProjectAccounting)(nil)

// ProjectAccounting implements the accounting/db ProjectAccounting interface.
type ProjectAccounting struct {
	db *satelliteDB
}

// SaveTallies saves the latest bucket info.
func (db *ProjectAccounting) SaveTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) (err error) {
	defer mon.Task()(&ctx)(&err)
	if len(bucketTallies) == 0 {
		return nil
	}
	var bucketNames, projectIDs [][]byte
	var inlineBytes, remoteBytes, metadataSizes []uint64
	var remoteSegments, inlineSegments, objectCounts []uint
	for _, info := range bucketTallies {
		bucketNames = append(bucketNames, info.BucketName)
		projectIDs = append(projectIDs, info.ProjectID[:])
		inlineBytes = append(inlineBytes, uint64(info.InlineBytes))
		remoteBytes = append(remoteBytes, uint64(info.RemoteBytes))
		remoteSegments = append(remoteSegments, uint(info.RemoteSegments))
		inlineSegments = append(inlineSegments, uint(info.InlineSegments))
		objectCounts = append(objectCounts, uint(info.ObjectCount))
		metadataSizes = append(metadataSizes, uint64(info.MetadataSize))
	}
	_, err = db.db.DB.ExecContext(ctx, db.db.Rebind(`
		INSERT INTO bucket_storage_tallies (
			interval_start,
			bucket_name, project_id,
			inline, remote,
			remote_segments_count, inline_segments_count,
			object_count, metadata_size)
		SELECT
			$1,
			unnest($2::bytea[]), unnest($3::bytea[]),
			unnest($4::int8[]), unnest($5::int8[]),
			unnest($6::int[]), unnest($7::int[]),
			unnest($8::int[]), unnest($9::int8[])`),
		intervalStart,
		pq.ByteaArray(bucketNames), pq.ByteaArray(projectIDs),
		pq.Array(inlineBytes), pq.Array(remoteBytes),
		pq.Array(remoteSegments), pq.Array(inlineSegments),
		pq.Array(objectCounts), pq.Array(metadataSizes))

	return Error.Wrap(err)
}
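
// unnestInsertSketch is a hedged illustration, not part of the original
// file: the bulk-insert pattern used by SaveTallies above passes each Go
// slice as a single Postgres array parameter, and unnest() zips the
// arrays back into rows, so any number of tallies costs one round-trip.
// The table and column names below are hypothetical.
func (db *ProjectAccounting) unnestInsertSketch(ctx context.Context, names [][]byte, counts []uint) error {
	_, err := db.db.DB.ExecContext(ctx, db.db.Rebind(`
		INSERT INTO example_table (name, count)
		SELECT unnest($1::bytea[]), unnest($2::int[])`),
		pq.ByteaArray(names), pq.Array(counts))
	return err
}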

// GetTallies retrieves all bucket tallies.
func (db *ProjectAccounting) GetTallies(ctx context.Context) (tallies []accounting.BucketTally, err error) {
	defer mon.Task()(&ctx)(&err)

	dbxTallies, err := db.db.All_BucketStorageTally(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	for _, dbxTally := range dbxTallies {
		projectID, err := dbutil.BytesToUUID(dbxTally.ProjectId)
		if err != nil {
			return nil, Error.Wrap(err)
		}

		tallies = append(tallies, accounting.BucketTally{
			BucketName:     dbxTally.BucketName,
			ProjectID:      projectID,
			ObjectCount:    int64(dbxTally.ObjectCount),
			InlineSegments: int64(dbxTally.InlineSegmentsCount),
			RemoteSegments: int64(dbxTally.RemoteSegmentsCount),
			InlineBytes:    int64(dbxTally.Inline),
			RemoteBytes:    int64(dbxTally.Remote),
			MetadataSize:   int64(dbxTally.MetadataSize),
		})
	}

	return tallies, nil
}

// CreateStorageTally creates a record in the bucket_storage_tallies accounting table.
func (db *ProjectAccounting) CreateStorageTally(ctx context.Context, tally accounting.BucketStorageTally) (err error) {
	defer mon.Task()(&ctx)(&err)

	return Error.Wrap(db.db.CreateNoReturn_BucketStorageTally(
		ctx,
		dbx.BucketStorageTally_BucketName([]byte(tally.BucketName)),
		dbx.BucketStorageTally_ProjectId(tally.ProjectID[:]),
		dbx.BucketStorageTally_IntervalStart(tally.IntervalStart),
		dbx.BucketStorageTally_Inline(uint64(tally.InlineBytes)),
		dbx.BucketStorageTally_Remote(uint64(tally.RemoteBytes)),
		dbx.BucketStorageTally_RemoteSegmentsCount(uint(tally.RemoteSegmentCount)),
		dbx.BucketStorageTally_InlineSegmentsCount(uint(tally.InlineSegmentCount)),
		dbx.BucketStorageTally_ObjectCount(uint(tally.ObjectCount)),
		dbx.BucketStorageTally_MetadataSize(uint64(tally.MetadataSize)),
	))
}

// GetAllocatedBandwidthTotal returns the sum of GET bandwidth usage allocated for a projectID for a time frame.
func (db *ProjectAccounting) GetAllocatedBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var sum *int64
	query := `SELECT SUM(allocated) FROM bucket_bandwidth_rollups WHERE project_id = ? AND action = ? AND interval_start > ?;`
	err = db.db.QueryRow(ctx, db.db.Rebind(query), projectID[:], pb.PieceAction_GET, from).Scan(&sum)
	if err == sql.ErrNoRows || sum == nil {
		return 0, nil
	}

	return *sum, err
}
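
// A hedged note, not part of the original file: SUM() over zero rows
// yields SQL NULL rather than 0, which is why GetAllocatedBandwidthTotal
// above scans into a *int64 (and GetStorageTotals below into
// sql.NullInt64) instead of a plain int64. An equivalent alternative is
// to push the default into the query with COALESCE(SUM(allocated), 0),
// as getTotalEgress does further down.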

// GetStorageTotals returns the current inline and remote storage usage for a projectID.
func (db *ProjectAccounting) GetStorageTotals(ctx context.Context, projectID uuid.UUID) (inline int64, remote int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var inlineSum, remoteSum sql.NullInt64
	var intervalStart time.Time

	// Sum all the inline and remote values for a project that all share the same interval_start.
	// All records for a project that have the same interval start are part of the same tally run.
	// This should represent the most recent calculation of a project's total at-rest storage.
	query := `SELECT interval_start, SUM(inline), SUM(remote)
		FROM bucket_storage_tallies
		WHERE project_id = ?
		GROUP BY interval_start
		ORDER BY interval_start DESC LIMIT 1;`

	err = db.db.QueryRow(ctx, db.db.Rebind(query), projectID[:]).Scan(&intervalStart, &inlineSum, &remoteSum)
	// a missing row or NULL sums mean no tallies exist yet; report zero usage.
	if err != nil || !inlineSum.Valid || !remoteSum.Valid {
		return 0, 0, nil
	}
	return inlineSum.Int64, remoteSum.Int64, err
}

// UpdateProjectUsageLimit updates project usage limit.
func (db *ProjectAccounting) UpdateProjectUsageLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = db.db.Update_Project_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
		dbx.Project_Update_Fields{
			UsageLimit: dbx.Project_UsageLimit(limit.Int64()),
		},
	)

	return err
}

// GetProjectStorageLimit returns project storage usage limit.
func (db *ProjectAccounting) GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
	defer mon.Task()(&ctx)(&err)
	return db.getProjectUsageLimit(ctx, projectID)
}

// GetProjectBandwidthLimit returns project bandwidth usage limit.
func (db *ProjectAccounting) GetProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
	defer mon.Task()(&ctx)(&err)
	return db.getProjectUsageLimit(ctx, projectID)
}

// getProjectUsageLimit returns project usage limit.
func (db *ProjectAccounting) getProjectUsageLimit(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
	defer mon.Task()(&ctx)(&err)

	row, err := db.db.Get_Project_UsageLimit_By_Id(ctx,
		dbx.Project_Id(projectID[:]),
	)
	if err != nil {
		return 0, err
	}

	return memory.Size(row.UsageLimit), nil
}

// GetProjectTotal retrieves project usage for a given period.
func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *accounting.ProjectUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since)

	bucketNames, err := db.getBuckets(ctx, projectID, since, before)
	if err != nil {
		return nil, err
	}

	storageQuery := db.db.Rebind(`
		SELECT
			bucket_storage_tallies.interval_start,
			bucket_storage_tallies.inline,
			bucket_storage_tallies.remote,
			bucket_storage_tallies.object_count
		FROM
			bucket_storage_tallies
		WHERE
			bucket_storage_tallies.project_id = ? AND
			bucket_storage_tallies.bucket_name = ? AND
			bucket_storage_tallies.interval_start >= ? AND
			bucket_storage_tallies.interval_start <= ?
		ORDER BY bucket_storage_tallies.interval_start DESC
	`)

	bucketsTallies := make(map[string][]*accounting.BucketStorageTally)

	for _, bucket := range bucketNames {
		storageTallies := make([]*accounting.BucketStorageTally, 0)

		storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
		if err != nil {
			return nil, err
		}

		// generating tallies for each bucket name.
		for storageTalliesRows.Next() {
			tally := accounting.BucketStorageTally{}

			err = storageTalliesRows.Scan(&tally.IntervalStart, &tally.InlineBytes, &tally.RemoteBytes, &tally.ObjectCount)
			if err != nil {
				return nil, errs.Combine(err, storageTalliesRows.Close())
			}
			tally.BucketName = bucket
			storageTallies = append(storageTallies, &tally)
		}

		err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
		if err != nil {
			return nil, err
		}

		bucketsTallies[bucket] = storageTallies
	}

	totalEgress, err := db.getTotalEgress(ctx, projectID, since, before)
	if err != nil {
		return nil, err
	}

	usage = new(accounting.ProjectUsage)
	usage.Egress = memory.Size(totalEgress).Int64()

	// sum up storage and objects: each tally is weighted by the hours until
	// the next (more recent) tally, so the most recent tally itself is skipped.
	for _, tallies := range bucketsTallies {
		for i := len(tallies) - 1; i > 0; i-- {
			current := tallies[i]

			hours := tallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()

			usage.Storage += memory.Size(current.InlineBytes).Float64() * hours
			usage.Storage += memory.Size(current.RemoteBytes).Float64() * hours
			usage.ObjectCount += float64(current.ObjectCount) * hours
		}
	}

	usage.Since = since
	usage.Before = before
	return usage, nil
}
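
// byteHoursSketch is a hedged worked example, not part of the original
// file, isolating the integration performed in GetProjectTotal above.
// Tallies arrive ordered newest-first; iterating from the oldest, each
// tally's stored bytes are weighted by the hours until the next tally,
// yielding byte-hours. The newest tally is skipped because its interval
// has not elapsed yet.
func byteHoursSketch(tallies []*accounting.BucketStorageTally) (byteHours float64) {
	for i := len(tallies) - 1; i > 0; i-- {
		current := tallies[i]
		hours := tallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
		byteHours += memory.Size(current.InlineBytes+current.RemoteBytes).Float64() * hours
	}
	return byteHours
}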

// getTotalEgress returns the total egress (settled + inline) summed across all
// bucket_bandwidth_rollups for the given project in the selected time period.
// Only PieceAction_GET rows are counted.
func (db *ProjectAccounting) getTotalEgress(ctx context.Context, projectID uuid.UUID, since, before time.Time) (totalEgress int64, err error) {
	totalEgressQuery := db.db.Rebind(`
		SELECT
			COALESCE(SUM(settled) + SUM(inline), 0)
		FROM
			bucket_bandwidth_rollups
		WHERE
			project_id = ? AND
			interval_start >= ? AND
			interval_start <= ? AND
			action = ?;
	`)

	totalEgressRow := db.db.QueryRowContext(ctx, totalEgressQuery, projectID[:], since, before, pb.PieceAction_GET)

	err = totalEgressRow.Scan(&totalEgress)

	return totalEgress, err
}

// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period.
func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []accounting.BucketUsageRollup, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since.UTC())
	before = before.UTC()

	buckets, err := db.getBuckets(ctx, projectID, since, before)
	if err != nil {
		return nil, err
	}

	rollupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
		GROUP BY action`)

	// TODO: should be optimized
	storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart

	var bucketUsageRollups []accounting.BucketUsageRollup
	for _, bucket := range buckets {
		bucketRollup := accounting.BucketUsageRollup{
			ProjectID:  projectID,
			BucketName: []byte(bucket),
			Since:      since,
			Before:     before,
		}

		// get bucket_bandwidth_rollups
		rollupsRows, err := db.db.QueryContext(ctx, rollupsQuery, projectID[:], []byte(bucket), since, before)
		if err != nil {
			return nil, err
		}
		defer func() { err = errs.Combine(err, rollupsRows.Close()) }()

		// fill egress
		for rollupsRows.Next() {
			var action pb.PieceAction
			var settled, inline int64

			err = rollupsRows.Scan(&settled, &inline, &action)
			if err != nil {
				return nil, err
			}

			switch action {
			case pb.PieceAction_GET:
				bucketRollup.GetEgress += memory.Size(settled + inline).GB()
			case pb.PieceAction_GET_AUDIT:
				bucketRollup.AuditEgress += memory.Size(settled + inline).GB()
			case pb.PieceAction_GET_REPAIR:
				bucketRollup.RepairEgress += memory.Size(settled + inline).GB()
			default:
				continue
			}
		}
		if err := rollupsRows.Err(); err != nil {
			return nil, err
		}

		bucketStorageTallies, err := storageQuery(ctx,
			dbx.BucketStorageTally_ProjectId(projectID[:]),
			dbx.BucketStorageTally_BucketName([]byte(bucket)),
			dbx.BucketStorageTally_IntervalStart(since),
			dbx.BucketStorageTally_IntervalStart(before))

		if err != nil {
			return nil, err
		}

		// fill metadata, objects and stored data;
		// hours are calculated from the previous tallies,
		// so we skip the most recent one
		for i := len(bucketStorageTallies) - 1; i > 0; i-- {
			current := bucketStorageTallies[i]

			hours := bucketStorageTallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()

			bucketRollup.RemoteStoredData += memory.Size(current.Remote).GB() * hours
			bucketRollup.InlineStoredData += memory.Size(current.Inline).GB() * hours
			bucketRollup.MetadataSize += memory.Size(current.MetadataSize).GB() * hours
			bucketRollup.RemoteSegments += float64(current.RemoteSegmentsCount) * hours
			bucketRollup.InlineSegments += float64(current.InlineSegmentsCount) * hours
			bucketRollup.ObjectCount += float64(current.ObjectCount) * hours
		}

		bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
	}

	return bucketUsageRollups, nil
}

// prefixIncrement returns the lexicographically lowest byte string which is
// greater than origPrefix and does not have origPrefix as a prefix. If no such
// byte string exists (origPrefix is empty, or origPrefix contains only 0xff
// bytes), returns false for ok.
//
// examples: prefixIncrement([]byte("abc"))          -> ([]byte("abd"), true)
//           prefixIncrement([]byte("ab\xff\xff"))   -> ([]byte("ac"), true)
//           prefixIncrement([]byte(""))             -> (nil, false)
//           prefixIncrement([]byte("\x00"))         -> ([]byte("\x01"), true)
//           prefixIncrement([]byte("\xff\xff\xff")) -> (nil, false)
func prefixIncrement(origPrefix []byte) (incremented []byte, ok bool) {
	incremented = make([]byte, len(origPrefix))
	copy(incremented, origPrefix)
	i := len(incremented) - 1
	for i >= 0 {
		if incremented[i] != 0xff {
			incremented[i]++
			return incremented[:i+1], true
		}
		i--
	}

	// there is no byte string which is greater than origPrefix and which does
	// not have origPrefix as a prefix.
	return nil, false
}
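
// prefixIncrementExample is a hedged, illustrative sketch, not part of the
// original file, exercising the documented behavior of prefixIncrement above.
func prefixIncrementExample() {
	// "abc" increments its last byte: "abd".
	if next, ok := prefixIncrement([]byte("abc")); ok {
		_ = next // []byte("abd")
	}
	// Trailing 0xff bytes are dropped and the preceding byte increments:
	// "ab\xff\xff" -> "ac".
	if next, ok := prefixIncrement([]byte("ab\xff\xff")); ok {
		_ = next // []byte("ac")
	}
	// An all-0xff (or empty) prefix has no prefix-free upper bound, so ok
	// is false and a range scan would use only the lower bound.
	if _, ok := prefixIncrement([]byte("\xff\xff\xff")); !ok {
		return
	}
}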

// prefixMatch creates a SQL expression which
// will evaluate to true if and only if the value of expr starts with the value
// of prefix.
//
// It also returns the incremented prefix to be used as the range's exclusive
// upper bound (nil when no upper bound exists); the caller passes the original
// prefix and, when non-nil, this value as arguments to the corresponding
// db.Query* or db.Exec* call to fill in the parameters of the returned SQL
// expression.
//
// The returned SQL expression needs to be passed through Rebind(), as it uses
// `?` markers instead of `$N`, because we don't know what N we would need to
// use.
func (db *ProjectAccounting) prefixMatch(expr string, prefix []byte) (string, []byte, error) {
	incrementedPrefix, ok := prefixIncrement(prefix)
	switch db.db.implementation {
	case dbutil.Postgres:
		if !ok {
			return fmt.Sprintf(`(%s >= ?)`, expr), nil, nil
		}
		return fmt.Sprintf(`(%s >= ? AND %s < ?)`, expr, expr), incrementedPrefix, nil
	case dbutil.Cockroach:
		if !ok {
			return fmt.Sprintf(`(%s >= ?:::BYTEA)`, expr), nil, nil
		}
		return fmt.Sprintf(`(%s >= ?:::BYTEA AND %s < ?:::BYTEA)`, expr, expr), incrementedPrefix, nil
	default:
		return "", nil, errs.New("invalid dbType: %v", db.db.driver)
	}
}
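
// prefixMatchSketch is a hedged sketch, not part of the original file,
// showing how the expression and upper bound returned by prefixMatch are
// threaded into a query; it mirrors the pattern used by GetBucketTotals
// below.
func (db *ProjectAccounting) prefixMatchSketch(ctx context.Context, projectID uuid.UUID, prefix []byte) (count int64, err error) {
	rangeExpr, incrPrefix, err := db.prefixMatch("bucket_name", prefix)
	if err != nil {
		return 0, err
	}
	query := db.db.Rebind(`SELECT COUNT(DISTINCT bucket_name)
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND ` + rangeExpr)
	args := []interface{}{projectID[:], prefix}
	if incrPrefix != nil {
		// the range form `(expr >= ? AND expr < ?)` needs the upper bound too.
		args = append(args, incrPrefix)
	}
	err = db.db.QueryRowContext(ctx, query, args...).Scan(&count)
	return count, err
}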

// GetBucketTotals retrieves bucket usage totals for a given period.
func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor accounting.BucketUsageCursor, since, before time.Time) (_ *accounting.BucketUsagePage, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since)
	bucketPrefix := []byte(cursor.Search)

	if cursor.Limit > 50 {
		cursor.Limit = 50
	}
	if cursor.Page == 0 {
		return nil, errs.New("page cannot be 0")
	}

	page := &accounting.BucketUsagePage{
		Search: cursor.Search,
		Limit:  cursor.Limit,
		Offset: uint64((cursor.Page - 1) * cursor.Limit),
	}

	bucketNameRange, incrPrefix, err := db.prefixMatch("bucket_name", bucketPrefix)
	if err != nil {
		return nil, err
	}
	countQuery := db.db.Rebind(`SELECT COUNT(DISTINCT bucket_name)
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
		AND ` + bucketNameRange)

	args := []interface{}{
		projectID[:],
		since,
		before,
		bucketPrefix,
	}
	if incrPrefix != nil {
		args = append(args, incrPrefix)
	}

	countRow := db.db.QueryRowContext(ctx, countQuery, args...)

	err = countRow.Scan(&page.TotalCount)
	if err != nil {
		return nil, err
	}

	if page.TotalCount == 0 {
		return page, nil
	}
	if page.Offset > page.TotalCount-1 {
		return nil, errs.New("page is out of range")
	}

	var buckets []string
	bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
		AND ` + bucketNameRange + ` ORDER BY bucket_name ASC
		LIMIT ? OFFSET ?`)

	args = []interface{}{
		projectID[:],
		since,
		before,
		bucketPrefix,
	}
	if incrPrefix != nil {
		args = append(args, incrPrefix)
	}
	args = append(args, page.Limit, page.Offset)

	bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, args...)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, bucketRows.Close()) }()

	for bucketRows.Next() {
		var bucket string
		err = bucketRows.Scan(&bucket)
		if err != nil {
			return nil, err
		}

		buckets = append(buckets, bucket)
	}
	if err := bucketRows.Err(); err != nil {
		return nil, err
	}

	rollupsQuery := db.db.Rebind(`SELECT COALESCE(SUM(settled) + SUM(inline), 0)
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ? AND action = ?`)

	storageQuery := db.db.Rebind(`SELECT inline, remote, object_count
		FROM bucket_storage_tallies
		WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
		ORDER BY interval_start DESC
		LIMIT 1`)

	var bucketUsages []accounting.BucketUsage
	for _, bucket := range buckets {
		bucketUsage := accounting.BucketUsage{
			ProjectID:  projectID,
			BucketName: bucket,
			Since:      since,
			Before:     before,
		}

		// get bucket_bandwidth_rollups
		rollupRow := db.db.QueryRowContext(ctx, rollupsQuery, projectID[:], []byte(bucket), since, before, pb.PieceAction_GET)

		var egress int64
		err = rollupRow.Scan(&egress)
		if err != nil {
			if err != sql.ErrNoRows {
				return nil, err
			}
		}

		bucketUsage.Egress = memory.Size(egress).GB()

		storageRow := db.db.QueryRowContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)

		var inline, remote, objectCount int64
		err = storageRow.Scan(&inline, &remote, &objectCount)
		if err != nil {
			if err != sql.ErrNoRows {
				return nil, err
			}
		}

		// fill storage and object count
		bucketUsage.Storage = memory.Size(inline + remote).GB()
		bucketUsage.ObjectCount = objectCount

		bucketUsages = append(bucketUsages, bucketUsage)
	}

	// ceiling division: a partial trailing page still counts as a page.
	page.PageCount = uint(page.TotalCount / uint64(cursor.Limit))
	if page.TotalCount%uint64(cursor.Limit) != 0 {
		page.PageCount++
	}

	page.BucketUsages = bucketUsages
	page.CurrentPage = cursor.Page
	return page, nil
}
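
// pageCountSketch is a hedged sketch, not part of the original file,
// isolating the ceiling division GetBucketTotals above uses to derive the
// page count from the total row count and the page limit.
func pageCountSketch(totalCount uint64, limit uint) uint {
	pages := uint(totalCount / uint64(limit))
	if totalCount%uint64(limit) != 0 {
		pages++ // a partial trailing page still counts as a page
	}
	return pages
}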

// getBuckets lists all buckets of a certain project for a given period.
func (db *ProjectAccounting) getBuckets(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []string, err error) {
	defer mon.Task()(&ctx)(&err)
	bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
		FROM bucket_bandwidth_rollups
		WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?`)

	bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, projectID[:], since, before)
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, bucketRows.Close()) }()

	var buckets []string
	for bucketRows.Next() {
		var bucket string
		err = bucketRows.Scan(&bucket)
		if err != nil {
			return nil, err
		}

		buckets = append(buckets, bucket)
	}

	return buckets, bucketRows.Err()
}

// timeTruncateDown truncates the time down to the start of the hour,
// to stay in sync with the orders endpoint.
func timeTruncateDown(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}
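
// timeTruncateDownExample is a hedged sketch, not part of the original
// file, illustrating that timeTruncateDown drops minutes, seconds, and
// sub-second precision while preserving the location.
func timeTruncateDownExample() {
	t := time.Date(2020, time.January, 15, 20, 35, 8, 123, time.UTC)
	truncated := timeTruncateDown(t)
	_ = truncated // 2020-01-15 20:00:00 +0000 UTC
}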