// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"bytes"
	"context"
	"database/sql"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"

	"storj.io/storj/pkg/accounting"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// accountingDB implements the accounting database interface
type accountingDB struct {
	db *dbx.DB
}

// ProjectAllocatedBandwidthTotal returns the sum of GET bandwidth usage allocated for a projectID over a time frame
func (db *accountingDB) ProjectAllocatedBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error) {
	// bucketID has the form "<projectID>/<bucketName>"; only the project ID is needed here.
	pathEl := bytes.Split(bucketID, []byte("/"))
	projectID := pathEl[0]

	// Scan into a pointer so a NULL SUM (no matching rollups) is distinguishable from zero.
	var sum *int64
	query := `SELECT SUM(allocated) FROM bucket_bandwidth_rollups WHERE project_id = ? AND action = ? AND interval_start > ?;`
	err := db.db.QueryRow(db.db.Rebind(query), projectID, pb.PieceAction_GET, from).Scan(&sum)
	if err == sql.ErrNoRows || sum == nil {
		return 0, nil
	}

	return *sum, err
}
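
// The following sketch is illustrative and not part of the original file: it shows
// how a caller might assemble the "<projectID>/<bucketName>" bucketID this method
// expects. The helper name and the 30-day window are assumptions.
func exampleMonthlyGetBandwidth(ctx context.Context, db *accountingDB, projectID uuid.UUID, bucketName string) (int64, error) {
	bucketID := []byte(projectID.String() + "/" + bucketName)
	from := time.Now().AddDate(0, 0, -30) // hypothetical 30-day accounting window
	return db.ProjectAllocatedBandwidthTotal(ctx, bucketID, from)
}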

// ProjectStorageTotals returns the current inline and remote storage usage for a projectID
func (db *accountingDB) ProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
	var inlineSum, remoteSum sql.NullInt64
	var intervalStart time.Time

	// Sum the inline and remote values for all of the project's records that share the same
	// interval_start. All records for a project with the same interval start belong to the
	// same tally run, so the latest interval represents the most recent calculation of the
	// project's total at-rest storage.
	query := `SELECT interval_start, SUM(inline), SUM(remote)
		FROM bucket_storage_tallies
		WHERE project_id = ?
		GROUP BY interval_start
		ORDER BY interval_start DESC LIMIT 1;`

	err := db.db.QueryRow(db.db.Rebind(query), projectID[:]).Scan(&intervalStart, &inlineSum, &remoteSum)
	// No rows or NULL sums simply means no tally has run yet for this project.
	if err != nil || !inlineSum.Valid || !remoteSum.Valid {
		return 0, 0, nil
	}
	return inlineSum.Int64, remoteSum.Int64, nil
}
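
// Illustrative only (not upstream API): a billing caller might combine the two
// totals into a single usage figure. The helper name is an assumption.
func exampleProjectStorageUsage(ctx context.Context, db *accountingDB, projectID uuid.UUID) (int64, error) {
	inline, remote, err := db.ProjectStorageTotals(ctx, projectID)
	if err != nil {
		return 0, err
	}
	return inline + remote, nil
}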

// CreateBucketStorageTally creates a record in the bucket_storage_tallies accounting table
func (db *accountingDB) CreateBucketStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error {
	_, err := db.db.Create_BucketStorageTally(
		ctx,
		dbx.BucketStorageTally_BucketName([]byte(tally.BucketName)),
		dbx.BucketStorageTally_ProjectId(tally.ProjectID[:]),
		dbx.BucketStorageTally_IntervalStart(tally.IntervalStart),
		dbx.BucketStorageTally_Inline(uint64(tally.InlineBytes)),
		dbx.BucketStorageTally_Remote(uint64(tally.RemoteBytes)),
		dbx.BucketStorageTally_RemoteSegmentsCount(uint(tally.RemoteSegmentCount)),
		dbx.BucketStorageTally_InlineSegmentsCount(uint(tally.InlineSegmentCount)),
		dbx.BucketStorageTally_ObjectCount(uint(tally.ObjectCount)),
		dbx.BucketStorageTally_MetadataSize(uint64(tally.MetadataSize)),
	)
	return err
}
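
// A hypothetical construction of a tally before insertion; every value below is
// invented for illustration, the field types are inferred from the conversions
// above, and the helper is not part of the original code.
func exampleCreateTally(ctx context.Context, db *accountingDB, projectID uuid.UUID) error {
	return db.CreateBucketStorageTally(ctx, accounting.BucketStorageTally{
		BucketName:         "example-bucket",
		ProjectID:          projectID,
		IntervalStart:      time.Now(),
		InlineBytes:        1 * 1024,
		RemoteBytes:        4 * 1024,
		RemoteSegmentCount: 2,
		InlineSegmentCount: 1,
		ObjectCount:        3,
		MetadataSize:       256,
	})
}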

// LastTimestamp retrieves the last tallied timestamp recorded under the given name,
// creating a zero-valued entry if none exists yet
func (db *accountingDB) LastTimestamp(ctx context.Context, timestampType string) (time.Time, error) {
	lastTally := time.Time{}
	err := db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
		lt, err := tx.Find_AccountingTimestamps_Value_By_Name(ctx, dbx.AccountingTimestamps_Name(timestampType))
		if lt == nil {
			// First run for this timestamp type: seed the row with the zero time.
			update := dbx.AccountingTimestamps_Value(lastTally)
			_, err = tx.Create_AccountingTimestamps(ctx, dbx.AccountingTimestamps_Name(timestampType), update)
			return err
		}
		lastTally = lt.Value
		return err
	})
	return lastTally, err
}
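
// Illustrative call, using the accounting.LastAtRestTally constant referenced
// later in this file:
//
//	lastTally, err := db.LastTimestamp(ctx, accounting.LastAtRestTally)
//
// A zero time.Time result means the timestamp row was just created and no tally
// has been recorded yet.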

// SaveAtRestRaw records raw tallies of at-rest data to the database
func (db *accountingDB) SaveAtRestRaw(ctx context.Context, latestTally time.Time, created time.Time, nodeData map[storj.NodeID]float64) error {
	if len(nodeData) == 0 {
		return Error.New("In SaveAtRestRaw with empty nodeData")
	}
	err := db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
		for k, v := range nodeData {
			nID := dbx.AccountingRaw_NodeId(k.Bytes())
			end := dbx.AccountingRaw_IntervalEndTime(latestTally)
			total := dbx.AccountingRaw_DataTotal(v)
			dataType := dbx.AccountingRaw_DataType(accounting.AtRest)
			timestamp := dbx.AccountingRaw_CreatedAt(created)
			_, err := tx.Create_AccountingRaw(ctx, nID, end, total, dataType, timestamp)
			if err != nil {
				return err
			}
		}
		// Advance the last-tally marker in the same transaction so the raw rows and
		// the timestamp can never get out of sync.
		update := dbx.AccountingTimestamps_Update_Fields{Value: dbx.AccountingTimestamps_Value(latestTally)}
		_, err := tx.Update_AccountingTimestamps_By_Name(ctx, dbx.AccountingTimestamps_Name(accounting.LastAtRestTally), update)
		return err
	})
	return Error.Wrap(err)
}
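
// Hypothetical sketch of a tally save; the node data value (at-rest byte total)
// is invented and the helper is not part of the original code.
func exampleSaveAtRestTally(ctx context.Context, db *accountingDB, nodeID storj.NodeID) error {
	nodeData := map[storj.NodeID]float64{nodeID: 1.5e9}
	now := time.Now()
	return db.SaveAtRestRaw(ctx, now, now, nodeData)
}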

// GetRaw retrieves all raw tallies
func (db *accountingDB) GetRaw(ctx context.Context) ([]*accounting.Raw, error) {
	raws, err := db.db.All_AccountingRaw(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	out := make([]*accounting.Raw, len(raws))
	for i, r := range raws {
		nodeID, err := storj.NodeIDFromBytes(r.NodeId)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		out[i] = &accounting.Raw{
			ID:              r.Id,
			NodeID:          nodeID,
			IntervalEndTime: r.IntervalEndTime,
			DataTotal:       r.DataTotal,
			DataType:        r.DataType,
			CreatedAt:       r.CreatedAt,
		}
	}
	return out, nil
}

// GetRawSince retrieves all raw tallies since latestRollup
func (db *accountingDB) GetRawSince(ctx context.Context, latestRollup time.Time) ([]*accounting.Raw, error) {
	raws, err := db.db.All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx, dbx.AccountingRaw_IntervalEndTime(latestRollup))
	if err != nil {
		return nil, Error.Wrap(err)
	}
	out := make([]*accounting.Raw, len(raws))
	for i, r := range raws {
		nodeID, err := storj.NodeIDFromBytes(r.NodeId)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		out[i] = &accounting.Raw{
			ID:              r.Id,
			NodeID:          nodeID,
			IntervalEndTime: r.IntervalEndTime,
			DataTotal:       r.DataTotal,
			DataType:        r.DataType,
			CreatedAt:       r.CreatedAt,
		}
	}
	return out, nil
}
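
// Note on the boundary: GetRawSince uses a greater-or-equal match, so a tally
// whose interval end lands exactly on latestRollup is included in the result.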

// GetStoragenodeBandwidthSince retrieves all storagenode_bandwidth_rollup entries since latestRollup
func (db *accountingDB) GetStoragenodeBandwidthSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeBandwidthRollup, error) {
	rollups, err := db.db.All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx, dbx.StoragenodeBandwidthRollup_IntervalStart(latestRollup))
	if err != nil {
		return nil, Error.Wrap(err)
	}
	out := make([]*accounting.StoragenodeBandwidthRollup, len(rollups))
	for i, r := range rollups {
		nodeID, err := storj.NodeIDFromBytes(r.StoragenodeId)
		if err != nil {
			return nil, Error.Wrap(err)
		}
		out[i] = &accounting.StoragenodeBandwidthRollup{
			NodeID:        nodeID,
			IntervalStart: r.IntervalStart,
			Action:        r.Action,
			Settled:       r.Settled,
		}
	}
	return out, nil
}
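
// Note: only node ID, interval start, action, and the settled amount are copied
// into the returned accounting struct; other rollup columns are not carried over.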

// SaveRollup records rollup stats for nodes to the accounting_rollups table
func (db *accountingDB) SaveRollup(ctx context.Context, latestRollup time.Time, stats accounting.RollupStats) error {
	if len(stats) == 0 {
		return Error.New("In SaveRollup with empty stats")
	}
	err := db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
		for _, arsByDate := range stats {
			for _, ar := range arsByDate {
				nID := dbx.AccountingRollup_NodeId(ar.NodeID.Bytes())
				start := dbx.AccountingRollup_StartTime(ar.StartTime)
				put := dbx.AccountingRollup_PutTotal(ar.PutTotal)
				get := dbx.AccountingRollup_GetTotal(ar.GetTotal)
				audit := dbx.AccountingRollup_GetAuditTotal(ar.GetAuditTotal)
				getRepair := dbx.AccountingRollup_GetRepairTotal(ar.GetRepairTotal)
				putRepair := dbx.AccountingRollup_PutRepairTotal(ar.PutRepairTotal)
				atRest := dbx.AccountingRollup_AtRestTotal(ar.AtRestTotal)
				_, err := tx.Create_AccountingRollup(ctx, nID, start, put, get, audit, getRepair, putRepair, atRest)
				if err != nil {
					return err
				}
			}
		}
		// Advance the last-rollup marker inside the same transaction.
		update := dbx.AccountingTimestamps_Update_Fields{Value: dbx.AccountingTimestamps_Value(latestRollup)}
		_, err := tx.Update_AccountingTimestamps_By_Name(ctx, dbx.AccountingTimestamps_Name(accounting.LastRollup), update)
		return err
	})
	return Error.Wrap(err)
}
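
// Illustrative call, mirroring LastTimestamp above: after a rollup pass completes,
// the caller would persist its stats and the new watermark in one transaction:
//
//	err := db.SaveRollup(ctx, latestRollup, stats)
//
// where stats is an accounting.RollupStats nested first by date and then by node,
// as the loop above iterates it.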

// SaveBucketTallies saves the latest bucket info
func (db *accountingDB) SaveBucketTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) ([]accounting.BucketTally, error) {
	if len(bucketTallies) == 0 {
		return nil, Error.New("In SaveBucketTallies with empty bucketTallies")
	}

	var result []accounting.BucketTally

	for bucketID, info := range bucketTallies {
		// bucketID has the form "<projectID>/<bucketName>".
		bucketIDComponents := storj.SplitPath(bucketID)
		bucketName := dbx.BucketStorageTally_BucketName([]byte(bucketIDComponents[1]))
		projectID := dbx.BucketStorageTally_ProjectId([]byte(bucketIDComponents[0]))
		interval := dbx.BucketStorageTally_IntervalStart(intervalStart)
		inlineBytes := dbx.BucketStorageTally_Inline(uint64(info.InlineBytes))
		remoteBytes := dbx.BucketStorageTally_Remote(uint64(info.RemoteBytes))
		rSegments := dbx.BucketStorageTally_RemoteSegmentsCount(uint(info.RemoteSegments))
		iSegments := dbx.BucketStorageTally_InlineSegmentsCount(uint(info.InlineSegments))
		objectCount := dbx.BucketStorageTally_ObjectCount(uint(info.Files))
		meta := dbx.BucketStorageTally_MetadataSize(uint64(info.MetadataSize))
		dbxTally, err := db.db.Create_BucketStorageTally(ctx, bucketName, projectID, interval, inlineBytes, remoteBytes, rSegments, iSegments, objectCount, meta)
		if err != nil {
			return nil, err
		}
		tally := accounting.BucketTally{
			BucketName:     dbxTally.BucketName,
			ProjectID:      dbxTally.ProjectId,
			InlineSegments: int64(dbxTally.InlineSegmentsCount),
			RemoteSegments: int64(dbxTally.RemoteSegmentsCount),
			Files:          int64(dbxTally.ObjectCount),
			InlineBytes:    int64(dbxTally.Inline),
			RemoteBytes:    int64(dbxTally.Remote),
			MetadataSize:   int64(dbxTally.MetadataSize),
		}
		result = append(result, tally)
	}
	return result, nil
}
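
// The bucketTallies keys are "<projectID>/<bucketName>" paths, which is why
// storj.SplitPath recovers the project ID and bucket name above.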

// QueryPaymentInfo queries the overlay and accounting rollup tables and returns one payment row per node
func (db *accountingDB) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) (csv []*accounting.CSVRow, err error) {
	var sqlStmt = `SELECT n.id, n.created_at, n.audit_success_ratio, r.at_rest_total, r.get_repair_total,
		r.put_repair_total, r.get_audit_total, r.put_total, r.get_total, n.wallet
		FROM (
			SELECT node_id, SUM(at_rest_total) AS at_rest_total, SUM(get_repair_total) AS get_repair_total,
				SUM(put_repair_total) AS put_repair_total, SUM(get_audit_total) AS get_audit_total,
				SUM(put_total) AS put_total, SUM(get_total) AS get_total
			FROM accounting_rollups
			WHERE start_time >= ? AND start_time < ?
			GROUP BY node_id
		) r
		LEFT JOIN nodes n ON n.id = r.node_id
		ORDER BY n.id`
	rows, err := db.db.DB.QueryContext(ctx, db.db.Rebind(sqlStmt), start.UTC(), end.UTC())
	if err != nil {
		return nil, Error.Wrap(err)
	}
	// Named results let the deferred Close fold its error into the returned err.
	defer func() { err = errs.Combine(err, rows.Close()) }()

	csv = make([]*accounting.CSVRow, 0)
	for rows.Next() {
		var nodeID []byte
		r := &accounting.CSVRow{}
		// wallet comes from the LEFT JOIN, so it can be NULL for nodes missing from the overlay.
		var wallet sql.NullString
		err := rows.Scan(&nodeID, &r.NodeCreationDate, &r.AuditSuccessRatio, &r.AtRestTotal, &r.GetRepairTotal,
			&r.PutRepairTotal, &r.GetAuditTotal, &r.PutTotal, &r.GetTotal, &wallet)
		if err != nil {
			return csv, Error.Wrap(err)
		}
		if wallet.Valid {
			r.Wallet = wallet.String
		}
		id, err := storj.NodeIDFromBytes(nodeID)
		if err != nil {
			return csv, Error.Wrap(err)
		}
		r.NodeID = id
		csv = append(csv, r)
	}
	return csv, nil
}
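
// The rows come back ordered by node ID (see ORDER BY n.id), so the resulting
// CSV rows are stable across runs for the same start/end window.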

// DeleteRawBefore deletes all raw tallies prior to some time
func (db *accountingDB) DeleteRawBefore(ctx context.Context, latestRollup time.Time) error {
	var deleteRawSQL = `DELETE FROM accounting_raws WHERE interval_end_time < ?`
	_, err := db.db.DB.ExecContext(ctx, db.db.Rebind(deleteRawSQL), latestRollup)
	return err
}
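
// Hypothetical maintenance sketch tying two methods in this file together: raw
// tallies that predate the last completed rollup can be trimmed. The helper name
// is an assumption, not part of the original code.
func exampleTrimRaws(ctx context.Context, db *accountingDB) error {
	latestRollup, err := db.LastTimestamp(ctx, accounting.LastRollup)
	if err != nil {
		return err
	}
	return db.DeleteRawBefore(ctx, latestRollup)
}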