project usage limiting (#1561)

* reorg uplink cmd files for consistency

* init implementation of usage limiting

* Revert "reorg uplink cmd files for consistency"

This reverts commit 91ced7639bf36fc8af1db237b01e233ca92f1890.

* add changes per CR comments

* fix custom query to use rebind

* updates per convo about what to limit on

* changes per comments

* fix syntax and comments

* add integration test, add db methods for test

* update migration, add rebind to query

* update testdata for psql

* remove unneeded drop index statement

* fix migrations, fix calculate usage limit

* fix comment

* add audit test back

* change methods to use bucketName/projectID, fix tests

* add changes per CR comments

* add test for uplink upload and err msg

* changes per CR comments

* check get/put limit separately
Jess G 2019-04-02 11:21:18 -07:00 committed by GitHub
parent 2aeab10d50
commit d51bdf14df
19 changed files with 893 additions and 183 deletions

View File

@@ -447,7 +447,8 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
 	Interval: 30 * time.Second,
 },
 Rollup: rollup.Config{
-	Interval: 120 * time.Second,
+	Interval:      2 * time.Minute,
+	MaxAlphaUsage: 25 * memory.GB,
 },
 Mail: mailservice.Config{
 	SMTPServerAddress: "smtp.mail.example.com:587",

View File

@@ -58,3 +58,33 @@ type BucketRollup struct {
 	GetEgress   uint64
 	AuditEgress uint64
 }
+
+// BucketBandwidthRollup contains data about bandwidth rollup
+type BucketBandwidthRollup struct {
+	BucketName string
+	ProjectID  uuid.UUID
+
+	IntervalStart   time.Time
+	IntervalSeconds uint
+	Action          uint
+
+	Inline    uint64
+	Allocated uint64
+	Settled   uint64
+}
+
+// BucketStorageTally holds data about a bucket tally
+type BucketStorageTally struct {
+	BucketName    string
+	ProjectID     uuid.UUID
+	IntervalStart time.Time
+
+	InlineSegmentCount int64
+	RemoteSegmentCount int64
+
+	ObjectCount int64
+
+	InlineBytes  int64
+	RemoteBytes  int64
+	MetadataSize int64
+}

View File

@@ -7,6 +7,8 @@ import (
 	"context"
 	"time"
 
+	"github.com/skyrings/skyring-common/tools/uuid"
+
 	"storj.io/storj/pkg/storj"
 )
@@ -36,7 +38,7 @@ type Rollup struct {
 	AtRestTotal float64
 }
 
-// DB stores information about bandwidth usage
+// DB stores information about bandwidth and storage usage
 type DB interface {
 	// LastTimestamp records the latest last tallied time.
 	LastTimestamp(ctx context.Context, timestampType string) (time.Time, error)
@@ -56,4 +58,10 @@ type DB interface {
 	QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) ([]*CSVRow, error)
 	// DeleteRawBefore deletes all raw tallies prior to some time
 	DeleteRawBefore(ctx context.Context, latestRollup time.Time) error
+	// CreateBucketStorageTally creates a record for BucketStorageTally in the accounting DB table
+	CreateBucketStorageTally(ctx context.Context, tally BucketStorageTally) error
+	// ProjectBandwidthTotal returns the sum of GET bandwidth usage for a projectID in the past time frame
+	ProjectBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error)
+	// ProjectStorageTotals returns the current inline and remote storage usage for a projectID
+	ProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error)
 }

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package accounting

import (
	"storj.io/storj/internal/memory"
)

const (
	// AverageDaysInMonth is how many days in a month
	AverageDaysInMonth = 30
)

// ExceedsAlphaUsage returns true if more than 25GB of storage is currently in use
// or if 25GB of bandwidth has been used in the past month (30 days)
// TODO(jg): remove this code once we no longer need usage limiting for alpha release
// Ref: https://storjlabs.atlassian.net/browse/V3-1274
func ExceedsAlphaUsage(bandwidthGetTotal, inlineTotal, remoteTotal int64, maxAlphaUsageGB memory.Size) (bool, string) {
	if bandwidthGetTotal >= maxAlphaUsageGB.Int64() {
		return true, "bandwidth"
	}

	if inlineTotal+remoteTotal >= maxAlphaUsageGB.Int64() {
		return true, "storage"
	}

	return false, ""
}
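For reference, a minimal sketch (not part of this commit) of how a caller combines the accounting totals with ExceedsAlphaUsage; the byte totals are made-up values:

package main

import (
	"fmt"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/accounting"
)

func main() {
	// Hypothetical totals, in bytes.
	bandwidthTotal := (20 * memory.GB).Int64() // GET egress over the past 30 days
	inlineTotal := (1 * memory.GB).Int64()     // current inline storage
	remoteTotal := (26 * memory.GB).Int64()    // current remote storage

	exceeded, resource := accounting.ExceedsAlphaUsage(bandwidthTotal, inlineTotal, remoteTotal, 25*memory.GB)
	if exceeded {
		fmt.Printf("limit exceeded for %s\n", resource) // prints "storage": bandwidth is under the cap
	}
}

Note the ordering: bandwidth is checked before storage, so a project over both caps reports "bandwidth".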

View File

@@ -0,0 +1,108 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package accounting_test

import (
	"crypto/rand"
	"testing"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/accounting"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
)

func TestProjectUsage(t *testing.T) {
	cases := []struct {
		name             string
		expectedExceeded bool
		expectedResource string
		expectedErrMsg   string
	}{
		{name: "doesn't exceed storage or bandwidth project limit", expectedExceeded: false, expectedErrMsg: ""},
		{name: "exceeds storage project limit", expectedExceeded: true, expectedResource: "storage", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit; segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit"},
		{name: "exceeds bandwidth project limit", expectedExceeded: true, expectedResource: "bandwidth", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit; segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit"},
	}

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		saDB := planet.Satellites[0].DB
		orderDB := saDB.Orders()
		acctDB := saDB.Accounting()

		// Setup: This date represents the past 30 days so that we can check
		// if the alpha max usage has been exceeded in the past month
		from := time.Now().AddDate(0, 0, -accounting.AverageDaysInMonth)

		for _, tt := range cases {
			t.Run(tt.name, func(t *testing.T) {
				// Setup: create a new project to use the projectID
				projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
				require.NoError(t, err)
				projectID := projects[0].ID

				bucketName := "testbucket"
				bucketID := createBucketID(projectID, []byte(bucketName))

				// Setup: create a BucketStorageTally record to test exceeding storage project limit
				if tt.expectedResource == "storage" {
					tally := accounting.BucketStorageTally{
						BucketName:    bucketName,
						ProjectID:     projectID,
						IntervalStart: time.Now(),
						RemoteBytes:   26 * memory.GB.Int64(),
					}
					err := acctDB.CreateBucketStorageTally(ctx, tally)
					require.NoError(t, err)
				}

				// Setup: create a BucketBandwidthRollup record to test exceeding bandwidth project limit
				if tt.expectedResource == "bandwidth" {
					amount := 26 * memory.GB.Int64()
					action := pb.PieceAction_GET
					err := orderDB.UpdateBucketBandwidthSettle(ctx, bucketID, action, amount)
					require.NoError(t, err)
				}

				// Execute test: get storage and bandwidth totals for a project, then check if that exceeds the max usage limit
				inlineTotal, remoteTotal, err := acctDB.ProjectStorageTotals(ctx, projectID)
				require.NoError(t, err)
				bandwidthTotal, err := acctDB.ProjectBandwidthTotal(ctx, bucketID, from)
				require.NoError(t, err)
				maxAlphaUsage := 25 * memory.GB
				actualExceeded, actualResource := accounting.ExceedsAlphaUsage(bandwidthTotal, inlineTotal, remoteTotal, maxAlphaUsage)
				require.Equal(t, tt.expectedExceeded, actualExceeded)
				require.Equal(t, tt.expectedResource, actualResource)

				// Execute test: does the uplink get an error message when the usage limits are exceeded
				expectedData := make([]byte, 50*memory.KiB)
				_, err = rand.Read(expectedData)
				require.NoError(t, err)

				actualErr := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "test/path", expectedData)
				if tt.expectedResource == "bandwidth" || tt.expectedResource == "storage" {
					assert.EqualError(t, actualErr, tt.expectedErrMsg)
				} else {
					require.NoError(t, actualErr)
				}
			})
		}
	})
}

func createBucketID(projectID uuid.UUID, bucket []byte) []byte {
	entries := make([]string, 0)
	entries = append(entries, projectID.String())
	entries = append(entries, string(bucket))
	return []byte(storj.JoinPaths(entries...))
}
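createBucketID joins the project ID and bucket name with "/", and ProjectBandwidthTotal later splits on that separator to recover the project ID. An illustrative call (the UUID literal is made up, and uuid.Parse returning (*uuid.UUID, error) is assumed from the skyrings package):

id, err := uuid.Parse("11111111-1111-1111-1111-111111111111") // hypothetical project ID
if err != nil {
	panic(err)
}
fmt.Println(string(createBucketID(*id, []byte("testbucket"))))
// 11111111-1111-1111-1111-111111111111/testbucket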

View File

@@ -10,13 +10,15 @@ import (
 	"go.uber.org/zap"
 
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/pkg/accounting"
 	"storj.io/storj/pkg/storj"
 )
 
 // Config contains configurable values for rollup
 type Config struct {
 	Interval      time.Duration `help:"how frequently rollup should run" devDefault:"120s" default:"6h"`
+	MaxAlphaUsage memory.Size   `help:"the bandwidth and storage usage limit for the alpha release" default:"25GB"`
 }
 
 // Rollup is the service for totalling data on storage nodes on daily intervals

View File

@@ -8,6 +8,7 @@ import (
 	"context"
 	"errors"
 	"strconv"
+	"time"
 
 	"github.com/skyrings/skyring-common/tools/uuid"
 	"github.com/zeebo/errs"
@@ -16,6 +17,8 @@ import (
 	"google.golang.org/grpc/status"
 	monkit "gopkg.in/spacemonkeygo/monkit.v2"
 
+	"storj.io/storj/internal/memory"
+	"storj.io/storj/pkg/accounting"
 	"storj.io/storj/pkg/auth"
 	"storj.io/storj/pkg/eestream"
 	"storj.io/storj/pkg/identity"
@@ -41,22 +44,26 @@ type APIKeys interface {
 // Endpoint metainfo endpoint
 type Endpoint struct {
 	log       *zap.Logger
 	pointerdb *pointerdb.Service
 	orders    *orders.Service
 	cache     *overlay.Cache
 	apiKeys   APIKeys
+	accountingDB  accounting.DB
+	maxAlphaUsage memory.Size
 }
 
 // NewEndpoint creates new metainfo endpoint instance
-func NewEndpoint(log *zap.Logger, pointerdb *pointerdb.Service, orders *orders.Service, cache *overlay.Cache, apiKeys APIKeys) *Endpoint {
+func NewEndpoint(log *zap.Logger, pointerdb *pointerdb.Service, orders *orders.Service, cache *overlay.Cache, apiKeys APIKeys, acctDB accounting.DB, maxAlphaUsage memory.Size) *Endpoint {
 	// TODO do something with too many params
 	return &Endpoint{
 		log:       log,
 		pointerdb: pointerdb,
 		orders:    orders,
 		cache:     cache,
 		apiKeys:   apiKeys,
+		accountingDB:  acctDB,
+		maxAlphaUsage: maxAlphaUsage,
 	}
 }
@@ -130,6 +137,23 @@ func (endpoint *Endpoint) CreateSegment(ctx context.Context, req *pb.SegmentWrit
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
 	}
 
+	// Check if this projectID has exceeded alpha usage limits, i.e. 25GB of bandwidth or storage used in the past month
+	// TODO: remove this code once we no longer need usage limiting for alpha release
+	// Ref: https://storjlabs.atlassian.net/browse/V3-1274
+	bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
+	inlineTotal, remoteTotal, err := endpoint.accountingDB.ProjectStorageTotals(ctx, keyInfo.ProjectID)
+	if err != nil {
+		endpoint.log.Error("retrieving ProjectStorageTotals", zap.Error(err))
+	}
+	exceeded, resource := accounting.ExceedsAlphaUsage(0, inlineTotal, remoteTotal, endpoint.maxAlphaUsage)
+	if exceeded {
+		endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for %s for projectID %s.",
+			endpoint.maxAlphaUsage.String(),
+			resource, keyInfo.ProjectID,
+		)
+		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Alpha Usage Limit")
+	}
+
 	redundancy, err := eestream.NewRedundancyStrategyFromProto(req.GetRedundancy())
 	if err != nil {
 		return nil, err
@@ -152,7 +176,6 @@ func (endpoint *Endpoint) CreateSegment(ctx context.Context, req *pb.SegmentWrit
 		return nil, status.Errorf(codes.Internal, err.Error())
 	}
 
-	bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
 	rootPieceID, addressedLimits, err := endpoint.orders.CreatePutOrderLimits(ctx, uplinkIdentity, bucketID, nodes, req.Expiration, maxPieceSize)
 	if err != nil {
 		return nil, Error.Wrap(err)
@@ -217,6 +240,24 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
 	}
 
+	// Check if this projectID has exceeded alpha usage limits, i.e. 25GB of bandwidth or storage used in the past month
+	// TODO: remove this code once we no longer need usage limiting for alpha release
+	// Ref: https://storjlabs.atlassian.net/browse/V3-1274
+	bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
+	from := time.Now().AddDate(0, 0, -accounting.AverageDaysInMonth) // past 30 days
+	bandwidthTotal, err := endpoint.accountingDB.ProjectBandwidthTotal(ctx, bucketID, from)
+	if err != nil {
+		endpoint.log.Error("retrieving ProjectBandwidthTotal", zap.Error(err))
+	}
+	exceeded, resource := accounting.ExceedsAlphaUsage(bandwidthTotal, 0, 0, endpoint.maxAlphaUsage)
+	if exceeded {
+		endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for %s for projectID %s.",
+			endpoint.maxAlphaUsage.String(),
+			resource, keyInfo.ProjectID,
+		)
+		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Alpha Usage Limit")
+	}
+
 	path, err := CreatePath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
 	if err != nil {
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
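Both new checks reject requests with codes.ResourceExhausted. A minimal sketch (not part of this commit) of how a gRPC client could recognize that status, using the standard google.golang.org/grpc/status package:

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isUsageLimitErr reports whether err carries the ResourceExhausted code
// that CreateSegment and DownloadSegment now return when a project is over
// its alpha limit. Illustrative helper, not part of the commit.
func isUsageLimitErr(err error) bool {
	st, ok := status.FromError(err)
	return ok && st.Code() == codes.ResourceExhausted
}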

View File

@@ -44,7 +44,6 @@ type DB interface {
 	// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node
 	UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64) error
 
-	// TODO move/reorganize/delete those methods (most probably accounting)
 	// GetBucketBandwidth gets total bucket bandwidth from period of time
 	GetBucketBandwidth(ctx context.Context, bucketID []byte, from, to time.Time) (int64, error)
 	// GetStorageNodeBandwidth gets total storage node bandwidth from period of time

View File

@@ -110,7 +110,7 @@ func TestUploadDownloadBandwidth(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		hourBeforeTest := time.Now().UTC().Add(-time.Hour)
+		hourBeforeTest := time.Now().Add(-time.Hour)
 		for _, storageNode := range planet.StorageNodes {
 			storageNode.Storage2.Sender.Loop.Pause()

View File

@@ -328,6 +328,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config) (*
 			peer.Orders.Service,
 			peer.Overlay.Service,
 			peer.DB.Console().APIKeys(),
+			peer.DB.Accounting(),
+			config.Rollup.MaxAlphaUsage,
 		)
 
 		pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)

View File

@@ -4,13 +4,16 @@
 package satellitedb
 
 import (
+	"bytes"
 	"context"
 	"database/sql"
 	"time"
 
+	"github.com/skyrings/skyring-common/tools/uuid"
 	"github.com/zeebo/errs"
 
 	"storj.io/storj/pkg/accounting"
+	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
 	dbx "storj.io/storj/satellite/satellitedb/dbx"
 )
@@ -20,6 +23,52 @@ type accountingDB struct {
 	db *dbx.DB
 }
 
+// ProjectBandwidthTotal returns the sum of GET bandwidth usage for a projectID for a time frame
+func (db *accountingDB) ProjectBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error) {
+	pathEl := bytes.Split(bucketID, []byte("/"))
+	_, projectID := pathEl[1], pathEl[0]
+	var sum *int64
+	query := `SELECT SUM(settled) FROM bucket_bandwidth_rollups WHERE project_id = ? AND action = ? AND interval_start > ?;`
+	err := db.db.QueryRow(db.db.Rebind(query), projectID, pb.PieceAction_GET, from).Scan(&sum)
+	if err == sql.ErrNoRows || sum == nil {
+		return 0, nil
+	}
+
+	return *sum, err
+}
+
+// ProjectStorageTotals returns the current inline and remote storage usage for a projectID
+func (db *accountingDB) ProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
+	rollup, err := db.db.First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(
+		ctx,
+		dbx.BucketStorageTally_ProjectId(projectID[:]),
+	)
+	if err != nil || rollup == nil {
+		return 0, 0, err
+	}
+
+	return int64(rollup.Inline), int64(rollup.Remote), err
+}
+
+// CreateBucketStorageTally creates a record in the bucket_storage_tallies accounting table
+func (db *accountingDB) CreateBucketStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error {
+	_, err := db.db.Create_BucketStorageTally(
+		ctx,
+		dbx.BucketStorageTally_BucketName([]byte(tally.BucketName)),
+		dbx.BucketStorageTally_ProjectId(tally.ProjectID[:]),
+		dbx.BucketStorageTally_IntervalStart(tally.IntervalStart),
+		dbx.BucketStorageTally_Inline(uint64(tally.InlineBytes)),
+		dbx.BucketStorageTally_Remote(uint64(tally.RemoteBytes)),
+		dbx.BucketStorageTally_RemoteSegmentsCount(uint(tally.RemoteSegmentCount)),
+		dbx.BucketStorageTally_InlineSegmentsCount(uint(tally.InlineSegmentCount)),
+		dbx.BucketStorageTally_ObjectCount(uint(tally.ObjectCount)),
+		dbx.BucketStorageTally_MetadataSize(uint64(tally.MetadataSize)),
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // LastTimestamp records the greatest last tallied time
 func (db *accountingDB) LastTimestamp(ctx context.Context, timestampType string) (time.Time, error) {
 	lastTally := time.Time{}
@@ -167,8 +216,11 @@ func (db *accountingDB) SaveBucketTallies(ctx context.Context, intervalStart tim
 	if len(bucketTallies) == 0 {
 		return Error.New("In SaveBucketTallies with empty bucketTallies")
 	}
 
 	for bucketID, info := range bucketTallies {
-		bID := dbx.BucketStorageTally_BucketId([]byte(bucketID))
+		bucketIDComponents := storj.SplitPath(bucketID)
+		bucketName := dbx.BucketStorageTally_BucketName([]byte(bucketIDComponents[0]))
+		projectID := dbx.BucketStorageTally_ProjectId([]byte(bucketIDComponents[1]))
 		interval := dbx.BucketStorageTally_IntervalStart(intervalStart)
 		inlineBytes := dbx.BucketStorageTally_Inline(uint64(info.InlineBytes))
 		remoteBytes := dbx.BucketStorageTally_Remote(uint64(info.RemoteBytes))
@@ -176,7 +228,7 @@ func (db *accountingDB) SaveBucketTallies(ctx context.Context, intervalStart tim
 		iSegments := dbx.BucketStorageTally_InlineSegmentsCount(uint(info.InlineSegments))
 		objectCount := dbx.BucketStorageTally_ObjectCount(uint(info.Files))
 		meta := dbx.BucketStorageTally_MetadataSize(uint64(info.MetadataSize))
-		_, err := db.db.Create_BucketStorageTally(ctx, bID, interval, inlineBytes, remoteBytes, rSegments, iSegments, objectCount, meta)
+		_, err := db.db.Create_BucketStorageTally(ctx, bucketName, projectID, interval, inlineBytes, remoteBytes, rSegments, iSegments, objectCount, meta)
 		if err != nil {
 			return err
 		}

View File

@@ -392,13 +392,15 @@ create used_serial ( )
 // --- bucket accounting tables --- //
 
 model bucket_bandwidth_rollup (
-	key    bucket_id interval_start action
+	key    bucket_name project_id interval_start action
 	index (
-		name   bucket_id_interval_start_interval_seconds
-		fields bucket_id interval_start interval_seconds
+		name   bucket_name_project_id_interval_start_interval_seconds
+		fields bucket_name project_id interval_start interval_seconds
 	)
 
-	field bucket_id        blob
+	field bucket_name      blob
+	field project_id       blob
 	field interval_start   utimestamp
 	field interval_seconds uint
 	field action           uint
@@ -410,15 +412,18 @@ model bucket_bandwidth_rollup (
 read scalar (
 	select bucket_bandwidth_rollup
-	where  bucket_bandwidth_rollup.bucket_id = ?
+	where  bucket_bandwidth_rollup.bucket_name = ?
+	where  bucket_bandwidth_rollup.project_id = ?
 	where  bucket_bandwidth_rollup.interval_start = ?
 	where  bucket_bandwidth_rollup.action = ?
 )
 
 model bucket_storage_tally (
-	key bucket_id interval_start
+	key bucket_name project_id interval_start
 
-	field bucket_id      blob
+	field bucket_name    blob
+	field project_id     blob
 	field interval_start utimestamp
@@ -431,7 +436,13 @@ model bucket_storage_tally (
 	field metadata_size uint64
 )
 
-create bucket_storage_tally ( )
+create bucket_storage_tally ()
+
+read first (
+	select bucket_storage_tally
+	where bucket_storage_tally.project_id = ?
+	orderby desc bucket_storage_tally.interval_start
+)
 
 // --- storage node accounting tables --- //

View File

@@ -10,7 +10,6 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -300,17 +299,19 @@ CREATE TABLE accounting_timestamps (
 	PRIMARY KEY ( name )
 );
 CREATE TABLE bucket_bandwidth_rollups (
-	bucket_id bytea NOT NULL,
+	bucket_name bytea NOT NULL,
+	project_id bytea NOT NULL,
 	interval_start timestamp NOT NULL,
 	interval_seconds integer NOT NULL,
 	action integer NOT NULL,
 	inline bigint NOT NULL,
 	allocated bigint NOT NULL,
 	settled bigint NOT NULL,
-	PRIMARY KEY ( bucket_id, interval_start, action )
+	PRIMARY KEY ( bucket_name, project_id, interval_start, action )
 );
 CREATE TABLE bucket_storage_tallies (
-	bucket_id bytea NOT NULL,
+	bucket_name bytea NOT NULL,
+	project_id bytea NOT NULL,
 	interval_start timestamp NOT NULL,
 	inline bigint NOT NULL,
 	remote bigint NOT NULL,
@@ -318,7 +319,7 @@ CREATE TABLE bucket_storage_tallies (
 	inline_segments_count integer NOT NULL,
 	object_count integer NOT NULL,
 	metadata_size bigint NOT NULL,
-	PRIMARY KEY ( bucket_id, interval_start )
+	PRIMARY KEY ( bucket_name, project_id, interval_start )
 );
 CREATE TABLE bucket_usages (
 	id bytea NOT NULL,
@@ -454,7 +455,7 @@ CREATE TABLE used_serials (
 	storage_node_id bytea NOT NULL,
 	PRIMARY KEY ( serial_number_id, storage_node_id )
 );
-CREATE INDEX bucket_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_id, interval_start, interval_seconds );
+CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
 CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
@@ -549,17 +550,19 @@ CREATE TABLE accounting_timestamps (
 	PRIMARY KEY ( name )
 );
 CREATE TABLE bucket_bandwidth_rollups (
-	bucket_id BLOB NOT NULL,
+	bucket_name BLOB NOT NULL,
+	project_id BLOB NOT NULL,
 	interval_start TIMESTAMP NOT NULL,
 	interval_seconds INTEGER NOT NULL,
 	action INTEGER NOT NULL,
 	inline INTEGER NOT NULL,
 	allocated INTEGER NOT NULL,
 	settled INTEGER NOT NULL,
-	PRIMARY KEY ( bucket_id, interval_start, action )
+	PRIMARY KEY ( bucket_name, project_id, interval_start, action )
 );
 CREATE TABLE bucket_storage_tallies (
-	bucket_id BLOB NOT NULL,
+	bucket_name BLOB NOT NULL,
+	project_id BLOB NOT NULL,
 	interval_start TIMESTAMP NOT NULL,
 	inline INTEGER NOT NULL,
 	remote INTEGER NOT NULL,
@@ -567,7 +570,7 @@ CREATE TABLE bucket_storage_tallies (
 	inline_segments_count INTEGER NOT NULL,
 	object_count INTEGER NOT NULL,
 	metadata_size INTEGER NOT NULL,
-	PRIMARY KEY ( bucket_id, interval_start )
+	PRIMARY KEY ( bucket_name, project_id, interval_start )
 );
 CREATE TABLE bucket_usages (
 	id BLOB NOT NULL,
@@ -703,7 +706,7 @@ CREATE TABLE used_serials (
 	storage_node_id BLOB NOT NULL,
 	PRIMARY KEY ( serial_number_id, storage_node_id )
 );
-CREATE INDEX bucket_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_id, interval_start, interval_seconds );
+CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
 CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
 CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
 CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
@@ -1136,7 +1139,8 @@ func (f AccountingTimestamps_Value_Field) value() interface{} {
 func (AccountingTimestamps_Value_Field) _Column() string { return "value" }
 
 type BucketBandwidthRollup struct {
-	BucketId        []byte
+	BucketName      []byte
+	ProjectId       []byte
 	IntervalStart   time.Time
 	IntervalSeconds uint
 	Action          uint
@@ -1153,24 +1157,43 @@ type BucketBandwidthRollup_Update_Fields struct {
 	Settled BucketBandwidthRollup_Settled_Field
 }
 
-type BucketBandwidthRollup_BucketId_Field struct {
+type BucketBandwidthRollup_BucketName_Field struct {
 	_set   bool
 	_null  bool
 	_value []byte
 }
 
-func BucketBandwidthRollup_BucketId(v []byte) BucketBandwidthRollup_BucketId_Field {
-	return BucketBandwidthRollup_BucketId_Field{_set: true, _value: v}
+func BucketBandwidthRollup_BucketName(v []byte) BucketBandwidthRollup_BucketName_Field {
+	return BucketBandwidthRollup_BucketName_Field{_set: true, _value: v}
 }
 
-func (f BucketBandwidthRollup_BucketId_Field) value() interface{} {
+func (f BucketBandwidthRollup_BucketName_Field) value() interface{} {
 	if !f._set || f._null {
 		return nil
 	}
 	return f._value
 }
 
-func (BucketBandwidthRollup_BucketId_Field) _Column() string { return "bucket_id" }
+func (BucketBandwidthRollup_BucketName_Field) _Column() string { return "bucket_name" }
+
+type BucketBandwidthRollup_ProjectId_Field struct {
+	_set   bool
+	_null  bool
+	_value []byte
+}
+
+func BucketBandwidthRollup_ProjectId(v []byte) BucketBandwidthRollup_ProjectId_Field {
+	return BucketBandwidthRollup_ProjectId_Field{_set: true, _value: v}
+}
+
+func (f BucketBandwidthRollup_ProjectId_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (BucketBandwidthRollup_ProjectId_Field) _Column() string { return "project_id" }
 
 type BucketBandwidthRollup_IntervalStart_Field struct {
 	_set   bool
@@ -1288,7 +1311,8 @@ func (f BucketBandwidthRollup_Settled_Field) value() interface{} {
 func (BucketBandwidthRollup_Settled_Field) _Column() string { return "settled" }
 
 type BucketStorageTally struct {
-	BucketId      []byte
+	BucketName    []byte
+	ProjectId     []byte
 	IntervalStart time.Time
 	Inline        uint64
 	Remote        uint64
@@ -1303,24 +1327,43 @@ func (BucketStorageTally) _Table() string { return "bucket_storage_tallies" }
 type BucketStorageTally_Update_Fields struct {
 }
 
-type BucketStorageTally_BucketId_Field struct {
+type BucketStorageTally_BucketName_Field struct {
 	_set   bool
 	_null  bool
 	_value []byte
 }
 
-func BucketStorageTally_BucketId(v []byte) BucketStorageTally_BucketId_Field {
-	return BucketStorageTally_BucketId_Field{_set: true, _value: v}
+func BucketStorageTally_BucketName(v []byte) BucketStorageTally_BucketName_Field {
+	return BucketStorageTally_BucketName_Field{_set: true, _value: v}
 }
 
-func (f BucketStorageTally_BucketId_Field) value() interface{} {
+func (f BucketStorageTally_BucketName_Field) value() interface{} {
 	if !f._set || f._null {
 		return nil
 	}
 	return f._value
 }
 
-func (BucketStorageTally_BucketId_Field) _Column() string { return "bucket_id" }
+func (BucketStorageTally_BucketName_Field) _Column() string { return "bucket_name" }
+
+type BucketStorageTally_ProjectId_Field struct {
+	_set   bool
+	_null  bool
+	_value []byte
+}
+
+func BucketStorageTally_ProjectId(v []byte) BucketStorageTally_ProjectId_Field {
+	return BucketStorageTally_ProjectId_Field{_set: true, _value: v}
+}
+
+func (f BucketStorageTally_ProjectId_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (BucketStorageTally_ProjectId_Field) _Column() string { return "project_id" }
 
 type BucketStorageTally_IntervalStart_Field struct {
 	_set   bool
@@ -3418,10 +3461,54 @@ func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ..
 	return dialect.Rebind(out)
 }
 
-var __sqlbundle_reSpace = regexp.MustCompile(`\s+`)
-
-func __sqlbundle_flattenSQL(s string) string {
-	return strings.TrimSpace(__sqlbundle_reSpace.ReplaceAllString(s, " "))
+func __sqlbundle_flattenSQL(x string) string {
+	// trim whitespace from beginning and end
+	s, e := 0, len(x)-1
+	for s < len(x) && (x[s] == ' ' || x[s] == '\t' || x[s] == '\n') {
+		s++
+	}
+	for s <= e && (x[e] == ' ' || x[e] == '\t' || x[e] == '\n') {
+		e--
+	}
+	if s > e {
+		return ""
+	}
+	x = x[s : e+1]
+
+	// check for whitespace that needs fixing
+	wasSpace := false
+	for i := 0; i < len(x); i++ {
+		r := x[i]
+		justSpace := r == ' '
+		if (wasSpace && justSpace) || r == '\t' || r == '\n' {
+			// whitespace detected, start writing a new string
+			var result strings.Builder
+			result.Grow(len(x))
+			if wasSpace {
+				result.WriteString(x[:i-1])
+			} else {
+				result.WriteString(x[:i])
+			}
+			for p := i; p < len(x); p++ {
+				for p < len(x) && (x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
+					p++
+				}
+				result.WriteByte(' ')
+				start := p
+				for p < len(x) && !(x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
+					p++
+				}
+				result.WriteString(x[start:p])
+			}
+			return result.String()
+		}
+		wasSpace = justSpace
+	}
+
+	// no problematic whitespace found
+	return x
 }
 
 // this type is specially named to match up with the name returned by the
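The rewrite above drops the regexp dependency while keeping the old trim-and-collapse behavior: leading and trailing whitespace is removed, and interior runs of spaces, tabs, and newlines collapse to single spaces. An illustrative check (not from the commit):

fmt.Printf("%q\n", __sqlbundle_flattenSQL("  SELECT *\n\tFROM bucket_storage_tallies\n  WHERE project_id = ?  "))
// "SELECT * FROM bucket_storage_tallies WHERE project_id = ?"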
@@ -3500,6 +3587,8 @@ type __sqlbundle_Condition struct {
 func (*__sqlbundle_Condition) private() {}
 
 func (c *__sqlbundle_Condition) Render() string {
+	// TODO(jeff): maybe check if we can use placeholders instead of the
+	// literal null: this would make the templates easier.
 	switch {
 	case c.Equal && c.Null:
@@ -3916,7 +4005,8 @@ func (obj *postgresImpl) Create_UsedSerial(ctx context.Context,
 }
 
 func (obj *postgresImpl) Create_BucketStorageTally(ctx context.Context,
-	bucket_storage_tally_bucket_id BucketStorageTally_BucketId_Field,
+	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
+	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
 	bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
 	bucket_storage_tally_inline BucketStorageTally_Inline_Field,
 	bucket_storage_tally_remote BucketStorageTally_Remote_Field,
@@ -3925,7 +4015,8 @@ func (obj *postgresImpl) Create_BucketStorageTally(ctx context.Context,
 	bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
 	bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
 	bucket_storage_tally *BucketStorageTally, err error) {
-	__bucket_id_val := bucket_storage_tally_bucket_id.value()
+	__bucket_name_val := bucket_storage_tally_bucket_name.value()
+	__project_id_val := bucket_storage_tally_project_id.value()
 	__interval_start_val := bucket_storage_tally_interval_start.value()
 	__inline_val := bucket_storage_tally_inline.value()
 	__remote_val := bucket_storage_tally_remote.value()
@@ -3934,13 +4025,13 @@ func (obj *postgresImpl) Create_BucketStorageTally(ctx context.Context,
 	__object_count_val := bucket_storage_tally_object_count.value()
 	__metadata_size_val := bucket_storage_tally_metadata_size.value()
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_storage_tallies.bucket_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size")
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __bucket_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
+	obj.logStmt(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
 
 	bucket_storage_tally = &BucketStorageTally{}
-	err = obj.driver.QueryRow(__stmt, __bucket_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val).Scan(&bucket_storage_tally.BucketId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
+	err = obj.driver.QueryRow(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val).Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -4810,22 +4901,23 @@ func (obj *postgresImpl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
 }
 
-func (obj *postgresImpl) Find_BucketBandwidthRollup_By_BucketId_And_IntervalStart_And_Action(ctx context.Context,
-	bucket_bandwidth_rollup_bucket_id BucketBandwidthRollup_BucketId_Field,
+func (obj *postgresImpl) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
+	bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
+	bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
 	bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
 	bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
 	bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_name = ? AND bucket_bandwidth_rollups.project_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")
 
 	var __values []interface{}
-	__values = append(__values, bucket_bandwidth_rollup_bucket_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())
+	__values = append(__values, bucket_bandwidth_rollup_bucket_name.value(), bucket_bandwidth_rollup_project_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
 	bucket_bandwidth_rollup = &BucketBandwidthRollup{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
+	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
 	if err == sql.ErrNoRows {
 		return nil, nil
 	}
@@ -4836,6 +4928,41 @@ func (obj *postgresImpl) Find_BucketBandwidthRollup_By_BucketId_And_IntervalStar
 }
 
+func (obj *postgresImpl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
+	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
+	bucket_storage_tally *BucketStorageTally, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? ORDER BY bucket_storage_tallies.interval_start DESC LIMIT 1 OFFSET 0")
+
+	var __values []interface{}
+	__values = append(__values, bucket_storage_tally_project_id.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	if !__rows.Next() {
+		if err := __rows.Err(); err != nil {
+			return nil, obj.makeErr(err)
+		}
+		return nil, nil
+	}
+
+	bucket_storage_tally = &BucketStorageTally{}
+	err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	return bucket_storage_tally, nil
+}
+
 func (obj *postgresImpl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
 	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
 	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
@@ -5646,33 +5773,6 @@ func (obj *postgresImpl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx contex
 }
 
-func (obj *postgresImpl) Delete_UsedSerial_By_SerialNumberId_And_StorageNodeId(ctx context.Context,
-	used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
-	used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
-	deleted bool, err error) {
-
-	var __embed_stmt = __sqlbundle_Literal("DELETE FROM used_serials WHERE used_serials.serial_number_id = ? AND used_serials.storage_node_id = ?")
-
-	var __values []interface{}
-	__values = append(__values, used_serial_serial_number_id.value(), used_serial_storage_node_id.value())
-
-	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __values...)
-
-	__res, err := obj.driver.Exec(__stmt, __values...)
-	if err != nil {
-		return false, obj.makeErr(err)
-	}
-
-	__count, err := __res.RowsAffected()
-	if err != nil {
-		return false, obj.makeErr(err)
-	}
-
-	return __count > 0, nil
-}
-
 func (obj *postgresImpl) Delete_CertRecord_By_Id(ctx context.Context,
 	certRecord_id CertRecord_Id_Field) (
 	deleted bool, err error) {
@@ -6333,7 +6433,8 @@ func (obj *sqlite3Impl) Create_UsedSerial(ctx context.Context,
 }
 
 func (obj *sqlite3Impl) Create_BucketStorageTally(ctx context.Context,
-	bucket_storage_tally_bucket_id BucketStorageTally_BucketId_Field,
+	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
+	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
 	bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
 	bucket_storage_tally_inline BucketStorageTally_Inline_Field,
 	bucket_storage_tally_remote BucketStorageTally_Remote_Field,
@@ -6342,7 +6443,8 @@ func (obj *sqlite3Impl) Create_BucketStorageTally(ctx context.Context,
 	bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
 	bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
 	bucket_storage_tally *BucketStorageTally, err error) {
-	__bucket_id_val := bucket_storage_tally_bucket_id.value()
+	__bucket_name_val := bucket_storage_tally_bucket_name.value()
+	__project_id_val := bucket_storage_tally_project_id.value()
 	__interval_start_val := bucket_storage_tally_interval_start.value()
 	__inline_val := bucket_storage_tally_inline.value()
 	__remote_val := bucket_storage_tally_remote.value()
@@ -6351,12 +6453,12 @@ func (obj *sqlite3Impl) Create_BucketStorageTally(ctx context.Context,
 	__object_count_val := bucket_storage_tally_object_count.value()
 	__metadata_size_val := bucket_storage_tally_metadata_size.value()
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __bucket_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
+	obj.logStmt(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
 
-	__res, err := obj.driver.Exec(__stmt, __bucket_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
+	__res, err := obj.driver.Exec(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -7236,22 +7338,23 @@ func (obj *sqlite3Impl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
 }
 
-func (obj *sqlite3Impl) Find_BucketBandwidthRollup_By_BucketId_And_IntervalStart_And_Action(ctx context.Context,
-	bucket_bandwidth_rollup_bucket_id BucketBandwidthRollup_BucketId_Field,
+func (obj *sqlite3Impl) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
+	bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
+	bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
 	bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
 	bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
 	bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_name = ? AND bucket_bandwidth_rollups.project_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")
 
 	var __values []interface{}
-	__values = append(__values, bucket_bandwidth_rollup_bucket_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())
+	__values = append(__values, bucket_bandwidth_rollup_bucket_name.value(), bucket_bandwidth_rollup_project_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
 	bucket_bandwidth_rollup = &BucketBandwidthRollup{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
+	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
 	if err == sql.ErrNoRows {
 		return nil, nil
 	}
@@ -7262,6 +7365,41 @@ func (obj *sqlite3Impl) Find_BucketBandwidthRollup_By_BucketId_And_IntervalStart
 }
 
+func (obj *sqlite3Impl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
+	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
+	bucket_storage_tally *BucketStorageTally, err error) {
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? ORDER BY bucket_storage_tallies.interval_start DESC LIMIT 1 OFFSET 0")
+
+	var __values []interface{}
+	__values = append(__values, bucket_storage_tally_project_id.value())
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	__rows, err := obj.driver.Query(__stmt, __values...)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	defer __rows.Close()
+
+	if !__rows.Next() {
+		if err := __rows.Err(); err != nil {
+			return nil, obj.makeErr(err)
+		}
+		return nil, nil
+	}
+
+	bucket_storage_tally = &BucketStorageTally{}
+	err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
+	if err != nil {
+		return nil, obj.makeErr(err)
+	}
+	return bucket_storage_tally, nil
+}
+
 func (obj *sqlite3Impl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
 	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
 	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
@ -8152,33 +8290,6 @@ func (obj *sqlite3Impl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context
} }
func (obj *sqlite3Impl) Delete_UsedSerial_By_SerialNumberId_And_StorageNodeId(ctx context.Context,
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
deleted bool, err error) {
var __embed_stmt = __sqlbundle_Literal("DELETE FROM used_serials WHERE used_serials.serial_number_id = ? AND used_serials.storage_node_id = ?")
var __values []interface{}
__values = append(__values, used_serial_serial_number_id.value(), used_serial_storage_node_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__res, err := obj.driver.Exec(__stmt, __values...)
if err != nil {
return false, obj.makeErr(err)
}
__count, err := __res.RowsAffected()
if err != nil {
return false, obj.makeErr(err)
}
return __count > 0, nil
}
func (obj *sqlite3Impl) Delete_CertRecord_By_Id(ctx context.Context,
certRecord_id CertRecord_Id_Field) (
deleted bool, err error) {
@@ -8443,13 +8554,13 @@ func (obj *sqlite3Impl) getLastBucketStorageTally(ctx context.Context,
pk int64) (
bucket_storage_tally *BucketStorageTally, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE _rowid_ = ?")
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, pk)
bucket_storage_tally = &BucketStorageTally{}
err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -8895,7 +9006,8 @@ func (rx *Rx) Create_ApiKey(ctx context.Context,
}
func (rx *Rx) Create_BucketStorageTally(ctx context.Context,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
@@ -8908,7 +9020,7 @@ func (rx *Rx) Create_BucketStorageTally(ctx context.Context,
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Create_BucketStorageTally(ctx, bucket_storage_tally_bucket_name, bucket_storage_tally_project_id, bucket_storage_tally_interval_start, bucket_storage_tally_inline, bucket_storage_tally_remote, bucket_storage_tally_remote_segments_count, bucket_storage_tally_inline_segments_count, bucket_storage_tally_object_count, bucket_storage_tally_metadata_size)
}
@@ -9189,17 +9301,6 @@ func (rx *Rx) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
}
func (rx *Rx) Delete_UsedSerial_By_SerialNumberId_And_StorageNodeId(ctx context.Context,
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
deleted bool, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Delete_UsedSerial_By_SerialNumberId_And_StorageNodeId(ctx, used_serial_serial_number_id, used_serial_storage_node_id)
}
func (rx *Rx) Delete_User_By_Id(ctx context.Context,
user_id User_Id_Field) (
deleted bool, err error) {
@@ -9220,8 +9321,9 @@ func (rx *Rx) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
return tx.Find_AccountingTimestamps_Value_By_Name(ctx, accounting_timestamps_name)
}
func (rx *Rx) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
@@ -9229,7 +9331,7 @@ func (rx *Rx) Find_BucketBandwidthRollup_By_BucketId_And_IntervalStart_And_Actio
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx, bucket_bandwidth_rollup_bucket_name, bucket_bandwidth_rollup_project_id, bucket_bandwidth_rollup_interval_start, bucket_bandwidth_rollup_action)
}
func (rx *Rx) Find_SerialNumber_By_SerialNumber(ctx context.Context,
@@ -9254,6 +9356,16 @@ func (rx *Rx) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart
return tx.Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start, storagenode_bandwidth_rollup_action)
}
func (rx *Rx) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
bucket_storage_tally *BucketStorageTally, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id)
}
func (rx *Rx) First_Injuredsegment(ctx context.Context) (
injuredsegment *Injuredsegment, err error) {
var tx *Tx
@@ -9611,7 +9723,8 @@ type Methods interface {
api_key *ApiKey, err error)
Create_BucketStorageTally(ctx context.Context,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
@@ -9754,11 +9867,6 @@ type Methods interface {
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
count int64, err error)
Delete_UsedSerial_By_SerialNumberId_And_StorageNodeId(ctx context.Context,
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
deleted bool, err error)
Delete_User_By_Id(ctx context.Context,
user_id User_Id_Field) (
deleted bool, err error)
@@ -9767,8 +9875,9 @@ type Methods interface {
accounting_timestamps_name AccountingTimestamps_Name_Field) (
row *Value_Row, err error)
Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
bucket_bandwidth_rollup *BucketBandwidthRollup, err error)
@@ -9783,6 +9892,10 @@ type Methods interface {
storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) (
storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error)
First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
bucket_storage_tally *BucketStorageTally, err error)
First_Injuredsegment(ctx context.Context) (
injuredsegment *Injuredsegment, err error)
@@ -27,17 +27,19 @@ CREATE TABLE accounting_timestamps (
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
@@ -45,7 +47,7 @@ CREATE TABLE bucket_storage_tallies (
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE bucket_usages (
id bytea NOT NULL,
@@ -181,7 +183,7 @@ CREATE TABLE used_serials (
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
@@ -27,17 +27,19 @@ CREATE TABLE accounting_timestamps (
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name BLOB NOT NULL,
project_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
interval_seconds INTEGER NOT NULL,
action INTEGER NOT NULL,
inline INTEGER NOT NULL,
allocated INTEGER NOT NULL,
settled INTEGER NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name BLOB NOT NULL,
project_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
inline INTEGER NOT NULL,
remote INTEGER NOT NULL,
@@ -45,7 +47,7 @@ CREATE TABLE bucket_storage_tallies (
inline_segments_count INTEGER NOT NULL,
object_count INTEGER NOT NULL,
metadata_size INTEGER NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE bucket_usages (
id BLOB NOT NULL,
@@ -181,7 +183,7 @@ CREATE TABLE used_serials (
storage_node_id BLOB NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
@@ -50,6 +50,13 @@ type lockedAccounting struct {
db accounting.DB
}
// CreateBucketStorageTally creates a record for BucketStorageTally in the accounting DB table
func (m *lockedAccounting) CreateBucketStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error {
m.Lock()
defer m.Unlock()
return m.db.CreateBucketStorageTally(ctx, tally)
}
// DeleteRawBefore deletes all raw tallies prior to some time
func (m *lockedAccounting) DeleteRawBefore(ctx context.Context, latestRollup time.Time) error {
m.Lock()
@@ -78,11 +85,18 @@ func (m *lockedAccounting) LastTimestamp(ctx context.Context, timestampType stri
return m.db.LastTimestamp(ctx, timestampType)
}
// ProjectBandwidthTotal returns the sum of GET bandwidth usage for a projectID in the past time frame
func (m *lockedAccounting) ProjectBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error) {
m.Lock()
defer m.Unlock()
return m.db.ProjectBandwidthTotal(ctx, bucketID, from)
}
// ProjectStorageTotals returns the current inline and remote storage usage for a projectID
func (m *lockedAccounting) ProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
m.Lock()
defer m.Unlock()
return m.db.ProjectStorageTotals(ctx, projectID)
}
// QueryPaymentInfo queries Overlay, Accounting Rollup on nodeID
@@ -106,6 +120,13 @@ func (m *lockedAccounting) SaveBWRaw(ctx context.Context, tallyEnd time.Time, cr
return m.db.SaveBWRaw(ctx, tallyEnd, created, bwTotals)
}
// SaveBucketTallies saves the latest bucket info
func (m *lockedAccounting) SaveBucketTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) error {
m.Lock()
defer m.Unlock()
return m.db.SaveBucketTallies(ctx, intervalStart, bucketTallies)
}
// SaveRollup records raw tallies of at rest data to the database
func (m *lockedAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error {
m.Lock()
@@ -581,7 +602,7 @@ func (m *lockedOrders) GetStorageNodeBandwidth(ctx context.Context, nodeID storj
return m.db.GetStorageNodeBandwidth(ctx, nodeID, from, to)
}
// UnuseSerialNumber removes the serial number -> storage node ID pair from the database
func (m *lockedOrders) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error {
m.Lock()
defer m.Unlock()
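ProjectBandwidthTotal and ProjectStorageTotals are the accessors the new usage limiting consumes. A hedged sketch of how a caller might combine them to decide whether a request should be rejected; exceedsAlphaUsage, maxAlphaUsage, and the one-month window are assumptions for illustration, not the committed implementation:

func exceedsAlphaUsage(ctx context.Context, db accounting.DB, bucketID []byte,
	projectID uuid.UUID, maxAlphaUsage int64) (bool, error) {
	// Sum GET bandwidth for the project since the start of the window.
	from := time.Now().AddDate(0, -1, 0) // assumed one-month window
	bandwidthTotal, err := db.ProjectBandwidthTotal(ctx, bucketID, from)
	if err != nil {
		return false, err
	}
	if bandwidthTotal >= maxAlphaUsage {
		return true, nil
	}
	// Check stored bytes as well: inline + remote must stay under the cap.
	inlineTotal, remoteTotal, err := db.ProjectStorageTotals(ctx, projectID)
	if err != nil {
		return false, err
	}
	return inlineTotal+remoteTotal >= maxAlphaUsage, nil
}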
@@ -337,7 +337,7 @@ func (db *DB) PostgresMigration() *migrate.Migration {
get_egress bigint NOT NULL,
audit_egress bigint NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( rollup_end_time, bucket_id )
)`,
},
},
@@ -494,6 +494,42 @@ func (db *DB) PostgresMigration() *migrate.Migration {
`DROP TABLE overlay_cache_nodes CASCADE;`,
},
},
{
Description: "Change bucket_id to bucket_name and project_id",
Version: 13,
Action: migrate.SQL{
// Modify columns: bucket_id --> bucket_name + project_id for table bucket_storage_tallies
`ALTER TABLE bucket_storage_tallies ADD project_id bytea;`,
`UPDATE bucket_storage_tallies SET project_id=SUBSTRING(bucket_id FROM 1 FOR 16);`,
`ALTER TABLE bucket_storage_tallies ALTER COLUMN project_id SET NOT NULL;`,
`ALTER TABLE bucket_storage_tallies RENAME COLUMN bucket_id TO bucket_name;`,
`UPDATE bucket_storage_tallies SET bucket_name=SUBSTRING(bucket_name from 18);`,
// Update the primary key for bucket_storage_tallies
`ALTER TABLE bucket_storage_tallies DROP CONSTRAINT bucket_storage_rollups_pkey;`,
`ALTER TABLE bucket_storage_tallies ADD CONSTRAINT bucket_storage_tallies_pk PRIMARY KEY (bucket_name, project_id, interval_start);`,
// Modify columns: bucket_id --> bucket_name + project_id for table bucket_bandwidth_rollups
`ALTER TABLE bucket_bandwidth_rollups ADD project_id bytea;`,
`UPDATE bucket_bandwidth_rollups SET project_id=SUBSTRING(bucket_id FROM 1 FOR 16);`,
`ALTER TABLE bucket_bandwidth_rollups ALTER COLUMN project_id SET NOT NULL;`,
`ALTER TABLE bucket_bandwidth_rollups RENAME COLUMN bucket_id TO bucket_name;`,
`UPDATE bucket_bandwidth_rollups SET bucket_name=SUBSTRING(bucket_name from 18);`,
// Update index for bucket_bandwidth_rollups
`DROP INDEX IF EXISTS bucket_id_interval_start_interval_seconds_index;`,
`CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups (
bucket_name,
project_id,
interval_start,
interval_seconds
);`,
// Update the primary key for bucket_bandwidth_rollups
`ALTER TABLE bucket_bandwidth_rollups DROP CONSTRAINT bucket_bandwidth_rollups_pkey;`,
`ALTER TABLE bucket_bandwidth_rollups ADD CONSTRAINT bucket_bandwidth_rollups_pk PRIMARY KEY (bucket_name, project_id, interval_start, action);`,
},
},
},
}
}
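The SUBSTRING arithmetic in migration 13 encodes the legacy bucket_id layout: 16 raw project-UUID bytes, a one-byte '/' separator, then the bucket name. SQL SUBSTRING is 1-indexed, so FROM 1 FOR 16 carves out project_id, position 17 is the separator, and FROM 18 onward is the bucket name. The same decomposition in Go, as a sketch for sanity-checking the offsets (the variables are illustrative only):

// The legacy layout: <16 project UUID bytes> + '/' + <bucket name>.
projectID := bytes.Repeat([]byte{0xAB}, 16) // stand-in raw UUID bytes
legacyID := append(append(projectID, '/'), []byte("testbucket")...)
// Go slices are 0-indexed, so SQL positions 1..16 map to [:16]
// and SQL position 18 onward maps to [17:].
gotProject, gotBucket := legacyID[:16], legacyID[17:] // == projectID, "testbucket"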
@@ -4,6 +4,7 @@
package satellitedb
import (
"bytes"
"context" "context"
"database/sql" "database/sql"
"time" "time"
@ -56,12 +57,17 @@ func (db *ordersDB) UpdateBucketBandwidthAllocation(ctx context.Context, bucketI
now := time.Now() now := time.Now()
intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()) intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
pathElements := bytes.Split(bucketID, []byte("/"))
bucketName, projectID := pathElements[1], pathElements[0]
statement := db.db.Rebind(
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(bucket_name, project_id, interval_start, action)
DO UPDATE SET allocated = bucket_bandwidth_rollups.allocated + ?`,
)
_, err := db.db.ExecContext(ctx, statement,
bucketName, projectID, intervalStart, defaultIntervalSeconds, action, 0, uint64(amount), 0, uint64(amount),
)
if err != nil {
return err
}
@@ -74,12 +80,17 @@ func (db *ordersDB) UpdateBucketBandwidthSettle(ctx context.Context, bucketID []
now := time.Now()
intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
pathElements := bytes.Split(bucketID, []byte("/"))
bucketName, projectID := pathElements[1], pathElements[0]
statement := db.db.Rebind(
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(bucket_name, project_id, interval_start, action)
DO UPDATE SET settled = bucket_bandwidth_rollups.settled + ?`,
)
_, err := db.db.ExecContext(ctx, statement,
bucketName, projectID, intervalStart, defaultIntervalSeconds, action, 0, 0, uint64(amount), uint64(amount),
)
if err != nil {
return err
}
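The rewritten statements name their columns explicitly and lean on the new composite key (bucket_name, project_id, interval_start, action) for upsert semantics: the first order in an hour inserts the row, and every later one only bumps the counter. A toy illustration for the settled case, assuming a backend with ON CONFLICT ... DO UPDATE support (PostgreSQL 9.5+ or SQLite 3.24+); exec stands in for db.db.ExecContext with a rebound statement, and the surrounding names are placeholders:

upsert := `INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(bucket_name, project_id, interval_start, action)
DO UPDATE SET settled = bucket_bandwidth_rollups.settled + ?`
// First settled order of the hour inserts the row: settled = 100.
exec(upsert, bucketName, projectID, intervalStart, 3600, actionGet, 0, 0, 100, 100)
// A later order in the same hour takes the conflict path: settled = 250.
exec(upsert, bucketName, projectID, intervalStart, 3600, actionGet, 0, 0, 150, 150)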
@@ -91,12 +102,17 @@ func (db *ordersDB) UpdateBucketBandwidthInline(ctx context.Context, bucketID []
now := time.Now()
intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
pathElements := bytes.Split(bucketID, []byte("/"))
bucketName, projectID := pathElements[1], pathElements[0]
statement := db.db.Rebind(
`INSERT INTO bucket_bandwidth_rollups (bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(bucket_name, project_id, interval_start, action)
DO UPDATE SET inline = bucket_bandwidth_rollups.inline + ?`,
)
_, err := db.db.ExecContext(ctx, statement,
bucketName, projectID, intervalStart, defaultIntervalSeconds, action, uint64(amount), 0, 0, uint64(amount),
)
if err != nil {
return err
}
@@ -109,11 +125,14 @@ func (db *ordersDB) UpdateStoragenodeBandwidthAllocation(ctx context.Context, st
intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
statement := db.db.Rebind(
`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT(storagenode_id, interval_start, action)
DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + ?`,
)
_, err := db.db.ExecContext(ctx, statement,
storageNode.Bytes(), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0, uint64(amount),
)
if err != nil {
return err
}
@@ -126,11 +145,14 @@ func (db *ordersDB) UpdateStoragenodeBandwidthSettle(ctx context.Context, storag
intervalStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
statement := db.db.Rebind(
`INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled)
VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT(storagenode_id, interval_start, action)
DO UPDATE SET settled = storagenode_bandwidth_rollups.settled + ?`,
)
_, err := db.db.ExecContext(ctx, statement,
storageNode.Bytes(), intervalStart, defaultIntervalSeconds, action, 0, uint64(amount), uint64(amount),
)
if err != nil {
return err
}
@@ -139,9 +161,11 @@ func (db *ordersDB) UpdateStoragenodeBandwidthSettle(ctx context.Context, storag
// GetBucketBandwidth gets the total bucket bandwidth for a period of time
func (db *ordersDB) GetBucketBandwidth(ctx context.Context, bucketID []byte, from, to time.Time) (int64, error) {
pathElements := bytes.Split(bucketID, []byte("/"))
bucketName, projectID := pathElements[1], pathElements[0]
var sum *int64
query := `SELECT SUM(settled) FROM bucket_bandwidth_rollups WHERE bucket_name = ? AND project_id = ? AND interval_start > ? AND interval_start <= ?`
err := db.db.QueryRow(db.db.Rebind(query), bucketName, projectID, from, to).Scan(&sum)
if err == sql.ErrNoRows || sum == nil {
return 0, nil
}
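Each rewritten ordersDB method derives (bucketName, projectID) by splitting bucketID on '/'. Two caveats: pathElements[1] is only safe when a separator is actually present, and since the project ID portion is 16 raw UUID bytes, it can itself contain the 0x2F ('/') byte, which would make bytes.Split cut too early. A more defensive, offset-based variant, offered as a sketch only (splitBucketID is an illustrative name, not the committed helper):

func splitBucketID(bucketID []byte) (projectID, bucketName []byte, err error) {
	// The project ID is always exactly 16 bytes, so split by offset
	// instead of searching for the separator.
	if len(bucketID) < 17 || bucketID[16] != '/' {
		return nil, nil, fmt.Errorf("invalid bucket id: %x", bucketID)
	}
	return bucketID[:16], bucketID[17:], nil
}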
@@ -0,0 +1,229 @@
-- Copied from the corresponding version of dbx generated schema
CREATE TABLE accounting_raws (
id bigserial NOT NULL,
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
data_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE bucket_usages (
id bytea NOT NULL,
bucket_id bytea NOT NULL,
rollup_end_time timestamp with time zone NOT NULL,
remote_stored_data bigint NOT NULL,
inline_stored_data bigint NOT NULL,
remote_segments integer NOT NULL,
inline_segments integer NOT NULL,
objects integer NOT NULL,
metadata_size bigint NOT NULL,
repair_egress bigint NOT NULL,
get_egress bigint NOT NULL,
audit_egress bigint NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE bwagreements (
serialnum text NOT NULL,
storage_node_id bytea NOT NULL,
uplink_id bytea NOT NULL,
action bigint NOT NULL,
total bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( serialnum )
);
CREATE TABLE certRecords (
publickey bytea NOT NULL,
id bytea NOT NULL,
update_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE injuredsegments (
id bigserial NOT NULL,
info bytea NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL,
protocol integer NOT NULL,
type integer NOT NULL,
email text NOT NULL,
wallet text NOT NULL,
free_bandwidth bigint NOT NULL,
free_disk bigint NOT NULL,
latency_90 bigint NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
audit_success_ratio double precision NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_ratio double precision NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
last_contact_success timestamp with time zone NOT NULL,
last_contact_failure timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_storage_tallies (
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
total bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start )
);
CREATE TABLE users (
id bytea NOT NULL,
full_name text NOT NULL,
short_name text,
email text NOT NULL,
password_hash bytea NOT NULL,
status integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key bytea NOT NULL,
name text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
---
INSERT INTO "accounting_raws" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000, 0, '2019-02-14 08:16:57.844849+00');
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', 0, 4, '', '', -1, -1, 0, 0, 0, 0, 3, 3, 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch');
INSERT INTO "projects"("id", "name", "description", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', '2019-02-14 08:28:24.254934+00');
INSERT INTO "api_keys"("id", "project_id", "key", "name", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\000]\\326N \\343\\270L\\327\\027\\337\\242\\240\\322mOl\\0318\\251.P I'::bytea, 'key 2', '2019-02-14 08:28:24.267934+00');
INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@ukr.net', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "bwagreements"("serialnum", "storage_node_id", "action", "total", "created_at", "expires_at", "uplink_id") VALUES ('8fc0ceaa-984c-4d52-bcf4-b5429e1e35e812FpiifDbcJkePa12jxjDEutKrfLmwzT7sz2jfVwpYqgtM8B74c', E'\\245Z[/\\333\\022\\011\\001\\036\\003\\204\\005\\032.\\206\\333E\\261\\342\\227=y,}aRaH6\\240\\370\\000'::bytea, 1, 666, '2019-02-14 15:09:54.420181+00', '2019-02-14 16:09:54+00', E'\\253Z+\\374eFm\\245$\\036\\206\\335\\247\\263\\350x\\\\\\304+\\364\\343\\364+\\276fIJQ\\361\\014\\232\\000'::bytea);
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("id", "info") VALUES (1, '\x0a0130120100');
INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00');
INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" ("storagenode_id", "interval_start", "total") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 4024);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
-- NEW DATA --