Per-project usage limiting (#2036)
What: Adds support for a custom usage limit per project. By default the project usage limit is taken from a configuration flag; if the project's usage_limit DB field is set to a value larger than 0, that value becomes the custom usage limit and is used to verify whether the limit was exceeded.
What's changed:
- usage_limit (bigint) field added to the projects table (with migration)
- things related to project usage moved from the metainfo endpoint into a project usage type
- accounting.ProjectAccounting extended with a GetProjectUsageLimits() method
Why: We need to be able to set different usage limits per project. https://storjlabs.atlassian.net/browse/V3-1814
parent c07162beef
commit f731267e8c
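The core rule the diff below implements: the effective limit is the project's usage_limit when it is greater than 0, otherwise the configured default (config.Rollup.MaxAlphaUsage). A minimal sketch of that resolution, assuming the memory package from this repo; effectiveLimit is a hypothetical helper, not part of this commit:

	// effectiveLimit picks the per-project override when one is set (> 0),
	// falling back to the configured default otherwise.
	func effectiveLimit(projectLimit, configDefault memory.Size) memory.Size {
		if projectLimit > 0 {
			return projectLimit
		}
		return configDefault
	}

ExceedsStorageUsage and ExceedsBandwidthUsage apply exactly this rule: limit starts at maxAlphaUsage and is overwritten when GetProjectUsageLimits returns a value larger than 0.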
@@ -9,6 +9,7 @@ import (
 	"github.com/skyrings/skyring-common/tools/uuid"
 
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/pkg/storj"
 )
@@ -74,4 +75,6 @@ type ProjectAccounting interface {
 	GetAllocatedBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error)
 	// GetStorageTotals returns the current inline and remote storage usage for a projectID
 	GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error)
+	// GetProjectUsageLimits returns project usage limit
+	GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error)
 }
@@ -4,7 +4,15 @@
 package accounting
 
 import (
+	"context"
+	"time"
+
+	"github.com/skyrings/skyring-common/tools/uuid"
+	"github.com/zeebo/errs"
+	"golang.org/x/sync/errgroup"
+
 	"storj.io/storj/internal/memory"
+	"storj.io/storj/pkg/accounting/live"
 )
@@ -15,20 +23,114 @@ const (
 	ExpansionFactor = 3
 )
 
-// ExceedsAlphaUsage returns true if the storage or bandwidth usage limits have been exceeded
-// for a project in the past month (30 days). The usage limit is 25GB multiplied by the redundancy
-// expansion factor, so that the uplinks have a raw limit of 25GB.
-// TODO(jg): remove this code once we no longer need usage limiting for alpha release
-// Ref: https://storjlabs.atlassian.net/browse/V3-1274
-func ExceedsAlphaUsage(bandwidthGetTotal, inlineTotal, remoteTotal int64, maxAlphaUsageGB memory.Size) (bool, string) {
-	maxUsage := maxAlphaUsageGB.Int64() * int64(ExpansionFactor)
-	if bandwidthGetTotal >= maxUsage {
-		return true, "bandwidth"
-	}
+var (
+	// ErrProjectUsage general error for project usage
+	ErrProjectUsage = errs.Class("project usage error")
+)
 
-	if inlineTotal+remoteTotal >= maxUsage {
-		return true, "storage"
-	}
-
-	return false, ""
+// ProjectUsage defines project usage
+type ProjectUsage struct {
+	projectAccountingDB ProjectAccounting
+	liveAccounting      live.Service
+	maxAlphaUsage       memory.Size
+}
+
+// NewProjectUsage created new instance of project usage service
+func NewProjectUsage(projectAccountingDB ProjectAccounting, liveAccounting live.Service, maxAlphaUsage memory.Size) *ProjectUsage {
+	return &ProjectUsage{
+		projectAccountingDB: projectAccountingDB,
+		liveAccounting:      liveAccounting,
+		maxAlphaUsage:       maxAlphaUsage,
+	}
+}
+
+// ExceedsBandwidthUsage returns true if the bandwidth usage limits have been exceeded
+// for a project in the past month (30 days). The usage limit is (e.g. 25GB) multiplied by the redundancy
+// expansion factor, so that the uplinks have a raw limit.
+// Ref: https://storjlabs.atlassian.net/browse/V3-1274
+func (usage *ProjectUsage) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.UUID, bucketID []byte) (_ bool, limit memory.Size, err error) {
+	var group errgroup.Group
+	var bandwidthGetTotal int64
+	limit = usage.maxAlphaUsage
+
+	// TODO(michal): to reduce db load, consider using a cache to retrieve the project.UsageLimit value if needed
+	group.Go(func() error {
+		projectLimit, err := usage.projectAccountingDB.GetProjectUsageLimits(ctx, projectID)
+		if projectLimit > 0 {
+			limit = projectLimit
+		}
+		return err
+	})
+	group.Go(func() error {
+		var err error
+		from := time.Now().AddDate(0, 0, -AverageDaysInMonth) // past 30 days
+		bandwidthGetTotal, err = usage.projectAccountingDB.GetAllocatedBandwidthTotal(ctx, bucketID, from)
+		return err
+	})
+
+	err = group.Wait()
+	if err != nil {
+		return false, 0, ErrProjectUsage.Wrap(err)
+	}
+
+	maxUsage := limit.Int64() * int64(ExpansionFactor)
+	if bandwidthGetTotal >= maxUsage {
+		return true, limit, nil
+	}
+
+	return false, limit, nil
+}
+
+// ExceedsStorageUsage returns true if the storage usage limits have been exceeded
+// for a project in the past month (30 days). The usage limit is (e.g. 25GB) multiplied by the redundancy
+// expansion factor, so that the uplinks have a raw limit.
+// Ref: https://storjlabs.atlassian.net/browse/V3-1274
+func (usage *ProjectUsage) ExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (_ bool, limit memory.Size, err error) {
+	var group errgroup.Group
+	var inlineTotal, remoteTotal int64
+	limit = usage.maxAlphaUsage
+
+	// TODO(michal): to reduce db load, consider using a cache to retrieve the project.UsageLimit value if needed
+	group.Go(func() error {
+		projectLimit, err := usage.projectAccountingDB.GetProjectUsageLimits(ctx, projectID)
+		if projectLimit > 0 {
+			limit = projectLimit
+		}
+		return err
+	})
+	group.Go(func() error {
+		var err error
+		inlineTotal, remoteTotal, err = usage.getProjectStorageTotals(ctx, projectID)
+		return err
+	})
+	err = group.Wait()
+	if err != nil {
+		return false, 0, ErrProjectUsage.Wrap(err)
+	}
+
+	maxUsage := limit.Int64() * int64(ExpansionFactor)
+	if inlineTotal+remoteTotal >= maxUsage {
+		return true, limit, nil
+	}
+
+	return false, limit, nil
+}
+
+func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
+	lastCountInline, lastCountRemote, err := usage.projectAccountingDB.GetStorageTotals(ctx, projectID)
+	if err != nil {
+		return 0, 0, err
+	}
+	rtInline, rtRemote, err := usage.liveAccounting.GetProjectStorageUsage(ctx, projectID)
+	if err != nil {
+		return 0, 0, err
+	}
+	return lastCountInline + rtInline, lastCountRemote + rtRemote, nil
+}
+
+// AddProjectStorageUsage lets the live accounting know that the given
+// project has just added inlineSpaceUsed bytes of inline space usage
+// and remoteSpaceUsed bytes of remote space usage.
+func (usage *ProjectUsage) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, inlineSpaceUsed, remoteSpaceUsed int64) error {
+	return usage.liveAccounting.AddProjectStorageUsage(ctx, projectID, inlineSpaceUsed, remoteSpaceUsed)
 }
@@ -32,7 +32,7 @@ func TestProjectUsageStorage(t *testing.T) {
 		expectedErrMsg string
 	}{
 		{name: "doesn't exceed storage or bandwidth project limit", expectedExceeded: false, expectedErrMsg: ""},
-		{name: "exceeds storage project limit", expectedExceeded: true, expectedResource: "storage", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit; segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit"},
+		{name: "exceeds storage project limit", expectedExceeded: true, expectedResource: "storage", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Usage Limit; segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Usage Limit"},
 	}
 
 	testplanet.Run(t, testplanet.Config{
@@ -43,8 +43,10 @@ func TestProjectUsageStorage(t *testing.T) {
 
 		// Setup: create a new project to use the projectID
 		projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
-		projectID := projects[0].ID
 		require.NoError(t, err)
+		projectID := projects[0].ID
+
+		projectUsage := planet.Satellites[0].Accounting.ProjectUsage
 
 		for _, tt := range cases {
 			t.Run(tt.name, func(t *testing.T) {
@@ -52,17 +54,13 @@ func TestProjectUsageStorage(t *testing.T) {
 				// Setup: create BucketStorageTally records to test exceeding storage project limit
 				if tt.expectedResource == "storage" {
 					now := time.Now()
-					err := setUpStorageTallies(ctx, projectID, acctDB, now)
+					err := setUpStorageTallies(ctx, projectID, acctDB, 25, now)
 					require.NoError(t, err)
 				}
 
 				// Execute test: get storage totals for a project, then check if that exceeds the max usage limit
-				inlineTotal, remoteTotal, err := acctDB.GetStorageTotals(ctx, projectID)
+				actualExceeded, _, err := projectUsage.ExceedsStorageUsage(ctx, projectID)
 				require.NoError(t, err)
-				maxAlphaUsage := 25 * memory.GB
-				actualExceeded, actualResource := accounting.ExceedsAlphaUsage(0, inlineTotal, remoteTotal, maxAlphaUsage)
 				require.Equal(t, tt.expectedExceeded, actualExceeded)
-				require.Equal(t, tt.expectedResource, actualResource)
 
 				// Setup: create some bytes for the uplink to upload
 				expectedData := make([]byte, 50*memory.KiB)
@@ -89,7 +87,7 @@ func TestProjectUsageBandwidth(t *testing.T) {
 		expectedErrMsg string
 	}{
 		{name: "doesn't exceed storage or bandwidth project limit", expectedExceeded: false, expectedErrMsg: ""},
-		{name: "exceeds bandwidth project limit", expectedExceeded: true, expectedResource: "bandwidth", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Alpha Usage Limit"},
+		{name: "exceeds bandwidth project limit", expectedExceeded: true, expectedResource: "bandwidth", expectedErrMsg: "segment error: metainfo error: rpc error: code = ResourceExhausted desc = Exceeded Usage Limit"},
 	}
 
 	for _, tt := range cases {
@@ -99,7 +97,6 @@ func TestProjectUsageBandwidth(t *testing.T) {
 		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 			saDB := planet.Satellites[0].DB
 			orderDB := saDB.Orders()
-			acctDB := saDB.ProjectAccounting()
 
 			// Setup: get projectID and create bucketID
 			projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
@@ -108,6 +105,8 @@ func TestProjectUsageBandwidth(t *testing.T) {
 			bucketName := "testbucket"
 			bucketID := createBucketID(projectID, []byte(bucketName))
 
+			projectUsage := planet.Satellites[0].Accounting.ProjectUsage
+
 			// Setup: create a BucketBandwidthRollup record to test exceeding bandwidth project limit
 			if tt.expectedResource == "bandwidth" {
 				now := time.Now().UTC()
@@ -124,17 +123,9 @@ func TestProjectUsageBandwidth(t *testing.T) {
 			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, filePath, expectedData)
 			require.NoError(t, err)
 
-			// Setup: This date represents the past 30 days so that we can check
-			// if the alpha max usage has been exceeded in the past month
-			from := time.Now().AddDate(0, 0, -accounting.AverageDaysInMonth)
-
 			// Execute test: get bandwidth totals for a project, then check if that exceeds the max usage limit
-			bandwidthTotal, err := acctDB.GetAllocatedBandwidthTotal(ctx, bucketID, from)
+			actualExceeded, _, err := projectUsage.ExceedsBandwidthUsage(ctx, projectID, bucketID)
 			require.NoError(t, err)
-			maxAlphaUsage := 25 * memory.GB
-			actualExceeded, actualResource := accounting.ExceedsAlphaUsage(bandwidthTotal, 0, 0, maxAlphaUsage)
 			require.Equal(t, tt.expectedExceeded, actualExceeded)
-			require.Equal(t, tt.expectedResource, actualResource)
 
 			// Execute test: check that the uplink gets an error when they have exceeded bandwidth limits and try to download a file
 			_, actualErr := planet.Uplinks[0].Download(ctx, planet.Satellites[0], bucketName, filePath)
@@ -155,10 +146,10 @@ func createBucketID(projectID uuid.UUID, bucket []byte) []byte {
 	return []byte(storj.JoinPaths(entries...))
 }
 
-func setUpStorageTallies(ctx *testcontext.Context, projectID uuid.UUID, acctDB accounting.ProjectAccounting, time time.Time) error {
+func setUpStorageTallies(ctx *testcontext.Context, projectID uuid.UUID, acctDB accounting.ProjectAccounting, numberOfGB int, time time.Time) error {
 
 	// Create many records that sum greater than project usage limit of 25GB
-	for i := 0; i < 4; i++ {
+	for i := 0; i < numberOfGB; i++ {
 		bucketName := fmt.Sprintf("%s%d", "testbucket", i)
 		tally := accounting.BucketStorageTally{
 			BucketName: bucketName,
@@ -167,7 +158,7 @@ func setUpStorageTallies(ctx *testcontext.Context, projectID uuid.UUID, acctDB a
 
 			// In order to exceed the project limits, create storage tally records
 			// that sum greater than the maxAlphaUsage * expansionFactor
-			RemoteBytes: 10 * memory.GB.Int64() * accounting.ExpansionFactor,
+			RemoteBytes: memory.GB.Int64() * accounting.ExpansionFactor,
 		}
 		err := acctDB.CreateStorageTally(ctx, tally)
 		if err != nil {
@@ -262,3 +253,43 @@ func setUpBucketBandwidthAllocations(ctx *testcontext.Context, projectID uuid.UU
 	}
 	return nil
 }
+
+func TestProjectUsageCustomLimit(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		satDB := planet.Satellites[0].DB
+		acctDB := satDB.ProjectAccounting()
+
+		projectsDB := satDB.Console().Projects()
+		projects, err := projectsDB.GetAll(ctx)
+		require.NoError(t, err)
+
+		project := projects[0]
+		// set custom usage limit for project
+		project.UsageLimit = memory.GiB.Int64() * 10
+		err = projectsDB.Update(ctx, &project)
+		require.NoError(t, err)
+
+		projectUsage := planet.Satellites[0].Accounting.ProjectUsage
+
+		// Setup: create BucketStorageTally records to test exceeding storage project limit
+		now := time.Now()
+		err = setUpStorageTallies(ctx, project.ID, acctDB, 11, now)
+		require.NoError(t, err)
+
+		actualExceeded, limit, err := projectUsage.ExceedsStorageUsage(ctx, project.ID)
+		require.NoError(t, err)
+		require.True(t, actualExceeded)
+		require.Equal(t, project.UsageLimit, limit.Int64())
+
+		// Setup: create some bytes for the uplink to upload
+		expectedData := make([]byte, 50*memory.KiB)
+		_, err = rand.Read(expectedData)
+		require.NoError(t, err)
+
+		// Execute test: check that the uplink gets an error when they have exceeded storage limits and try to upload a file
+		actualErr := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
+		assert.Error(t, actualErr)
+	})
+}
@@ -32,6 +32,7 @@ type Project struct {
 
 	Name        string `json:"name"`
 	Description string `json:"description"`
+	UsageLimit  int64  `json:"usageLimit"`
 
 	CreatedAt time.Time `json:"createdAt"`
 }
@@ -17,9 +17,7 @@ import (
 	"google.golang.org/grpc/status"
 	monkit "gopkg.in/spacemonkeygo/monkit.v2"
 
-	"storj.io/storj/internal/memory"
 	"storj.io/storj/pkg/accounting"
-	"storj.io/storj/pkg/accounting/live"
 	"storj.io/storj/pkg/auth"
 	"storj.io/storj/pkg/eestream"
 	"storj.io/storj/pkg/identity"
@@ -59,19 +57,16 @@ type Endpoint struct {
 	metainfo                *Service
 	orders                  *orders.Service
 	cache                   *overlay.Cache
+	projectUsage            *accounting.ProjectUsage
 	containment             Containment
 	apiKeys                 APIKeys
 	storagenodeAccountingDB accounting.StoragenodeAccounting
-	projectAccountingDB     accounting.ProjectAccounting
-	liveAccounting          live.Service
-	maxAlphaUsage           memory.Size
 }
 
 // NewEndpoint creates new metainfo endpoint instance
 func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cache *overlay.Cache, containment Containment,
 	apiKeys APIKeys, sdb accounting.StoragenodeAccounting,
-	pdb accounting.ProjectAccounting, liveAccounting live.Service,
-	maxAlphaUsage memory.Size) *Endpoint {
+	projectUsage *accounting.ProjectUsage) *Endpoint {
 	// TODO do something with too many params
 	return &Endpoint{
 		log: log,
@@ -81,9 +76,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cac
 		containment:             containment,
 		apiKeys:                 apiKeys,
 		storagenodeAccountingDB: sdb,
-		projectAccountingDB:     pdb,
-		liveAccounting:          liveAccounting,
-		maxAlphaUsage:           maxAlphaUsage,
+		projectUsage:            projectUsage,
 	}
 }
 
@@ -179,21 +172,15 @@ func (endpoint *Endpoint) CreateSegment(ctx context.Context, req *pb.SegmentWrit
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
 	}
 
-	// Check if this projectID has exceeded alpha usage limits, i.e. 25GB of bandwidth or storage used in the past month
-	// TODO: remove this code once we no longer need usage limiting for alpha release
-	// Ref: https://storjlabs.atlassian.net/browse/V3-1274
-	inlineTotal, remoteTotal, err := endpoint.getProjectStorageTotals(ctx, keyInfo.ProjectID)
+	exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
 	if err != nil {
 		endpoint.log.Error("retrieving project storage totals", zap.Error(err))
 	}
-	exceeded, resource := accounting.ExceedsAlphaUsage(0, inlineTotal, remoteTotal, endpoint.maxAlphaUsage)
 	if exceeded {
-		endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for %s for projectID %s.",
-			endpoint.maxAlphaUsage.String(),
-			resource, keyInfo.ProjectID,
+		endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s",
+			limit, keyInfo.ProjectID,
 		)
-		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Alpha Usage Limit")
+		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Usage Limit")
 	}
 
 	redundancy, err := eestream.NewRedundancyStrategyFromProto(req.GetRedundancy())
@@ -227,18 +214,6 @@ func (endpoint *Endpoint) CreateSegment(ctx context.Context, req *pb.SegmentWrit
 	return &pb.SegmentWriteResponse{AddressedLimits: addressedLimits, RootPieceId: rootPieceID}, nil
 }
 
-func (endpoint *Endpoint) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
-	lastCountInline, lastCountRemote, err := endpoint.projectAccountingDB.GetStorageTotals(ctx, projectID)
-	if err != nil {
-		return 0, 0, err
-	}
-	rtInline, rtRemote, err := endpoint.liveAccounting.GetProjectStorageUsage(ctx, projectID)
-	if err != nil {
-		return 0, 0, err
-	}
-	return lastCountInline + rtInline, lastCountRemote + rtRemote, nil
-}
-
 func calculateSpaceUsed(ptr *pb.Pointer) (inlineSpace, remoteSpace int64) {
 	inline := ptr.GetInlineSegment()
 	if inline != nil {
@@ -290,7 +265,7 @@ func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentComm
 	}
 
 	inlineUsed, remoteUsed := calculateSpaceUsed(req.Pointer)
-	if err := endpoint.liveAccounting.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed, remoteUsed); err != nil {
+	if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed, remoteUsed); err != nil {
 		endpoint.log.Sugar().Errorf("Could not track new storage usage by project %v: %v", keyInfo.ProjectID, err)
 		// but continue. it's most likely our own fault that we couldn't track it, and the only thing
 		// that will be affected is our per-project bandwidth and storage limits.
@@ -337,21 +312,17 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
 	}
 
-	// Check if this projectID has exceeded alpha usage limits for bandwidth or storage used in the past month
-	// TODO: remove this code once we no longer need usage limiting for alpha release
-	// Ref: https://storjlabs.atlassian.net/browse/V3-1274
 	bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
-	from := time.Now().AddDate(0, 0, -accounting.AverageDaysInMonth) // past 30 days
-	bandwidthTotal, err := endpoint.projectAccountingDB.GetAllocatedBandwidthTotal(ctx, bucketID, from)
+
+	exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID, bucketID)
 	if err != nil {
-		endpoint.log.Error("retrieving ProjectBandwidthTotal", zap.Error(err))
+		endpoint.log.Error("retrieving project bandwidth total", zap.Error(err))
 	}
-	exceeded, resource := accounting.ExceedsAlphaUsage(bandwidthTotal, 0, 0, endpoint.maxAlphaUsage)
 	if exceeded {
-		endpoint.log.Sugar().Errorf("monthly project usage limit has been exceeded for resource: %s, for project: %d. Contact customer support to increase the limit.",
-			resource, keyInfo.ProjectID,
+		endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for bandwidth for projectID %s.",
+			limit, keyInfo.ProjectID,
 		)
-		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Alpha Usage Limit")
+		return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Usage Limit")
 	}
 
 	path, err := CreatePath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
@@ -178,8 +178,9 @@ type Peer struct {
 	}
 
 	Accounting struct {
-		Tally  *tally.Service
-		Rollup *rollup.Service
+		Tally        *tally.Service
+		Rollup       *rollup.Service
+		ProjectUsage *accounting.ProjectUsage
 	}
 
 	LiveAccounting struct {
@@ -322,6 +323,15 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
 		peer.LiveAccounting.Service = liveAccountingService
 	}
 
+	{ // setup accounting project usage
+		log.Debug("Setting up accounting project usage")
+		peer.Accounting.ProjectUsage = accounting.NewProjectUsage(
+			peer.DB.ProjectAccounting(),
+			peer.LiveAccounting.Service,
+			config.Rollup.MaxAlphaUsage,
+		)
+	}
+
 	{ // setup orders
 		log.Debug("Setting up orders")
 		satelliteSignee := signing.SigneeFromPeerIdentity(peer.Identity.PeerIdentity())
@@ -360,9 +370,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
 			peer.DB.Containment(),
 			peer.DB.Console().APIKeys(),
 			peer.DB.StoragenodeAccounting(),
-			peer.DB.ProjectAccounting(),
-			peer.LiveAccounting.Service,
-			config.Rollup.MaxAlphaUsage,
+			peer.Accounting.ProjectUsage,
 		)
 
 		pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)
@@ -216,6 +216,7 @@ model project (
 
 	field name        text
 	field description text ( updatable )
+	field usage_limit int64 ( updatable )
 
 	field created_at timestamp ( autoinsert )
 )
@@ -415,6 +415,7 @@ CREATE TABLE projects (
 	id bytea NOT NULL,
 	name text NOT NULL,
 	description text NOT NULL,
+	usage_limit bigint NOT NULL,
 	created_at timestamp with time zone NOT NULL,
 	PRIMARY KEY ( id )
 );
@@ -700,6 +701,7 @@ CREATE TABLE projects (
 	id BLOB NOT NULL,
 	name TEXT NOT NULL,
 	description TEXT NOT NULL,
+	usage_limit INTEGER NOT NULL,
 	created_at TIMESTAMP NOT NULL,
 	PRIMARY KEY ( id )
 );
@@ -3078,6 +3080,7 @@ type Project struct {
 	Id          []byte
 	Name        string
 	Description string
+	UsageLimit  int64
 	CreatedAt   time.Time
 }
 
@@ -3085,6 +3088,7 @@ func (Project) _Table() string { return "projects" }
 
 type Project_Update_Fields struct {
 	Description Project_Description_Field
+	UsageLimit  Project_UsageLimit_Field
 }
 
 type Project_Id_Field struct {
@@ -3144,6 +3148,25 @@ func (f Project_Description_Field) value() interface{} {
 
 func (Project_Description_Field) _Column() string { return "description" }
 
+type Project_UsageLimit_Field struct {
+	_set   bool
+	_null  bool
+	_value int64
+}
+
+func Project_UsageLimit(v int64) Project_UsageLimit_Field {
+	return Project_UsageLimit_Field{_set: true, _value: v}
+}
+
+func (f Project_UsageLimit_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (Project_UsageLimit_Field) _Column() string { return "usage_limit" }
+
 type Project_CreatedAt_Field struct {
 	_set   bool
 	_null  bool
@@ -4498,22 +4521,24 @@ func (obj *postgresImpl) Create_User(ctx context.Context,
 func (obj *postgresImpl) Create_Project(ctx context.Context,
 	project_id Project_Id_Field,
 	project_name Project_Name_Field,
-	project_description Project_Description_Field) (
+	project_description Project_Description_Field,
+	project_usage_limit Project_UsageLimit_Field) (
 	project *Project, err error) {
 
 	__now := obj.db.Hooks.Now().UTC()
 	__id_val := project_id.value()
 	__name_val := project_name.value()
 	__description_val := project_description.value()
+	__usage_limit_val := project_usage_limit.value()
 	__created_at_val := __now
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.created_at")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at")
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __created_at_val)
+	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val)
 
 	project = &Project{}
-	err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -5140,7 +5165,7 @@ func (obj *postgresImpl) Get_User_By_Id(ctx context.Context,
 func (obj *postgresImpl) All_Project(ctx context.Context) (
 	rows []*Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects")
 
 	var __values []interface{}
 	__values = append(__values)
@@ -5156,7 +5181,7 @@ func (obj *postgresImpl) All_Project(ctx context.Context) (
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -5173,7 +5198,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
 	project_id Project_Id_Field) (
 	project *Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?")
 
 	var __values []interface{}
 	__values = append(__values, project_id.value())
@@ -5182,7 +5207,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	project = &Project{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -5194,7 +5219,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje
 	project_member_member_id ProjectMember_MemberId_Field) (
 	rows []*Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
 
 	var __values []interface{}
 	__values = append(__values, project_member_member_id.value())
@@ -5210,7 +5235,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -6317,7 +6342,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
 	project *Project, err error) {
 	var __sets = &__sqlbundle_Hole{}
 
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.created_at")}}
+	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at")}}
 
 	__sets_sql := __sqlbundle_Literals{Join: ", "}
 	var __values []interface{}
@@ -6328,6 +6353,11 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
 		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
 	}
 
+	if update.UsageLimit._set {
+		__values = append(__values, update.UsageLimit.value())
+		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
+	}
+
 	if len(__sets_sql.SQLs) == 0 {
 		return nil, emptyUpdate()
 	}
@@ -6341,7 +6371,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	project = &Project{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err == sql.ErrNoRows {
 		return nil, nil
 	}
@@ -7388,21 +7418,23 @@ func (obj *sqlite3Impl) Create_User(ctx context.Context,
 func (obj *sqlite3Impl) Create_Project(ctx context.Context,
 	project_id Project_Id_Field,
 	project_name Project_Name_Field,
-	project_description Project_Description_Field) (
+	project_description Project_Description_Field,
+	project_usage_limit Project_UsageLimit_Field) (
 	project *Project, err error) {
 
 	__now := obj.db.Hooks.Now().UTC()
 	__id_val := project_id.value()
 	__name_val := project_name.value()
 	__description_val := project_description.value()
+	__usage_limit_val := project_usage_limit.value()
 	__created_at_val := __now
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, created_at ) VALUES ( ?, ?, ?, ? )")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, created_at ) VALUES ( ?, ?, ?, ?, ? )")
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __created_at_val)
+	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val)
 
-	__res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __created_at_val)
+	__res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -8066,7 +8098,7 @@ func (obj *sqlite3Impl) Get_User_By_Id(ctx context.Context,
 func (obj *sqlite3Impl) All_Project(ctx context.Context) (
 	rows []*Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects")
 
 	var __values []interface{}
 	__values = append(__values)
@@ -8082,7 +8114,7 @@ func (obj *sqlite3Impl) All_Project(ctx context.Context) (
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -8099,7 +8131,7 @@ func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context,
 	project_id Project_Id_Field) (
 	project *Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?")
 
 	var __values []interface{}
 	__values = append(__values, project_id.value())
@@ -8108,7 +8140,7 @@ func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	project = &Project{}
-	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -8120,7 +8152,7 @@ func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Projec
 	project_member_member_id ProjectMember_MemberId_Field) (
 	rows []*Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
 
 	var __values []interface{}
 	__values = append(__values, project_member_member_id.value())
@@ -8136,7 +8168,7 @@ func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Projec
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -9304,6 +9336,11 @@ func (obj *sqlite3Impl) Update_Project_By_Id(ctx context.Context,
 		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
 	}
 
+	if update.UsageLimit._set {
+		__values = append(__values, update.UsageLimit.value())
+		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
+	}
+
 	if len(__sets_sql.SQLs) == 0 {
 		return nil, emptyUpdate()
 	}
@@ -9322,12 +9359,12 @@ func (obj *sqlite3Impl) Update_Project_By_Id(ctx context.Context,
 		return nil, obj.makeErr(err)
 	}
 
-	var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")
+	var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?")
 
 	var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
 	obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
 
-	err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err == sql.ErrNoRows {
 		return nil, nil
 	}
@@ -10055,13 +10092,13 @@ func (obj *sqlite3Impl) getLastProject(ctx context.Context,
 	pk int64) (
 	project *Project, err error) {
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE _rowid_ = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE _rowid_ = ?")
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, pk)
 
 	project = &Project{}
-	err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
+	err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -10845,13 +10882,14 @@ func (rx *Rx) Create_PendingAudits(ctx context.Context,
 func (rx *Rx) Create_Project(ctx context.Context,
 	project_id Project_Id_Field,
 	project_name Project_Name_Field,
-	project_description Project_Description_Field) (
+	project_description Project_Description_Field,
+	project_usage_limit Project_UsageLimit_Field) (
 	project *Project, err error) {
 	var tx *Tx
 	if tx, err = rx.getTx(ctx); err != nil {
 		return
 	}
-	return tx.Create_Project(ctx, project_id, project_name, project_description)
+	return tx.Create_Project(ctx, project_id, project_name, project_description, project_usage_limit)
 
 }
 
@@ -11650,7 +11688,8 @@ type Methods interface {
 	Create_Project(ctx context.Context,
 		project_id Project_Id_Field,
 		project_name Project_Name_Field,
-		project_description Project_Description_Field) (
+		project_description Project_Description_Field,
+		project_usage_limit Project_UsageLimit_Field) (
 		project *Project, err error)
 
 	Create_ProjectMember(ctx context.Context,
@@ -143,6 +143,7 @@ CREATE TABLE projects (
 	id bytea NOT NULL,
 	name text NOT NULL,
 	description text NOT NULL,
+	usage_limit bigint NOT NULL,
 	created_at timestamp with time zone NOT NULL,
 	PRIMARY KEY ( id )
 );
@@ -143,6 +143,7 @@ CREATE TABLE projects (
 	id BLOB NOT NULL,
 	name TEXT NOT NULL,
 	description TEXT NOT NULL,
+	usage_limit INTEGER NOT NULL,
 	created_at TIMESTAMP NOT NULL,
 	PRIMARY KEY ( id )
 );
@@ -13,6 +13,7 @@ import (
 	"github.com/skyrings/skyring-common/tools/uuid"
 
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/pkg/accounting"
 	"storj.io/storj/pkg/audit"
 	"storj.io/storj/pkg/bwagreement"
@@ -768,6 +769,13 @@ func (m *lockedProjectAccounting) GetAllocatedBandwidthTotal(ctx context.Context
 	return m.db.GetAllocatedBandwidthTotal(ctx, bucketID, from)
 }
 
+// GetProjectUsageLimits returns project usage limit
+func (m *lockedProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error) {
+	m.Lock()
+	defer m.Unlock()
+	return m.db.GetProjectUsageLimits(ctx, projectID)
+}
+
 // GetStorageTotals returns the current inline and remote storage usage for a projectID
 func (m *lockedProjectAccounting) GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
 	m.Lock()
@@ -712,6 +712,13 @@ func (db *DB) PostgresMigration() *migrate.Migration {
 				);`,
 				},
 			},
+			{
+				Description: "Add usage_limit column to projects table",
+				Version:     24,
+				Action: migrate.SQL{
+					`ALTER TABLE projects ADD usage_limit bigint NOT NULL DEFAULT 0;`,
+				},
+			},
 		},
 	}
 }
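Existing rows pick up usage_limit = 0 through the DEFAULT, so they keep falling back to the configured limit. Opting a project into a custom limit is then a single column update; an illustrative example (hypothetical project id, 10 GiB written out in bytes):

	UPDATE projects SET usage_limit = 10737418240 WHERE id = '\x0123...';

Setting the column back to 0 returns the project to the configured default.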
@@ -11,6 +11,7 @@ import (
 	"github.com/skyrings/skyring-common/tools/uuid"
 
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/pkg/accounting"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storj"
@@ -114,3 +115,12 @@ func (db *ProjectAccounting) GetStorageTotals(ctx context.Context, projectID uui
 	}
 	return inlineSum.Int64, remoteSum.Int64, err
 }
+
+// GetProjectUsageLimits returns project usage limit
+func (db *ProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error) {
+	project, err := db.db.Get_Project_By_Id(ctx, dbx.Project_Id(projectID[:]))
+	if err != nil {
+		return 0, err
+	}
+	return memory.Size(project.UsageLimit), nil
+}
@@ -58,7 +58,9 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project)
 	createdProject, err := projects.db.Create_Project(ctx,
 		dbx.Project_Id(projectID[:]),
 		dbx.Project_Name(project.Name),
-		dbx.Project_Description(project.Description))
+		dbx.Project_Description(project.Description),
+		dbx.Project_UsageLimit(0),
+	)
 
 	if err != nil {
 		return nil, err
@@ -78,6 +80,7 @@ func (projects *projects) Delete(ctx context.Context, id uuid.UUID) error {
 func (projects *projects) Update(ctx context.Context, project *console.Project) error {
 	updateFields := dbx.Project_Update_Fields{
 		Description: dbx.Project_Description(project.Description),
+		UsageLimit:  dbx.Project_UsageLimit(project.UsageLimit),
 	}
 
 	_, err := projects.db.Update_Project_By_Id(ctx,
satellite/satellitedb/testdata/postgres.v24.sql (new file, vendored, 272 lines)
@ -0,0 +1,272 @@
|
||||
CREATE TABLE accounting_rollups (
|
||||
id bigserial NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
put_total bigint NOT NULL,
|
||||
get_total bigint NOT NULL,
|
||||
get_audit_total bigint NOT NULL,
|
||||
get_repair_total bigint NOT NULL,
|
||||
put_repair_total bigint NOT NULL,
|
||||
at_rest_total double precision NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE accounting_timestamps (
|
||||
name text NOT NULL,
|
||||
value timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( name )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollups (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
remote bigint NOT NULL,
|
||||
remote_segments_count integer NOT NULL,
|
||||
inline_segments_count integer NOT NULL,
|
||||
object_count integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE bucket_usages (
|
||||
id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
rollup_end_time timestamp with time zone NOT NULL,
|
||||
remote_stored_data bigint NOT NULL,
|
||||
inline_stored_data bigint NOT NULL,
|
||||
remote_segments integer NOT NULL,
|
||||
inline_segments integer NOT NULL,
|
||||
objects integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
repair_egress bigint NOT NULL,
|
||||
get_egress bigint NOT NULL,
|
||||
audit_egress bigint NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE bwagreements (
|
||||
serialnum text NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
uplink_id bytea NOT NULL,
|
||||
action bigint NOT NULL,
|
||||
total bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( serialnum )
|
||||
);
|
||||
CREATE TABLE certRecords (
|
||||
publickey bytea NOT NULL,
|
||||
id bytea NOT NULL,
|
||||
update_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE injuredsegments (
|
||||
path text NOT NULL,
|
||||
data bytea NOT NULL,
|
||||
attempted timestamp,
|
||||
PRIMARY KEY ( path )
|
||||
);
|
||||
CREATE TABLE irreparabledbs (
|
||||
segmentpath bytea NOT NULL,
|
||||
segmentdetail bytea NOT NULL,
|
||||
pieces_lost_count bigint NOT NULL,
|
||||
seg_damaged_unix_sec bigint NOT NULL,
|
||||
repair_attempt_count bigint NOT NULL,
|
||||
PRIMARY KEY ( segmentpath )
|
||||
);
|
||||
CREATE TABLE nodes (
|
||||
id bytea NOT NULL,
|
||||
address text NOT NULL,
|
||||
last_ip text NOT NULL,
|
||||
protocol integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
email text NOT NULL,
|
||||
wallet text NOT NULL,
|
||||
free_bandwidth bigint NOT NULL,
|
||||
free_disk bigint NOT NULL,
|
||||
major bigint NOT NULL,
|
||||
minor bigint NOT NULL,
|
||||
patch bigint NOT NULL,
|
||||
hash text NOT NULL,
|
||||
timestamp timestamp with time zone NOT NULL,
|
||||
release boolean NOT NULL,
|
||||
latency_90 bigint NOT NULL,
|
||||
audit_success_count bigint NOT NULL,
|
||||
total_audit_count bigint NOT NULL,
|
||||
audit_success_ratio double precision NOT NULL,
|
||||
uptime_success_count bigint NOT NULL,
|
||||
total_uptime_count bigint NOT NULL,
|
||||
uptime_ratio double precision NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
last_contact_success timestamp with time zone NOT NULL,
|
||||
last_contact_failure timestamp with time zone NOT NULL,
|
||||
contained boolean NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE offers (
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
type integer NOT NULL,
|
||||
credit_in_cents integer NOT NULL,
|
||||
award_credit_duration_days integer NOT NULL,
|
||||
invitee_credit_duration_days integer NOT NULL,
|
||||
redeemable_cap integer NOT NULL,
|
||||
num_redeemed integer NOT NULL,
|
||||
expires_at timestamp with time zone,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE pending_audits (
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE registration_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
id bigserial NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
email text NOT NULL,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE api_keys (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
head bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
secret bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( head ),
|
||||
UNIQUE ( name, project_id )
|
||||
);
|
||||
CREATE TABLE project_members (
|
||||
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( member_id, project_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
|
||||
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_ip );
|
||||
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
|
||||
|
||||
---
|
||||
|
||||
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_ip", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 0, 5, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false);
|
||||
INSERT INTO "nodes"("id", "address", "last_ip", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 1, 3, 3, 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false);
|
||||
INSERT INTO "nodes"("id", "address", "last_ip", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 1, 0, 0, 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false);
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, '2019-02-14 08:28:24.254934+00');
INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@ukr.net', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "bwagreements"("serialnum", "storage_node_id", "action", "total", "created_at", "expires_at", "uplink_id") VALUES ('8fc0ceaa-984c-4d52-bcf4-b5429e1e35e812FpiifDbcJkePa12jxjDEutKrfLmwzT7sz2jfVwpYqgtM8B74c', E'\\245Z[/\\333\\022\\011\\001\\036\\003\\204\\005\\032.\\206\\333E\\261\\342\\227=y,}aRaH6\\240\\370\\000'::bytea, 1, 666, '2019-02-14 15:09:54.420181+00', '2019-02-14 16:09:54+00', E'\\253Z+\\374eFm\\245$\\036\\206\\335\\247\\263\\350x\\\\\\304+\\364\\343\\364+\\276fIJQ\\361\\014\\232\\000'::bytea);
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00');
INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1);
INSERT INTO "offers" ("id", "name", "description", "type", "credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "num_redeemed", "status") VALUES (1, 'testOffer', 'Test offer 1', 0, 1000, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, '2019-02-14 08:28:24.267934+00');