satellite/accounting: separate project limit from project entity (#3632)

Yaroslav Vorobiov 2019-11-25 16:18:04 +02:00 committed by GitHub
parent 8234e24d13
commit 8a002e8c8e
13 changed files with 780 additions and 69 deletions

View File

@@ -158,10 +158,19 @@ type ProjectAccounting interface {
GetAllocatedBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (int64, error)
// GetStorageTotals returns the current inline and remote storage usage for a projectID
GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error)
// GetProjectUsageLimits returns project usage limit
GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error)
// UpdateProjectStorageLimit updates project storage usage limit.
UpdateProjectStorageLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) error
// UpdateProjectBandwidthLimit updates project bandwidth usage limit.
UpdateProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) error
// GetProjectStorageLimit returns project storage usage limit.
GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (memory.Size, error)
// GetProjectBandwidthLimit returns project bandwidth usage limit.
GetProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID) (memory.Size, error)
// GetProjectTotal returns project usage summary for specified period of time.
GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error)
// GetBucketUsageRollups returns usage rollup per each bucket for specified period of time.
GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error)
// GetBucketTotals returns per bucket usage summary for specified period of time.
GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor BucketUsageCursor, since, before time.Time) (*BucketUsagePage, error)
}
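
The interface now splits the old single GetProjectUsageLimits accessor into separate storage and bandwidth getter/setter pairs. A minimal caller sketch, assuming a satellite.DB handle; the helper is illustrative, not part of this commit, and the uuid import path is assumed to match the satellite's vendored package of this era:

import (
	"context"

	"github.com/skyrings/skyring-common/tools/uuid" // assumed vendored uuid package

	"storj.io/storj/private/memory"
	"storj.io/storj/satellite"
)

// setCustomLimits is a hypothetical helper exercising the new split
// accessors; the 25 GB / 50 GB values are arbitrary.
func setCustomLimits(ctx context.Context, db satellite.DB, projectID uuid.UUID) error {
	acct := db.ProjectAccounting()
	if err := acct.UpdateProjectStorageLimit(ctx, projectID, 25*memory.GB); err != nil {
		return err
	}
	return acct.UpdateProjectBandwidthLimit(ctx, projectID, 50*memory.GB)
}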

View File

@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
"storj.io/storj/pkg/storj"
"storj.io/storj/private/memory"
"storj.io/storj/private/testcontext"
"storj.io/storj/private/testrand"
"storj.io/storj/satellite"
@@ -138,6 +139,46 @@ func TestStorageNodeUsage(t *testing.T) {
})
}
func TestProjectLimits(t *testing.T) {
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
projectID := testrand.UUID()
err := db.ProjectAccounting().UpdateProjectStorageLimit(ctx, projectID, 1)
require.NoError(t, err)
err = db.ProjectAccounting().UpdateProjectBandwidthLimit(ctx, projectID, 2)
require.NoError(t, err)
t.Run("get", func(t *testing.T) {
storageLimit, err := db.ProjectAccounting().GetProjectStorageLimit(ctx, projectID)
assert.NoError(t, err)
assert.Equal(t, memory.Size(1), storageLimit)
bandwidthLimit, err := db.ProjectAccounting().GetProjectBandwidthLimit(ctx, projectID)
assert.NoError(t, err)
assert.Equal(t, memory.Size(2), bandwidthLimit)
})
t.Run("update", func(t *testing.T) {
err := db.ProjectAccounting().UpdateProjectStorageLimit(ctx, projectID, 3)
require.NoError(t, err)
err = db.ProjectAccounting().UpdateProjectBandwidthLimit(ctx, projectID, 4)
require.NoError(t, err)
storageLimit, err := db.ProjectAccounting().GetProjectStorageLimit(ctx, projectID)
assert.NoError(t, err)
assert.Equal(t, memory.Size(3), storageLimit)
bandwidthLimit, err := db.ProjectAccounting().GetProjectBandwidthLimit(ctx, projectID)
assert.NoError(t, err)
assert.Equal(t, memory.Size(4), bandwidthLimit)
})
})
}
func createBucketStorageTallies(projectID uuid.UUID) (map[string]*accounting.BucketTally, []accounting.BucketTally, error) {
bucketTallies := make(map[string]*accounting.BucketTally)
var expectedTallies []accounting.BucketTally

View File

@@ -61,7 +61,7 @@ func (usage *Service) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.
// TODO(michal): to reduce db load, consider using a cache to retrieve the project.UsageLimit value if needed
group.Go(func() error {
projectLimit, err := usage.projectAccountingDB.GetProjectUsageLimits(ctx, projectID)
projectLimit, err := usage.projectAccountingDB.GetProjectBandwidthLimit(ctx, projectID)
if projectLimit > 0 {
limit = projectLimit
}
@@ -100,7 +100,7 @@ func (usage *Service) ExceedsStorageUsage(ctx context.Context, projectID uuid.UU
// TODO(michal): to reduce db load, consider using a cache to retrieve the project.UsageLimit value if needed
group.Go(func() error {
projectLimit, err := usage.projectAccountingDB.GetProjectUsageLimits(ctx, projectID)
projectLimit, err := usage.projectAccountingDB.GetProjectStorageLimit(ctx, projectID)
if projectLimit > 0 {
limit = projectLimit
}
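
Both checks share one shape: the stored per-project limit and the current usage are fetched concurrently in an errgroup, and a positive stored limit overrides the default. A compressed sketch of the storage side under stated assumptions (the method name and the maxAlphaUsage default field are illustrative; the projectAccountingDB calls match this hunk, and errgroup is golang.org/x/sync/errgroup):

// exceedsStorageSketch is a sketch, not a verbatim copy of the method
// this hunk patches; maxAlphaUsage stands in for the service's default limit.
func (usage *Service) exceedsStorageSketch(ctx context.Context, projectID uuid.UUID) (bool, memory.Size, error) {
	var group errgroup.Group
	limit := usage.maxAlphaUsage // assumed default-limit field of type memory.Size
	var totals int64

	group.Go(func() error {
		projectLimit, err := usage.projectAccountingDB.GetProjectStorageLimit(ctx, projectID)
		if projectLimit > 0 {
			limit = projectLimit // a stored custom limit overrides the default
		}
		return err
	})
	group.Go(func() error {
		inline, remote, err := usage.projectAccountingDB.GetStorageTotals(ctx, projectID)
		totals = inline + remote
		return err
	})
	if err := group.Wait(); err != nil {
		return false, 0, err
	}
	return totals >= limit.Int64(), limit, nil
}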

View File

@@ -260,15 +260,16 @@ func TestProjectUsageCustomLimit(t *testing.T) {
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satDB := planet.Satellites[0].DB
acctDB := satDB.ProjectAccounting()
projectsDB := satDB.Console().Projects()
projects, err := projectsDB.GetAll(ctx)
require.NoError(t, err)
project := projects[0]
// set custom usage limit for project
project.UsageLimit = memory.GiB.Int64() * 10
err = projectsDB.Update(ctx, &project)
expectedLimit := memory.Size(memory.GiB.Int64() * 10)
err = acctDB.UpdateProjectStorageLimit(ctx, project.ID, expectedLimit)
require.NoError(t, err)
projectUsage := planet.Satellites[0].Accounting.ProjectUsage
@@ -281,7 +282,7 @@ func TestProjectUsageCustomLimit(t *testing.T) {
actualExceeded, limit, err := projectUsage.ExceedsStorageUsage(ctx, project.ID)
require.NoError(t, err)
require.True(t, actualExceeded)
require.Equal(t, project.UsageLimit, limit.Int64())
require.Equal(t, expectedLimit.Int64(), limit.Int64())
// Setup: create some bytes for the uplink to upload
expectedData := testrand.Bytes(50 * memory.KiB)

View File

@@ -40,7 +40,6 @@ type Project struct {
Name string `json:"name"`
Description string `json:"description"`
UsageLimit int64 `json:"usageLimit"`
PartnerID uuid.UUID `json:"partnerId"`
OwnerID uuid.UUID `json:"ownerId"`

View File

@@ -54,7 +54,6 @@ func TestUsers(t *testing.T) {
ID: testrand.UUID(),
Name: "John Doe",
Description: "some description",
UsageLimit: int64(1000),
PartnerID: testrand.UUID(),
CreatedAt: time.Now(),
})
@@ -65,7 +64,6 @@ func TestUsers(t *testing.T) {
ID: testrand.UUID(),
Name: "John Doe",
Description: "some description",
UsageLimit: int64(1000),
CreatedAt: time.Now(),
})
require.NoError(t, err)

View File

@@ -262,7 +262,6 @@ model project (
field name text
field description text ( updatable )
field usage_limit int64 ( updatable )
field partner_id blob ( nullable )
field owner_id blob
@@ -560,6 +559,24 @@ read all (
where storagenode_storage_tally.interval_end_time >= ?
)
// --- storage node accounting tables --- //
model project_limit (
key project_id limit_type
field project_id blob
field usage_limit uint64
field limit_type int
field created_at timestamp ( autoinsert )
)
read one (
select project_limit
where project_limit.project_id = ?
where project_limit.limit_type = ?
)
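
dbx expands the read one declaration above into the Get_ProjectLimit_By_ProjectId_And_LimitType accessor that appears further down in this diff. A call site looks like the following; ctx, the tx handle, and projectID are assumed:

// Field constructors are generated alongside the accessor.
dbxLimit, err := tx.Get_ProjectLimit_By_ProjectId_And_LimitType(ctx,
	dbx.ProjectLimit_ProjectId(projectID[:]),
	dbx.ProjectLimit_LimitType(0), // 0 = storage in the accounting layer
)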
//--- peer_identity ---//
model peer_identity (

View File

@@ -1,5 +1,4 @@
//lint:file-ignore * generated file
// AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
// DO NOT EDIT.
@@ -430,12 +429,18 @@ CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint NOT NULL,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_limits (
project_id bytea NOT NULL,
usage_limit bigint NOT NULL,
limit_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, limit_type )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
@@ -3467,7 +3472,6 @@ type Project struct {
Id []byte
Name string
Description string
UsageLimit int64
PartnerId []byte
OwnerId []byte
CreatedAt time.Time
@@ -3481,7 +3485,6 @@ type Project_Create_Fields struct {
type Project_Update_Fields struct {
Description Project_Description_Field
UsageLimit Project_UsageLimit_Field
}
type Project_Id_Field struct {
@@ -3541,25 +3544,6 @@ func (f Project_Description_Field) value() interface{} {
func (Project_Description_Field) _Column() string { return "description" }
type Project_UsageLimit_Field struct {
_set bool
_null bool
_value int64
}
func Project_UsageLimit(v int64) Project_UsageLimit_Field {
return Project_UsageLimit_Field{_set: true, _value: v}
}
func (f Project_UsageLimit_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (Project_UsageLimit_Field) _Column() string { return "usage_limit" }
type Project_PartnerId_Field struct {
_set bool
_null bool
@@ -3630,6 +3614,94 @@ func (f Project_CreatedAt_Field) value() interface{} {
func (Project_CreatedAt_Field) _Column() string { return "created_at" }
type ProjectLimit struct {
ProjectId []byte
UsageLimit uint64
LimitType int
CreatedAt time.Time
}
func (ProjectLimit) _Table() string { return "project_limits" }
type ProjectLimit_Update_Fields struct {
}
type ProjectLimit_ProjectId_Field struct {
_set bool
_null bool
_value []byte
}
func ProjectLimit_ProjectId(v []byte) ProjectLimit_ProjectId_Field {
return ProjectLimit_ProjectId_Field{_set: true, _value: v}
}
func (f ProjectLimit_ProjectId_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (ProjectLimit_ProjectId_Field) _Column() string { return "project_id" }
type ProjectLimit_UsageLimit_Field struct {
_set bool
_null bool
_value uint64
}
func ProjectLimit_UsageLimit(v uint64) ProjectLimit_UsageLimit_Field {
return ProjectLimit_UsageLimit_Field{_set: true, _value: v}
}
func (f ProjectLimit_UsageLimit_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (ProjectLimit_UsageLimit_Field) _Column() string { return "usage_limit" }
type ProjectLimit_LimitType_Field struct {
_set bool
_null bool
_value int
}
func ProjectLimit_LimitType(v int) ProjectLimit_LimitType_Field {
return ProjectLimit_LimitType_Field{_set: true, _value: v}
}
func (f ProjectLimit_LimitType_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (ProjectLimit_LimitType_Field) _Column() string { return "limit_type" }
type ProjectLimit_CreatedAt_Field struct {
_set bool
_null bool
_value time.Time
}
func ProjectLimit_CreatedAt(v time.Time) ProjectLimit_CreatedAt_Field {
return ProjectLimit_CreatedAt_Field{_set: true, _value: v}
}
func (f ProjectLimit_CreatedAt_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (ProjectLimit_CreatedAt_Field) _Column() string { return "created_at" }
type RegistrationToken struct {
Secret []byte
OwnerId []byte
@@ -6274,7 +6346,6 @@ func (obj *postgresImpl) Create_Project(ctx context.Context,
project_id Project_Id_Field,
project_name Project_Name_Field,
project_description Project_Description_Field,
project_usage_limit Project_UsageLimit_Field,
project_owner_id Project_OwnerId_Field,
optional Project_Create_Fields) (
project *Project, err error) {
@@ -6283,18 +6354,17 @@ func (obj *postgresImpl) Create_Project(ctx context.Context,
__id_val := project_id.value()
__name_val := project_name.value()
__description_val := project_description.value()
__usage_limit_val := project_usage_limit.value()
__partner_id_val := optional.PartnerId.value()
__owner_id_val := project_owner_id.value()
__created_at_val := __now
var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")
var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at")
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val)
obj.logStmt(__stmt, __id_val, __name_val, __description_val, __partner_id_val, __owner_id_val, __created_at_val)
project = &Project{}
err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __partner_id_val, __owner_id_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7285,7 +7355,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
project_id Project_Id_Field) (
project *Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
var __values []interface{}
__values = append(__values, project_id.value())
@@ -7294,7 +7364,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
project = &Project{}
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7305,7 +7375,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
func (obj *postgresImpl) All_Project(ctx context.Context) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
var __values []interface{}
__values = append(__values)
@@ -7321,7 +7391,7 @@ func (obj *postgresImpl) All_Project(ctx context.Context) (
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7338,7 +7408,7 @@ func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
project_created_at_less Project_CreatedAt_Field) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
var __values []interface{}
__values = append(__values, project_created_at_less.value())
@@ -7354,7 +7424,7 @@ func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7371,7 +7441,7 @@ func (obj *postgresImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx contex
project_owner_id Project_OwnerId_Field) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
var __values []interface{}
__values = append(__values, project_owner_id.value())
@@ -7387,7 +7457,7 @@ func (obj *postgresImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx contex
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7404,7 +7474,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje
project_member_member_id ProjectMember_MemberId_Field) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
var __values []interface{}
__values = append(__values, project_member_member_id.value())
@@ -7420,7 +7490,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -7438,7 +7508,7 @@ func (obj *postgresImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt
limit int, offset int64) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
var __values []interface{}
__values = append(__values, project_created_at_less.value())
@@ -7456,7 +7526,7 @@ func (obj *postgresImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -8030,6 +8100,28 @@ func (obj *postgresImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterO
}
func (obj *postgresImpl) Get_ProjectLimit_By_ProjectId_And_LimitType(ctx context.Context,
project_limit_project_id ProjectLimit_ProjectId_Field,
project_limit_limit_type ProjectLimit_LimitType_Field) (
project_limit *ProjectLimit, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT project_limits.project_id, project_limits.usage_limit, project_limits.limit_type, project_limits.created_at FROM project_limits WHERE project_limits.project_id = ? AND project_limits.limit_type = ?")
var __values []interface{}
__values = append(__values, project_limit_project_id.value(), project_limit_limit_type.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
project_limit = &ProjectLimit{}
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project_limit.ProjectId, &project_limit.UsageLimit, &project_limit.LimitType, &project_limit.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
return project_limit, nil
}
func (obj *postgresImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
peer_identity_node_id PeerIdentity_NodeId_Field) (
peer_identity *PeerIdentity, err error) {
@@ -9212,7 +9304,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
project *Project, err error) {
var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.partner_id, projects.owner_id, projects.created_at")}}
__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
@@ -9223,11 +9315,6 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
}
if update.UsageLimit._set {
__values = append(__values, update.UsageLimit.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
}
if len(__sets_sql.SQLs) == 0 {
return nil, emptyUpdate()
}
@@ -9241,7 +9328,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
project = &Project{}
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -10453,6 +10540,16 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
}
count += __count
__res, err = obj.driver.Exec("DELETE FROM project_limits;")
if err != nil {
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
@@ -11112,7 +11209,6 @@ func (rx *Rx) Create_Project(ctx context.Context,
project_id Project_Id_Field,
project_name Project_Name_Field,
project_description Project_Description_Field,
project_usage_limit Project_UsageLimit_Field,
project_owner_id Project_OwnerId_Field,
optional Project_Create_Fields) (
project *Project, err error) {
@@ -11120,7 +11216,7 @@ func (rx *Rx) Create_Project(ctx context.Context,
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Create_Project(ctx, project_id, project_name, project_description, project_usage_limit, project_owner_id, optional)
return tx.Create_Project(ctx, project_id, project_name, project_description, project_owner_id, optional)
}
@@ -11673,6 +11769,17 @@ func (rx *Rx) Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Con
return tx.Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx, project_invoice_stamp_project_id, project_invoice_stamp_start_date)
}
func (rx *Rx) Get_ProjectLimit_By_ProjectId_And_LimitType(ctx context.Context,
project_limit_project_id ProjectLimit_ProjectId_Field,
project_limit_limit_type ProjectLimit_LimitType_Field) (
project_limit *ProjectLimit, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Get_ProjectLimit_By_ProjectId_And_LimitType(ctx, project_limit_project_id, project_limit_limit_type)
}
func (rx *Rx) Get_Project_By_Id(ctx context.Context,
project_id Project_Id_Field) (
project *Project, err error) {
@@ -12342,7 +12449,6 @@ type Methods interface {
project_id Project_Id_Field,
project_name Project_Name_Field,
project_description Project_Description_Field,
project_usage_limit Project_UsageLimit_Field,
project_owner_id Project_OwnerId_Field,
optional Project_Create_Fields) (
project *Project, err error)
@@ -12588,6 +12694,11 @@ type Methods interface {
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) (
project_invoice_stamp *ProjectInvoiceStamp, err error)
Get_ProjectLimit_By_ProjectId_And_LimitType(ctx context.Context,
project_limit_project_id ProjectLimit_ProjectId_Field,
project_limit_limit_type ProjectLimit_LimitType_Field) (
project_limit *ProjectLimit, err error)
Get_Project_By_Id(ctx context.Context,
project_id Project_Id_Field) (
project *Project, err error)

View File

@@ -163,12 +163,18 @@ CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint NOT NULL,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_limits (
project_id bytea NOT NULL,
usage_limit bigint NOT NULL,
limit_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, limit_type )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,

View File

@@ -466,6 +466,21 @@ func (db *DB) PostgresMigration() *migrate.Migration {
);`,
},
},
{
DB: db.db,
Description: "Add project_limits table, remove usage_limit from project",
Version: 70,
Action: migrate.SQL{
`CREATE TABLE project_limits (
project_id bytea NOT NULL,
usage_limit bigint NOT NULL,
limit_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, limit_type )
);`,
`ALTER TABLE projects DROP COLUMN usage_limit;`,
},
},
},
}
}
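
Note that migration 70 creates the new table and then drops projects.usage_limit without copying values across, so a custom limit stored on a project before this version is discarded. If preserving those values were desired, a backfill statement could run between the two steps, for example recording them as the storage-type limit; the following migrate.SQL element is hypothetical, not part of this commit:

`INSERT INTO project_limits ( project_id, usage_limit, limit_type, created_at )
SELECT id, usage_limit, 0, now()
FROM projects
WHERE usage_limit > 0;`,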

View File

@@ -22,6 +22,21 @@ type ProjectAccounting struct {
db *dbx.DB
}
// projectLimitType represents project limit types.
type projectLimitType int
const (
// projectLimitTypeStorage defines project storage limit type.
projectLimitTypeStorage projectLimitType = 0
// projectLimitTypeBandwidth defines project bandwidth limit type.
projectLimitTypeBandwidth projectLimitType = 1
)
// Int returns int value of project limit type.
func (limitType projectLimitType) Int() int {
return int(limitType)
}
// SaveTallies saves the latest bucket info
func (db *ProjectAccounting) SaveTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) (err error) {
defer mon.Task()(&ctx)(&err)
@@ -135,14 +150,74 @@ func (db *ProjectAccounting) GetStorageTotals(ctx context.Context, projectID uui
return inlineSum.Int64, remoteSum.Int64, err
}
// GetProjectUsageLimits returns project usage limit
func (db *ProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
// UpdateProjectStorageLimit updates project storage usage limit.
func (db *ProjectAccounting) UpdateProjectStorageLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
defer mon.Task()(&ctx)(&err)
project, err := db.db.Get_Project_By_Id(ctx, dbx.Project_Id(projectID[:]))
return db.updateProjectLimit(ctx, projectID, projectLimitTypeStorage, limit)
}
// UpdateProjectBandwidthLimit updates project bandwidth usage limit.
func (db *ProjectAccounting) UpdateProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
defer mon.Task()(&ctx)(&err)
return db.updateProjectLimit(ctx, projectID, projectLimitTypeBandwidth, limit)
}
// GetProjectStorageLimit returns project storage usage limit.
func (db *ProjectAccounting) GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
return db.getProjectLimit(ctx, projectID, projectLimitTypeStorage)
}
// GetProjectBandwidthLimit returns project bandwidth usage limit.
func (db *ProjectAccounting) GetProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID) (_ memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
return db.getProjectLimit(ctx, projectID, projectLimitTypeBandwidth)
}
// updateProjectLimit updates project limit by project id and project limit type.
// Returns error if no rows were affected.
func (db *ProjectAccounting) updateProjectLimit(ctx context.Context, projectID uuid.UUID, limitType projectLimitType, limit memory.Size) (err error) {
defer mon.Task()(&ctx)(&err)
query := db.db.Rebind(`INSERT INTO project_limits (project_id, limit_type, usage_limit, created_at)
VALUES (?, ?, ?, ?)
ON CONFLICT (project_id, limit_type)
DO UPDATE SET usage_limit = ?;`)
result, err := db.db.ExecContext(ctx, query, projectID[:], limitType.Int(), limit, time.Now().UTC(), limit)
if err != nil {
return err
}
affected, err := result.RowsAffected()
if err != nil {
return err
}
if affected != 1 {
return Error.New("no rows were affected")
}
return nil
}
// getProjectLimit returns project limit by type and project id.
// Returns 0 if there is no such limit.
func (db *ProjectAccounting) getProjectLimit(ctx context.Context, projectID uuid.UUID, limitType projectLimitType) (_ memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
dbxLimit, err := db.db.Get_ProjectLimit_By_ProjectId_And_LimitType(ctx,
dbx.ProjectLimit_ProjectId(projectID[:]),
dbx.ProjectLimit_LimitType(limitType.Int()),
)
if err != nil {
if err == sql.ErrNoRows {
return 0, nil
}
return 0, err
}
return memory.Size(project.UsageLimit), nil
return memory.Size(dbxLimit.UsageLimit), nil
}
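
Taken together, the helpers give these semantics: a project with no project_limits row reads back as zero, which the usage service treats as "fall back to the default limit", while repeated updates take the ON CONFLICT branch and overwrite usage_limit in place. A behavior sketch, with projAcct as a *ProjectAccounting and the IDs assumed:

limit, err := projAcct.GetProjectStorageLimit(ctx, unknownID) // limit == 0, err == nil
_ = projAcct.UpdateProjectStorageLimit(ctx, projectID, 10*memory.GB) // first call inserts the row
_ = projAcct.UpdateProjectStorageLimit(ctx, projectID, 20*memory.GB) // second call overwrites via upsert
limit, err = projAcct.GetProjectStorageLimit(ctx, projectID)         // limit == 20 GB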
// GetProjectTotal retrieves project usage for a given period

View File

@@ -99,7 +99,6 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project)
dbx.Project_Id(projectID[:]),
dbx.Project_Name(project.Name),
dbx.Project_Description(project.Description),
dbx.Project_UsageLimit(0),
dbx.Project_OwnerId(project.OwnerID[:]),
createFields,
)
@@ -126,7 +125,6 @@ func (projects *projects) Update(ctx context.Context, project *console.Project)
updateFields := dbx.Project_Update_Fields{
Description: dbx.Project_Description(project.Description),
UsageLimit: dbx.Project_UsageLimit(project.UsageLimit),
}
_, err = projects.db.Update_Project_By_Id(ctx,

View File

@@ -0,0 +1,441 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE accounting_rollups
(
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE accounting_timestamps
(
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY (name)
);
CREATE TABLE bucket_bandwidth_rollups
(
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY (bucket_name, project_id, interval_start, action)
);
CREATE TABLE bucket_storage_tallies
(
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY (bucket_name, project_id, interval_start)
);
CREATE TABLE injuredsegments
(
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp,
PRIMARY KEY (path)
);
CREATE TABLE irreparabledbs
(
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY (segmentpath)
);
CREATE TABLE nodes
(
id bytea NOT NULL,
address text NOT NULL,
last_net text NOT NULL,
protocol integer NOT NULL,
type integer NOT NULL,
email text NOT NULL,
wallet text NOT NULL,
free_bandwidth bigint NOT NULL,
free_disk bigint NOT NULL,
piece_count bigint NOT NULL,
major bigint NOT NULL,
minor bigint NOT NULL,
patch bigint NOT NULL,
hash text NOT NULL,
timestamp timestamp with time zone NOT NULL,
release boolean NOT NULL,
latency_90 bigint NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
last_contact_success timestamp with time zone NOT NULL,
last_contact_failure timestamp with time zone NOT NULL,
contained boolean NOT NULL,
disqualified timestamp with time zone,
audit_reputation_alpha double precision NOT NULL,
audit_reputation_beta double precision NOT NULL,
uptime_reputation_alpha double precision NOT NULL,
uptime_reputation_beta double precision NOT NULL,
exit_initiated_at timestamp,
exit_loop_completed_at timestamp,
exit_finished_at timestamp,
exit_success boolean NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE offers
(
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL,
invitee_credit_in_cents integer NOT NULL,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE peer_identities
(
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY (node_id)
);
CREATE TABLE pending_audits
(
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY (node_id)
);
CREATE TABLE projects
(
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE registration_tokens
(
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (secret),
UNIQUE (owner_id)
);
CREATE TABLE reset_password_tokens
(
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (secret),
UNIQUE (owner_id)
);
CREATE TABLE serial_numbers
(
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE storagenode_bandwidth_rollups
(
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY (storagenode_id, interval_start, action)
);
CREATE TABLE storagenode_storage_tallies
(
id bigserial NOT NULL,
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions
(
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp NOT NULL,
PRIMARY KEY (project_id, bucket_name)
);
CREATE TABLE api_keys
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id),
UNIQUE (head),
UNIQUE (name, project_id)
);
CREATE TABLE bucket_metainfos
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY (id),
UNIQUE (name, project_id)
);
CREATE TABLE project_invoice_stamps
(
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
invoice_id bytea NOT NULL,
start_date timestamp with time zone NOT NULL,
end_date timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (project_id, start_date, end_date),
UNIQUE (invoice_id)
);
CREATE TABLE project_members
(
member_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (member_id, project_id)
);
CREATE TABLE used_serials
(
serial_number_id integer NOT NULL REFERENCES serial_numbers (id) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY (serial_number_id, storage_node_id)
);
CREATE TABLE user_credits
(
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers (id),
referred_by bytea REFERENCES users (id) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL,
pieces_failed bigint NOT NULL,
updated_at timestamp NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp NOT NULL,
requested_at timestamp,
last_failed_at timestamp,
last_failed_code integer,
failed_count integer,
finished_at timestamp,
order_limit_send_count integer NOT NULL,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE project_limits (
project_id bytea NOT NULL,
usage_limit bigint NOT NULL,
limit_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, limit_type )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits (id, offer_id) WHERE credits_earned_in_cents=0;
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100, false);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "status", "type") VALUES ('testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0);
INSERT INTO "offers" ("name","description","award_credit_in_cents","award_credit_duration_days", "invitee_credit_in_cents","invitee_credit_duration_days", "expires_at","created_at","status","type") VALUES ('Default free credit offer','Is active when no active free credit offer',0, NULL,300, 14, '2119-03-14 08:28:24.636949+00','2019-07-14 08:28:24.636949+00',1,1);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
-- NEW DATA --
INSERT INTO "project_limits" ("project_id", "limit_type", "usage_limit", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 0, 10, '2019-06-01 08:28:24.267934+00');
INSERT INTO "project_limits" ("project_id", "limit_type", "usage_limit", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 1, 20, '2019-06-01 08:28:24.267934+00');