satellite/satellitedb: console tables archview comments updated (#3465)
parent 0621830f05, commit 0c2e498f09
@ -4,77 +4,11 @@
package accounting

import (
	"context"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
)

// BucketUsage is the bucket usage rollup repository
//
// architecture: Database
type BucketUsage interface {
	Get(ctx context.Context, id uuid.UUID) (*BucketRollup, error)
	GetPaged(ctx context.Context, cursor *BucketRollupCursor) ([]BucketRollup, error)
	Create(ctx context.Context, rollup BucketRollup) (*BucketRollup, error)
	Delete(ctx context.Context, id uuid.UUID) error
}

// Order is the sort order; it can be asc or desc
type Order string

const (
	// Asc ascending sort order
	Asc Order = "asc"
	// Desc descending sort order
	Desc Order = "desc"
)

// BucketRollupCursor encapsulates a cursor-based page
type BucketRollupCursor struct {
	BucketID uuid.UUID
	Before   time.Time
	After    time.Time

	Order Order

	PageSize int
	Next     *BucketRollupCursor
}

// BucketRollup holds usage rollup info
type BucketRollup struct {
	ID       uuid.UUID
	BucketID uuid.UUID

	RollupEndTime time.Time

	RemoteStoredData uint64
	InlineStoredData uint64
	RemoteSegments   uint
	InlineSegments   uint
	Objects          uint
	MetadataSize     uint64

	RepairEgress uint64
	GetEgress    uint64
	AuditEgress  uint64
}

// BucketBandwidthRollup contains data about a bandwidth rollup
type BucketBandwidthRollup struct {
	BucketName string
	ProjectID  uuid.UUID

	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint

	Inline    uint64
	Allocated uint64
	Settled   uint64
}

// BucketStorageTally holds data about a bucket tally
type BucketStorageTally struct {
	BucketName string
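For context, a minimal sketch (not part of the diff) of how the cursor-based BucketUsage.GetPaged API above is consumed; usageDB (an accounting.BucketUsage), ctx, and bucketID are assumed to be in scope, and error handling is trimmed. It mirrors the loop in the removed test below:

	// Page through rollups newest-first until the repository stops setting cursor.Next.
	cursor := &accounting.BucketRollupCursor{
		BucketID: bucketID,
		Before:   time.Now(),
		Order:    accounting.Desc,
		PageSize: 10,
	}
	var all []accounting.BucketRollup
	for {
		page, err := usageDB.GetPaged(ctx, cursor)
		if err != nil {
			return err
		}
		all = append(all, page...)
		if cursor.Next == nil { // no further pages
			break
		}
		cursor = cursor.Next
	}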
@ -1,168 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package accounting_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testrand"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/satellite/accounting"
|
||||
"storj.io/storj/satellite/satellitedb/satellitedbtest"
|
||||
)
|
||||
|
||||
func TestBucketUsage(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
|
||||
count := 50
|
||||
now := time.Now()
|
||||
ctx := testcontext.New(t)
|
||||
|
||||
usageDB := db.Console().BucketUsage()
|
||||
if usageDB == nil {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
bucketID := testrand.UUID()
|
||||
|
||||
compareRollups := func(t *testing.T, expected *accounting.BucketRollup, actual *accounting.BucketRollup) {
|
||||
assert.Equal(t, expected.BucketID, actual.BucketID)
|
||||
assert.Equal(t, expected.RollupEndTime.Unix(), actual.RollupEndTime.Unix())
|
||||
assert.Equal(t, expected.RemoteStoredData, actual.RemoteStoredData)
|
||||
assert.Equal(t, expected.InlineStoredData, actual.InlineStoredData)
|
||||
assert.Equal(t, expected.RemoteSegments, actual.RemoteSegments)
|
||||
assert.Equal(t, expected.InlineSegments, actual.InlineSegments)
|
||||
assert.Equal(t, expected.Objects, actual.Objects)
|
||||
assert.Equal(t, expected.MetadataSize, actual.MetadataSize)
|
||||
assert.Equal(t, expected.RepairEgress, actual.RepairEgress)
|
||||
assert.Equal(t, expected.GetEgress, actual.GetEgress)
|
||||
assert.Equal(t, expected.AuditEgress, actual.AuditEgress)
|
||||
}
|
||||
|
||||
var rollup *accounting.BucketRollup
|
||||
t.Run("add rollup", func(t *testing.T) {
|
||||
var err error
|
||||
data := accounting.BucketRollup{
|
||||
BucketID: bucketID,
|
||||
RollupEndTime: now,
|
||||
RemoteStoredData: 5,
|
||||
InlineStoredData: 6,
|
||||
RemoteSegments: 7,
|
||||
InlineSegments: 8,
|
||||
Objects: 9,
|
||||
MetadataSize: 10,
|
||||
RepairEgress: 11,
|
||||
GetEgress: 12,
|
||||
AuditEgress: 13,
|
||||
}
|
||||
|
||||
rollup, err = usageDB.Create(ctx, data)
|
||||
assert.NotNil(t, rollup)
|
||||
assert.NoError(t, err)
|
||||
compareRollups(t, &data, rollup)
|
||||
})
|
||||
|
||||
t.Run("get rollup", func(t *testing.T) {
|
||||
result, err := usageDB.Get(ctx, rollup.ID)
|
||||
assert.NoError(t, err)
|
||||
compareRollups(t, rollup, result)
|
||||
})
|
||||
|
||||
t.Run("delete rollup", func(t *testing.T) {
|
||||
err := usageDB.Delete(ctx, rollup.ID)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
var addedRollups []accounting.BucketRollup
|
||||
t.Run("add rollups", func(t *testing.T) {
|
||||
for i := 0; i < count; i++ {
|
||||
data := accounting.BucketRollup{
|
||||
BucketID: bucketID,
|
||||
RollupEndTime: now.Add(time.Hour * time.Duration(i+1)),
|
||||
RemoteStoredData: uint64(i),
|
||||
InlineStoredData: uint64(i + 1),
|
||||
RemoteSegments: 7,
|
||||
InlineSegments: 8,
|
||||
Objects: 9,
|
||||
MetadataSize: 10,
|
||||
RepairEgress: 11,
|
||||
GetEgress: 12,
|
||||
AuditEgress: 13,
|
||||
}
|
||||
|
||||
rollup, err := usageDB.Create(ctx, data)
|
||||
assert.NotNil(t, rollup)
|
||||
assert.NoError(t, err)
|
||||
|
||||
addedRollups = append(addedRollups, *rollup)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("retrieve rollup", func(t *testing.T) {
|
||||
t.Run("first 30 backward", func(t *testing.T) {
|
||||
cursor := &accounting.BucketRollupCursor{
|
||||
BucketID: bucketID,
|
||||
Before: now.Add(time.Hour * 30),
|
||||
Order: accounting.Desc,
|
||||
PageSize: 10,
|
||||
}
|
||||
|
||||
var pagedRollups []accounting.BucketRollup
|
||||
for {
|
||||
rollups, err := usageDB.GetPaged(ctx, cursor)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, rollups)
|
||||
assert.True(t, len(rollups) <= 10)
|
||||
|
||||
pagedRollups = append(pagedRollups, rollups...)
|
||||
|
||||
if cursor.Next == nil {
|
||||
break
|
||||
}
|
||||
cursor = cursor.Next
|
||||
}
|
||||
|
||||
testSlice := addedRollups[:30]
|
||||
for i := range pagedRollups {
|
||||
assert.Equal(t, testSlice[i].ID, pagedRollups[29-i].ID)
|
||||
compareRollups(t, &testSlice[i], &pagedRollups[29-i])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("last 30 forward", func(t *testing.T) {
|
||||
cursor := &accounting.BucketRollupCursor{
|
||||
BucketID: bucketID,
|
||||
After: now.Add(time.Hour * 20),
|
||||
Before: now.Add(time.Hour * time.Duration(count+1)),
|
||||
Order: accounting.Asc,
|
||||
PageSize: 10,
|
||||
}
|
||||
|
||||
var pagedRollups []accounting.BucketRollup
|
||||
for {
|
||||
rollups, err := usageDB.GetPaged(ctx, cursor)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, rollups)
|
||||
assert.True(t, len(rollups) <= 10)
|
||||
|
||||
pagedRollups = append(pagedRollups, rollups...)
|
||||
|
||||
if cursor.Next == nil {
|
||||
break
|
||||
}
|
||||
cursor = cursor.Next
|
||||
}
|
||||
|
||||
testSlice := addedRollups[20:]
|
||||
for i := range pagedRollups {
|
||||
assert.Equal(t, testSlice[i].ID, pagedRollups[i].ID)
|
||||
compareRollups(t, &testSlice[i], &pagedRollups[i])
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
@ -5,8 +5,6 @@ package console

import (
	"context"

	"storj.io/storj/satellite/accounting"
)

// DB contains access to different satellite databases
@ -21,8 +19,6 @@ type DB interface {
	ProjectMembers() ProjectMembers
	// APIKeys is a getter for APIKeys repository
	APIKeys() APIKeys
	// BucketUsage is a getter for accounting.BucketUsage repository
	BucketUsage() accounting.BucketUsage
	// RegistrationTokens is a getter for RegistrationTokens repository
	RegistrationTokens() RegistrationTokens
	// ResetPasswordTokens is a getter for ResetPasswordTokens repository
@ -14,6 +14,9 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that apikeys implements console.APIKeys.
var _ console.APIKeys = (*apikeys)(nil)

// apikeys is an implementation of satellite.APIKeys
type apikeys struct {
	methods dbx.Methods
@ -1,188 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package satellitedb
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/skyrings/skyring-common/tools/uuid"
|
||||
|
||||
"storj.io/storj/satellite/accounting"
|
||||
dbx "storj.io/storj/satellite/satellitedb/dbx"
|
||||
)
|
||||
|
||||
type bucketusage struct {
|
||||
db dbx.Methods
|
||||
}
|
||||
|
||||
// Get retrieves bucket usage rollup info by id
|
||||
func (usage *bucketusage) Get(ctx context.Context, id uuid.UUID) (_ *accounting.BucketRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
dbxUsage, err := usage.db.Get_BucketUsage_By_Id(ctx, dbx.BucketUsage_Id(id[:]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fromDBXUsage(ctx, dbxUsage)
|
||||
}
|
||||
|
||||
// GetPaged retrieves a list of bucket usage rollup entries for the given cursor
|
||||
func (usage bucketusage) GetPaged(ctx context.Context, cursor *accounting.BucketRollupCursor) (_ []accounting.BucketRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
var getUsage func(context.Context,
|
||||
dbx.BucketUsage_BucketId_Field,
|
||||
dbx.BucketUsage_RollupEndTime_Field,
|
||||
dbx.BucketUsage_RollupEndTime_Field,
|
||||
int, int64) ([]*dbx.BucketUsage, error)
|
||||
|
||||
switch cursor.Order {
|
||||
case accounting.Desc:
|
||||
getUsage = usage.db.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime
|
||||
default:
|
||||
getUsage = usage.db.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime
|
||||
}
|
||||
|
||||
dbxUsages, err := getUsage(
|
||||
ctx,
|
||||
dbx.BucketUsage_BucketId(cursor.BucketID[:]),
|
||||
dbx.BucketUsage_RollupEndTime(cursor.After),
|
||||
dbx.BucketUsage_RollupEndTime(cursor.Before),
|
||||
cursor.PageSize,
|
||||
0,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var rollups []accounting.BucketRollup
|
||||
for _, dbxUsage := range dbxUsages {
|
||||
rollup, err := fromDBXUsage(ctx, dbxUsage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rollups = append(rollups, *rollup)
|
||||
}
|
||||
|
||||
switch cursor.Order {
|
||||
// going backwards
|
||||
case accounting.Desc:
|
||||
dbxUsages, err := getUsage(
|
||||
ctx,
|
||||
dbx.BucketUsage_BucketId(cursor.BucketID[:]),
|
||||
dbx.BucketUsage_RollupEndTime(cursor.After),
|
||||
dbx.BucketUsage_RollupEndTime(rollups[len(rollups)-1].RollupEndTime),
|
||||
2,
|
||||
0,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dbxUsages) == 2 {
|
||||
cursor.Next = &accounting.BucketRollupCursor{
|
||||
BucketID: cursor.BucketID,
|
||||
After: cursor.After,
|
||||
Before: dbxUsages[1].RollupEndTime,
|
||||
Order: cursor.Order,
|
||||
PageSize: cursor.PageSize,
|
||||
}
|
||||
}
|
||||
// going forward
|
||||
default:
|
||||
dbxUsages, err := getUsage(
|
||||
ctx,
|
||||
dbx.BucketUsage_BucketId(cursor.BucketID[:]),
|
||||
dbx.BucketUsage_RollupEndTime(rollups[len(rollups)-1].RollupEndTime),
|
||||
dbx.BucketUsage_RollupEndTime(cursor.Before),
|
||||
1,
|
||||
0,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dbxUsages) > 0 {
|
||||
cursor.Next = &accounting.BucketRollupCursor{
|
||||
BucketID: cursor.BucketID,
|
||||
After: rollups[len(rollups)-1].RollupEndTime,
|
||||
Before: cursor.Before,
|
||||
Order: cursor.Order,
|
||||
PageSize: cursor.PageSize,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rollups, nil
|
||||
}
|
||||
|
||||
// Create creates a new bucket usage rollup
|
||||
func (usage bucketusage) Create(ctx context.Context, rollup accounting.BucketRollup) (_ *accounting.BucketRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
id, err := uuid.New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dbxUsage, err := usage.db.Create_BucketUsage(
|
||||
ctx,
|
||||
dbx.BucketUsage_Id(id[:]),
|
||||
dbx.BucketUsage_BucketId(rollup.BucketID[:]),
|
||||
dbx.BucketUsage_RollupEndTime(rollup.RollupEndTime),
|
||||
dbx.BucketUsage_RemoteStoredData(rollup.RemoteStoredData),
|
||||
dbx.BucketUsage_InlineStoredData(rollup.InlineStoredData),
|
||||
dbx.BucketUsage_RemoteSegments(rollup.RemoteSegments),
|
||||
dbx.BucketUsage_InlineSegments(rollup.InlineSegments),
|
||||
dbx.BucketUsage_Objects(rollup.Objects),
|
||||
dbx.BucketUsage_MetadataSize(rollup.MetadataSize),
|
||||
dbx.BucketUsage_RepairEgress(rollup.RepairEgress),
|
||||
dbx.BucketUsage_GetEgress(rollup.GetEgress),
|
||||
dbx.BucketUsage_AuditEgress(rollup.AuditEgress),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fromDBXUsage(ctx, dbxUsage)
|
||||
}
|
||||
|
||||
// Delete deletes bucket usage rollup entry by id
|
||||
func (usage *bucketusage) Delete(ctx context.Context, id uuid.UUID) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
_, err = usage.db.Delete_BucketUsage_By_Id(ctx, dbx.BucketUsage_Id(id[:]))
|
||||
return err
|
||||
}
|
||||
|
||||
// fromDBXUsage is a helper method to convert dbx.BucketUsage to accounting.BucketRollup
|
||||
func fromDBXUsage(ctx context.Context, dbxUsage *dbx.BucketUsage) (_ *accounting.BucketRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
id, err := bytesToUUID(dbxUsage.Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bucketID, err := bytesToUUID(dbxUsage.BucketId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &accounting.BucketRollup{
|
||||
ID: id,
|
||||
BucketID: bucketID,
|
||||
RollupEndTime: dbxUsage.RollupEndTime,
|
||||
RemoteStoredData: dbxUsage.RemoteStoredData,
|
||||
InlineStoredData: dbxUsage.InlineStoredData,
|
||||
RemoteSegments: dbxUsage.RemoteSegments,
|
||||
InlineSegments: dbxUsage.InlineSegments,
|
||||
Objects: dbxUsage.Objects,
|
||||
MetadataSize: dbxUsage.MetadataSize,
|
||||
RepairEgress: dbxUsage.RepairEgress,
|
||||
GetEgress: dbxUsage.GetEgress,
|
||||
AuditEgress: dbxUsage.AuditEgress,
|
||||
}, nil
|
||||
}
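A condensed sketch of the look-ahead that GetPaged above uses to decide whether another page exists (forward/Asc case only; hasMoreAfter is a hypothetical stand-in for the limit-1 dbx query, not a real method):

	// After converting the current page, peek one row past the last returned rollup.
	last := rollups[len(rollups)-1].RollupEndTime
	if hasMoreAfter(ctx, cursor.BucketID, last, cursor.Before) { // hypothetical helper
		cursor.Next = &accounting.BucketRollupCursor{
			BucketID: cursor.BucketID,
			After:    last, // the next page starts where this one ended
			Before:   cursor.Before,
			Order:    cursor.Order,
			PageSize: cursor.PageSize,
		}
	}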
|
@ -8,11 +8,13 @@ import (

	"github.com/zeebo/errs"

	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/console"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that ConsoleDB implements console.DB.
var _ console.DB = (*ConsoleDB)(nil)

// ConsoleDB contains access to different satellite databases
type ConsoleDB struct {
	db *dbx.DB
@ -41,11 +43,6 @@ func (db *ConsoleDB) APIKeys() console.APIKeys {
	return &apikeys{db.methods, db.db}
}

// BucketUsage is a getter for accounting.BucketUsage repository
func (db *ConsoleDB) BucketUsage() accounting.BucketUsage {
	return &bucketusage{db.methods}
}

// RegistrationTokens is a getter for RegistrationTokens repository
func (db *ConsoleDB) RegistrationTokens() console.RegistrationTokens {
	return &registrationTokens{db.methods}
@ -12,7 +12,7 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensure that customers implements stripecoinpayments.CustomersDB.
// ensures that customers implements stripecoinpayments.CustomersDB.
var _ stripecoinpayments.CustomersDB = (*customers)(nil)

// customers is an implementation of stripecoinpayments.CustomersDB.
@ -383,54 +383,6 @@ read all (
|
||||
orderby asc api_key.name
|
||||
)
|
||||
|
||||
//-----bucket_usage----//
|
||||
|
||||
model bucket_usage (
|
||||
key id
|
||||
index (
|
||||
name bucket_id_rollup
|
||||
fields bucket_id rollup_end_time
|
||||
unique
|
||||
)
|
||||
|
||||
field id blob
|
||||
field bucket_id blob //--TODO: add foreign key constraint--//
|
||||
|
||||
field rollup_end_time timestamp
|
||||
|
||||
field remote_stored_data uint64
|
||||
field inline_stored_data uint64
|
||||
field remote_segments uint
|
||||
field inline_segments uint
|
||||
field objects uint
|
||||
field metadata_size uint64
|
||||
field repair_egress uint64
|
||||
field get_egress uint64
|
||||
field audit_egress uint64
|
||||
)
|
||||
|
||||
create bucket_usage ( )
|
||||
delete bucket_usage ( where bucket_usage.id = ? )
|
||||
|
||||
read one (
|
||||
select bucket_usage
|
||||
where bucket_usage.id = ?
|
||||
)
|
||||
read limitoffset (
|
||||
select bucket_usage
|
||||
where bucket_usage.bucket_id = ?
|
||||
where bucket_usage.rollup_end_time > ?
|
||||
where bucket_usage.rollup_end_time <= ?
|
||||
orderby asc bucket_usage.rollup_end_time
|
||||
)
|
||||
read limitoffset (
|
||||
select bucket_usage
|
||||
where bucket_usage.bucket_id = ?
|
||||
where bucket_usage.rollup_end_time > ?
|
||||
where bucket_usage.rollup_end_time <= ?
|
||||
orderby desc bucket_usage.rollup_end_time
|
||||
)
|
||||
|
||||
//--- tracking serial numbers ---//
|
||||
|
||||
model serial_number (
|
||||
|
@ -305,21 +305,6 @@ CREATE TABLE bucket_storage_tallies (
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE bucket_usages (
|
||||
id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
rollup_end_time timestamp with time zone NOT NULL,
|
||||
remote_stored_data bigint NOT NULL,
|
||||
inline_stored_data bigint NOT NULL,
|
||||
remote_segments integer NOT NULL,
|
||||
inline_segments integer NOT NULL,
|
||||
objects integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
repair_egress bigint NOT NULL,
|
||||
get_egress bigint NOT NULL,
|
||||
audit_egress bigint NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE coinpayments_transactions (
|
||||
id text NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
@ -580,7 +565,6 @@ CREATE TABLE user_credits (
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
|
||||
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
|
||||
@ -1246,254 +1230,6 @@ func (f BucketStorageTally_MetadataSize_Field) value() interface{} {
|
||||
|
||||
func (BucketStorageTally_MetadataSize_Field) _Column() string { return "metadata_size" }
|
||||
|
||||
type BucketUsage struct {
|
||||
Id []byte
|
||||
BucketId []byte
|
||||
RollupEndTime time.Time
|
||||
RemoteStoredData uint64
|
||||
InlineStoredData uint64
|
||||
RemoteSegments uint
|
||||
InlineSegments uint
|
||||
Objects uint
|
||||
MetadataSize uint64
|
||||
RepairEgress uint64
|
||||
GetEgress uint64
|
||||
AuditEgress uint64
|
||||
}
|
||||
|
||||
func (BucketUsage) _Table() string { return "bucket_usages" }
|
||||
|
||||
type BucketUsage_Update_Fields struct {
|
||||
}
|
||||
|
||||
type BucketUsage_Id_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value []byte
|
||||
}
|
||||
|
||||
func BucketUsage_Id(v []byte) BucketUsage_Id_Field {
|
||||
return BucketUsage_Id_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_Id_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_Id_Field) _Column() string { return "id" }
|
||||
|
||||
type BucketUsage_BucketId_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value []byte
|
||||
}
|
||||
|
||||
func BucketUsage_BucketId(v []byte) BucketUsage_BucketId_Field {
|
||||
return BucketUsage_BucketId_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_BucketId_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_BucketId_Field) _Column() string { return "bucket_id" }
|
||||
|
||||
type BucketUsage_RollupEndTime_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value time.Time
|
||||
}
|
||||
|
||||
func BucketUsage_RollupEndTime(v time.Time) BucketUsage_RollupEndTime_Field {
|
||||
return BucketUsage_RollupEndTime_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_RollupEndTime_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_RollupEndTime_Field) _Column() string { return "rollup_end_time" }
|
||||
|
||||
type BucketUsage_RemoteStoredData_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_RemoteStoredData(v uint64) BucketUsage_RemoteStoredData_Field {
|
||||
return BucketUsage_RemoteStoredData_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_RemoteStoredData_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_RemoteStoredData_Field) _Column() string { return "remote_stored_data" }
|
||||
|
||||
type BucketUsage_InlineStoredData_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_InlineStoredData(v uint64) BucketUsage_InlineStoredData_Field {
|
||||
return BucketUsage_InlineStoredData_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_InlineStoredData_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_InlineStoredData_Field) _Column() string { return "inline_stored_data" }
|
||||
|
||||
type BucketUsage_RemoteSegments_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint
|
||||
}
|
||||
|
||||
func BucketUsage_RemoteSegments(v uint) BucketUsage_RemoteSegments_Field {
|
||||
return BucketUsage_RemoteSegments_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_RemoteSegments_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_RemoteSegments_Field) _Column() string { return "remote_segments" }
|
||||
|
||||
type BucketUsage_InlineSegments_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint
|
||||
}
|
||||
|
||||
func BucketUsage_InlineSegments(v uint) BucketUsage_InlineSegments_Field {
|
||||
return BucketUsage_InlineSegments_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_InlineSegments_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_InlineSegments_Field) _Column() string { return "inline_segments" }
|
||||
|
||||
type BucketUsage_Objects_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint
|
||||
}
|
||||
|
||||
func BucketUsage_Objects(v uint) BucketUsage_Objects_Field {
|
||||
return BucketUsage_Objects_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_Objects_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_Objects_Field) _Column() string { return "objects" }
|
||||
|
||||
type BucketUsage_MetadataSize_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_MetadataSize(v uint64) BucketUsage_MetadataSize_Field {
|
||||
return BucketUsage_MetadataSize_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_MetadataSize_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_MetadataSize_Field) _Column() string { return "metadata_size" }
|
||||
|
||||
type BucketUsage_RepairEgress_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_RepairEgress(v uint64) BucketUsage_RepairEgress_Field {
|
||||
return BucketUsage_RepairEgress_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_RepairEgress_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_RepairEgress_Field) _Column() string { return "repair_egress" }
|
||||
|
||||
type BucketUsage_GetEgress_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_GetEgress(v uint64) BucketUsage_GetEgress_Field {
|
||||
return BucketUsage_GetEgress_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_GetEgress_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_GetEgress_Field) _Column() string { return "get_egress" }
|
||||
|
||||
type BucketUsage_AuditEgress_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value uint64
|
||||
}
|
||||
|
||||
func BucketUsage_AuditEgress(v uint64) BucketUsage_AuditEgress_Field {
|
||||
return BucketUsage_AuditEgress_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f BucketUsage_AuditEgress_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (BucketUsage_AuditEgress_Field) _Column() string { return "audit_egress" }
|
||||
|
||||
type CoinpaymentsTransaction struct {
|
||||
Id string
|
||||
UserId []byte
|
||||
@ -6283,47 +6019,6 @@ func (obj *postgresImpl) Create_ApiKey(ctx context.Context,
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Create_BucketUsage(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
|
||||
bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
|
||||
bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
|
||||
bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
|
||||
bucket_usage_objects BucketUsage_Objects_Field,
|
||||
bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
|
||||
bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
|
||||
bucket_usage_get_egress BucketUsage_GetEgress_Field,
|
||||
bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
|
||||
bucket_usage *BucketUsage, err error) {
|
||||
__id_val := bucket_usage_id.value()
|
||||
__bucket_id_val := bucket_usage_bucket_id.value()
|
||||
__rollup_end_time_val := bucket_usage_rollup_end_time.value()
|
||||
__remote_stored_data_val := bucket_usage_remote_stored_data.value()
|
||||
__inline_stored_data_val := bucket_usage_inline_stored_data.value()
|
||||
__remote_segments_val := bucket_usage_remote_segments.value()
|
||||
__inline_segments_val := bucket_usage_inline_segments.value()
|
||||
__objects_val := bucket_usage_objects.value()
|
||||
__metadata_size_val := bucket_usage_metadata_size.value()
|
||||
__repair_egress_val := bucket_usage_repair_egress.value()
|
||||
__get_egress_val := bucket_usage_get_egress.value()
|
||||
__audit_egress_val := bucket_usage_audit_egress.value()
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_usages ( id, bucket_id, rollup_end_time, remote_stored_data, inline_stored_data, remote_segments, inline_segments, objects, metadata_size, repair_egress, get_egress, audit_egress ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress")
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val)
|
||||
|
||||
bucket_usage = &BucketUsage{}
|
||||
err = obj.driver.QueryRow(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
return bucket_usage, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) CreateNoReturn_SerialNumber(ctx context.Context,
|
||||
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
||||
serial_number_bucket_id SerialNumber_BucketId_Field,
|
||||
@ -7517,103 +7212,6 @@ func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Co
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Get_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
bucket_usage *BucketUsage, err error) {
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.id = ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, bucket_usage_id.value())
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
bucket_usage = &BucketUsage{}
|
||||
err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
return bucket_usage, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error) {
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time LIMIT ? OFFSET ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())
|
||||
|
||||
__values = append(__values, limit, offset)
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
__rows, err := obj.driver.Query(__stmt, __values...)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
defer __rows.Close()
|
||||
|
||||
for __rows.Next() {
|
||||
bucket_usage := &BucketUsage{}
|
||||
err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
rows = append(rows, bucket_usage)
|
||||
}
|
||||
if err := __rows.Err(); err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
return rows, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error) {
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time DESC LIMIT ? OFFSET ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())
|
||||
|
||||
__values = append(__values, limit, offset)
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
__rows, err := obj.driver.Query(__stmt, __values...)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
defer __rows.Close()
|
||||
|
||||
for __rows.Next() {
|
||||
bucket_usage := &BucketUsage{}
|
||||
err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
rows = append(rows, bucket_usage)
|
||||
}
|
||||
if err := __rows.Err(); err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
return rows, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
||||
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
||||
serial_number *SerialNumber, err error) {
|
||||
@ -9719,32 +9317,6 @@ func (obj *postgresImpl) Delete_ApiKey_By_Id(ctx context.Context,
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Delete_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
deleted bool, err error) {
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_usages WHERE bucket_usages.id = ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, bucket_usage_id.value())
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
__res, err := obj.driver.Exec(__stmt, __values...)
|
||||
if err != nil {
|
||||
return false, obj.makeErr(err)
|
||||
}
|
||||
|
||||
__count, err := __res.RowsAffected()
|
||||
if err != nil {
|
||||
return false, obj.makeErr(err)
|
||||
}
|
||||
|
||||
return __count > 0, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *postgresImpl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
||||
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
||||
count int64, err error) {
|
||||
@ -10240,16 +9812,6 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
|
||||
return 0, obj.makeErr(err)
|
||||
}
|
||||
|
||||
__count, err = __res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, obj.makeErr(err)
|
||||
}
|
||||
count += __count
|
||||
__res, err = obj.driver.Exec("DELETE FROM bucket_usages;")
|
||||
if err != nil {
|
||||
return 0, obj.makeErr(err)
|
||||
}
|
||||
|
||||
__count, err = __res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, obj.makeErr(err)
|
||||
@ -10731,28 +10293,6 @@ func (rx *Rx) Create_BucketMetainfo(ctx context.Context,
|
||||
|
||||
}
|
||||
|
||||
func (rx *Rx) Create_BucketUsage(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
|
||||
bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
|
||||
bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
|
||||
bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
|
||||
bucket_usage_objects BucketUsage_Objects_Field,
|
||||
bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
|
||||
bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
|
||||
bucket_usage_get_egress BucketUsage_GetEgress_Field,
|
||||
bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
|
||||
bucket_usage *BucketUsage, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Create_BucketUsage(ctx, bucket_usage_id, bucket_usage_bucket_id, bucket_usage_rollup_end_time, bucket_usage_remote_stored_data, bucket_usage_inline_stored_data, bucket_usage_remote_segments, bucket_usage_inline_segments, bucket_usage_objects, bucket_usage_metadata_size, bucket_usage_repair_egress, bucket_usage_get_egress, bucket_usage_audit_egress)
|
||||
|
||||
}
|
||||
|
||||
func (rx *Rx) Create_CoinpaymentsTransaction(ctx context.Context,
|
||||
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
||||
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
||||
@ -10973,16 +10513,6 @@ func (rx *Rx) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
||||
return tx.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
||||
}
|
||||
|
||||
func (rx *Rx) Delete_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
deleted bool, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Delete_BucketUsage_By_Id(ctx, bucket_usage_id)
|
||||
}
|
||||
|
||||
func (rx *Rx) Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
|
||||
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
||||
deleted bool, err error) {
|
||||
@ -11247,16 +10777,6 @@ func (rx *Rx) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
||||
return tx.Get_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
||||
}
|
||||
|
||||
func (rx *Rx) Get_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
bucket_usage *BucketUsage, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Get_BucketUsage_By_Id(ctx, bucket_usage_id)
|
||||
}
|
||||
|
||||
func (rx *Rx) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
||||
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
||||
graceful_exit_progress *GracefulExitProgress, err error) {
|
||||
@ -11475,32 +10995,6 @@ func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_N
|
||||
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater, limit, offset)
|
||||
}
|
||||
|
||||
func (rx *Rx) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx, bucket_usage_bucket_id, bucket_usage_rollup_end_time_greater, bucket_usage_rollup_end_time_less_or_equal, limit, offset)
|
||||
}
|
||||
|
||||
func (rx *Rx) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx, bucket_usage_bucket_id, bucket_usage_rollup_end_time_greater, bucket_usage_rollup_end_time_less_or_equal, limit, offset)
|
||||
}
|
||||
|
||||
func (rx *Rx) Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
||||
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
|
||||
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
||||
@ -11933,21 +11427,6 @@ type Methods interface {
|
||||
optional BucketMetainfo_Create_Fields) (
|
||||
bucket_metainfo *BucketMetainfo, err error)
|
||||
|
||||
Create_BucketUsage(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
|
||||
bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
|
||||
bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
|
||||
bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
|
||||
bucket_usage_objects BucketUsage_Objects_Field,
|
||||
bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
|
||||
bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
|
||||
bucket_usage_get_egress BucketUsage_GetEgress_Field,
|
||||
bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
|
||||
bucket_usage *BucketUsage, err error)
|
||||
|
||||
Create_CoinpaymentsTransaction(ctx context.Context,
|
||||
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
||||
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
||||
@ -12059,10 +11538,6 @@ type Methods interface {
|
||||
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
||||
deleted bool, err error)
|
||||
|
||||
Delete_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
deleted bool, err error)
|
||||
|
||||
Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
|
||||
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
||||
deleted bool, err error)
|
||||
@ -12174,10 +11649,6 @@ type Methods interface {
|
||||
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
||||
bucket_metainfo *BucketMetainfo, err error)
|
||||
|
||||
Get_BucketUsage_By_Id(ctx context.Context,
|
||||
bucket_usage_id BucketUsage_Id_Field) (
|
||||
bucket_usage *BucketUsage, err error)
|
||||
|
||||
Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
||||
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
||||
graceful_exit_progress *GracefulExitProgress, err error)
|
||||
@ -12270,20 +11741,6 @@ type Methods interface {
|
||||
limit int, offset int64) (
|
||||
rows []*BucketMetainfo, err error)
|
||||
|
||||
Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error)
|
||||
|
||||
Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
||||
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
||||
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
||||
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*BucketUsage, err error)
|
||||
|
||||
Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
||||
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
|
||||
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
||||
|
@ -40,21 +40,6 @@ CREATE TABLE bucket_storage_tallies (
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE bucket_usages (
|
||||
id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
rollup_end_time timestamp with time zone NOT NULL,
|
||||
remote_stored_data bigint NOT NULL,
|
||||
inline_stored_data bigint NOT NULL,
|
||||
remote_segments integer NOT NULL,
|
||||
inline_segments integer NOT NULL,
|
||||
objects integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
repair_egress bigint NOT NULL,
|
||||
get_egress bigint NOT NULL,
|
||||
audit_egress bigint NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE coinpayments_transactions (
|
||||
id text NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
@ -315,7 +300,6 @@ CREATE TABLE user_credits (
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
|
||||
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
|
||||
|
@ -1352,6 +1352,14 @@ func (db *DB) PostgresMigration() *migrate.Migration {
				);`,
			},
		},
		{
			DB:          db.db,
			Description: "Removing unused bucket_usages table",
			Version:     64,
			Action: migrate.SQL{
				`DROP TABLE bucket_usages CASCADE;`,
			},
		},
	},
}
}
@ -14,6 +14,9 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that projectMembers implements console.ProjectMembers.
var _ console.ProjectMembers = (*projectMembers)(nil)

// ProjectMembers exposes methods to manage ProjectMembers table in database.
type projectMembers struct {
	methods dbx.Methods
@ -14,6 +14,9 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that projects implements console.Projects.
var _ console.Projects = (*projects)(nil)

// implementation of Projects interface repository using spacemonkeygo/dbx orm
type projects struct {
	db dbx.Methods
@ -13,6 +13,9 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that registrationTokens implements console.RegistrationTokens.
var _ console.RegistrationTokens = (*registrationTokens)(nil)

// registrationTokens is an implementation of RegistrationTokens interface using spacemonkeygo/dbx orm
type registrationTokens struct {
	db dbx.Methods
@ -13,6 +13,9 @@ import (
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that resetPasswordTokens implements console.ResetPasswordTokens.
var _ console.ResetPasswordTokens = (*resetPasswordTokens)(nil)

type resetPasswordTokens struct {
	db dbx.Methods
}
satellite/satellitedb/testdata/postgres.v64.sql (vendored, new file, 405 lines)
@ -0,0 +1,405 @@
|
||||
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
|
||||
-- DO NOT EDIT
|
||||
CREATE TABLE accounting_rollups
|
||||
(
|
||||
id bigserial NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
put_total bigint NOT NULL,
|
||||
get_total bigint NOT NULL,
|
||||
get_audit_total bigint NOT NULL,
|
||||
get_repair_total bigint NOT NULL,
|
||||
put_repair_total bigint NOT NULL,
|
||||
at_rest_total double precision NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE accounting_timestamps
|
||||
(
|
||||
name text NOT NULL,
|
||||
value timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (name)
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollups
|
||||
(
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY (bucket_name, project_id, interval_start, action)
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies
|
||||
(
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
remote bigint NOT NULL,
|
||||
remote_segments_count integer NOT NULL,
|
||||
inline_segments_count integer NOT NULL,
|
||||
object_count integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY (bucket_name, project_id, interval_start)
|
||||
);
|
||||
CREATE TABLE injuredsegments
|
||||
(
|
||||
path bytea NOT NULL,
|
||||
data bytea NOT NULL,
|
||||
attempted timestamp,
|
||||
PRIMARY KEY (path)
|
||||
);
|
||||
CREATE TABLE irreparabledbs
|
||||
(
|
||||
segmentpath bytea NOT NULL,
|
||||
segmentdetail bytea NOT NULL,
|
||||
pieces_lost_count bigint NOT NULL,
|
||||
seg_damaged_unix_sec bigint NOT NULL,
|
||||
repair_attempt_count bigint NOT NULL,
|
||||
PRIMARY KEY (segmentpath)
|
||||
);
|
||||
CREATE TABLE nodes
|
||||
(
|
||||
id bytea NOT NULL,
|
||||
address text NOT NULL,
|
||||
last_net text NOT NULL,
|
||||
protocol integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
email text NOT NULL,
|
||||
wallet text NOT NULL,
|
||||
free_bandwidth bigint NOT NULL,
|
||||
free_disk bigint NOT NULL,
|
||||
piece_count bigint NOT NULL,
|
||||
major bigint NOT NULL,
|
||||
minor bigint NOT NULL,
|
||||
patch bigint NOT NULL,
|
||||
hash text NOT NULL,
|
||||
timestamp timestamp with time zone NOT NULL,
|
||||
release boolean NOT NULL,
|
||||
latency_90 bigint NOT NULL,
|
||||
audit_success_count bigint NOT NULL,
|
||||
total_audit_count bigint NOT NULL,
|
||||
uptime_success_count bigint NOT NULL,
|
||||
total_uptime_count bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
last_contact_success timestamp with time zone NOT NULL,
|
||||
last_contact_failure timestamp with time zone NOT NULL,
|
||||
contained boolean NOT NULL,
|
||||
disqualified timestamp with time zone,
|
||||
audit_reputation_alpha double precision NOT NULL,
|
||||
audit_reputation_beta double precision NOT NULL,
|
||||
uptime_reputation_alpha double precision NOT NULL,
|
||||
uptime_reputation_beta double precision NOT NULL,
|
||||
exit_initiated_at timestamp,
|
||||
exit_loop_completed_at timestamp,
|
||||
exit_finished_at timestamp,
|
||||
exit_success boolean NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE offers
|
||||
(
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
award_credit_in_cents integer NOT NULL,
|
||||
invitee_credit_in_cents integer NOT NULL,
|
||||
award_credit_duration_days integer,
|
||||
invitee_credit_duration_days integer,
|
||||
redeemable_cap integer,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE peer_identities
|
||||
(
|
||||
node_id bytea NOT NULL,
|
||||
leaf_serial_number bytea NOT NULL,
|
||||
chain bytea NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (node_id)
|
||||
);
|
||||
CREATE TABLE pending_audits
|
||||
(
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY (node_id)
|
||||
);
|
||||
CREATE TABLE projects
|
||||
(
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint NOT NULL,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE registration_tokens
|
||||
(
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (secret),
|
||||
UNIQUE (owner_id)
|
||||
);
|
||||
CREATE TABLE reset_password_tokens
|
||||
(
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (secret),
|
||||
UNIQUE (owner_id)
|
||||
);
|
||||
CREATE TABLE serial_numbers
|
||||
(
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups
|
||||
(
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY (storagenode_id, interval_start, action)
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies
|
||||
(
|
||||
id bigserial NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
email text NOT NULL,
|
||||
normalized_email text NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE value_attributions
(
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp NOT NULL,
PRIMARY KEY (project_id, bucket_name)
);
CREATE TABLE api_keys
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id),
UNIQUE (head),
UNIQUE (name, project_id)
);
CREATE TABLE bucket_metainfos
(
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects (id),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY (id),
UNIQUE (name, project_id)
);
CREATE TABLE project_invoice_stamps
(
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
invoice_id bytea NOT NULL,
start_date timestamp with time zone NOT NULL,
end_date timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (project_id, start_date, end_date),
UNIQUE (invoice_id)
);
CREATE TABLE project_members
(
member_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (member_id, project_id)
);
CREATE TABLE used_serials
(
serial_number_id integer NOT NULL REFERENCES serial_numbers (id) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY (serial_number_id, storage_node_id)
);
CREATE TABLE user_credits
(
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users (id) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers (id),
referred_by bytea REFERENCES users (id) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL,
pieces_failed bigint NOT NULL,
updated_at timestamp NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
durability_ratio double precision NOT NULL,
queued_at timestamp NOT NULL,
requested_at timestamp,
last_failed_at timestamp,
last_failed_code integer,
failed_count integer,
finished_at timestamp,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );

CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );

CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits (id, offer_id) WHERE credits_earned_in_cents=0;
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100, false);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "status", "type") VALUES ('testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0);
INSERT INTO "offers" ("name","description","award_credit_in_cents","award_credit_duration_days", "invitee_credit_in_cents","invitee_credit_duration_days", "expires_at","created_at","status","type") VALUES ('Default free credit offer','Is active when no active free credit offer',0, NULL,300, 14, '2119-03-14 08:28:24.636949+00','2019-07-14 08:28:24.636949+00',1,1);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103');
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103', '2019-09-12 10:07:32.028103', null, null, 0, '2019-09-12 10:07:33.028103');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
-- NEW DATA --
@ -17,6 +17,9 @@ import (
dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that usagerollups implements console.UsageRollups.
var _ console.UsageRollups = (*usagerollups)(nil)

// usagerollups implements console.UsageRollups
type usagerollups struct {
db *dbx.DB
@ -18,6 +18,9 @@ import (
dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that usercredits implements console.UserCredits.
var _ console.UserCredits = (*usercredits)(nil)

type usercredits struct {
db *dbx.DB
tx *dbx.Tx
@ -14,6 +14,9 @@ import (
dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// ensures that users implements console.Users.
var _ console.Users = (*users)(nil)

// implementation of Users interface repository using spacemonkeygo/dbx orm
type users struct {
db dbx.Methods