satellite: prevents uplink from creating a bucket once it exceeds the max bucket allocation.
Change-Id: I4b3822ed723c03dbbc0df136b2201027e19ba0cd
parent 62fec25104
commit 784a156eea
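
In short: the satellite's CreateBucket now enforces a per-project bucket allocation. It reads the project's stored maximum (GetMaxBuckets), treats 0 as "no per-project override" and falls back to the new metainfo.ProjectLimits.MaxBuckets default of 100, counts the project's existing buckets, and rejects the request with ResourceExhausted once the count reaches the limit. A minimal runnable sketch of just the fallback rule; the helper name here is illustrative, not part of the commit:

    package main

    import "fmt"

    // effectiveMaxBuckets mirrors the fallback added in CreateBucket below:
    // a stored per-project value of 0 means "no override", so the
    // satellite-wide ProjectLimits.MaxBuckets default applies instead.
    func effectiveMaxBuckets(perProject, configDefault int) int {
    	if perProject == 0 {
    		return configDefault
    	}
    	return perProject
    }

    func main() {
    	fmt.Println(effectiveMaxBuckets(0, 100))  // 100: project row has no override
    	fmt.Println(effectiveMaxBuckets(25, 100)) // 25: per-project limit wins
    }

The actual guard lands in the satellite metainfo endpoint's CreateBucket, with the count coming from a new CountBuckets query; both appear in the hunks below.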
@@ -168,6 +168,9 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er
		if err != nil {
			return errs.Wrap(err)
		}
		if isAutogeneratedCockroachIndex(index) {
			continue
		}
		schema.Indexes = append(schema.Indexes, index)
	}

@@ -272,3 +275,23 @@ func parseIndexDefinition(indexdef string) (*dbschema.Index, error) {
		Columns: strings.Split(indexDirRemove.Replace(matches[4]), ", "),
	}, nil
}

// hackity hack:
//
// Cockroach sometimes creates automatic indexes to enforce foreign key
// relationships, if it doesn't think the need is already met by other
// indexes. If you then add the other indexes after creating the table,
// the auto-generated index does not go away. So you get different results
// when establishing one table with a set of constraints over multiple
// steps, versus creating that same table with the same set of constraints
// all at once. Unfortunately, our system wants very much for those two
// paths to produce exactly the same result.
//
// This should make it so that we disregard the difference in the cases
// that it arises.
//
// See above for an important lesson about going in against a database when
// death is on the line.
func isAutogeneratedCockroachIndex(index *dbschema.Index) bool {
	return strings.Contains(index.Name, "_auto_index_fk_")
}
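
Note (not part of the commit): the skip above is purely name-based, keying on the "_auto_index_fk_" substring CockroachDB puts into the names of indexes it auto-creates for foreign keys. A small runnable illustration, using the same index name the updated test below expects; the second name is just an ordinary-looking index name for contrast:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Same substring test as isAutogeneratedCockroachIndex above.
    	for _, name := range []string{
    		"names_auto_index_fk_users_a_ref_users", // Cockroach FK auto-index: skipped
    		"names_pkey",                            // ordinary index name: kept
    	} {
    		fmt.Printf("%-40s %v\n", name, strings.Contains(name, "_auto_index_fk_"))
    	}
    }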

@@ -11,7 +11,6 @@ import (
	"github.com/stretchr/testify/require"

	"storj.io/common/testcontext"
	"storj.io/storj/private/dbutil"
	"storj.io/storj/private/dbutil/dbschema"
	"storj.io/storj/private/dbutil/pgtest"
	"storj.io/storj/private/dbutil/pgutil"

@@ -94,14 +93,6 @@ func TestQuery(t *testing.T) {
		},
	}

	if db.Implementation == dbutil.Cockroach {
		expected.Indexes = append(expected.Indexes, &dbschema.Index{
			Name:    "names_auto_index_fk_users_a_ref_users",
			Table:   "names",
			Columns: []string{"users_a"},
		})
	}

	expected.Sort()
	schema.Sort()
	assert.Equal(t, expected, schema)

@@ -443,6 +443,11 @@ func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtes
				CacheCapacity:   100,
				CacheExpiration: 10 * time.Second,
			},
			ProjectLimits: metainfo.ProjectLimitConfig{
				MaxBuckets:          1000,
				DefaultMaxUsage:     25 * memory.GB,
				DefaultMaxBandwidth: 25 * memory.GB,
			},
			PieceDeletion: piecedeletion.Config{
				MaxConcurrency: 100,

@@ -519,8 +524,6 @@ func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtes
			},
			Rollup: rollup.Config{
				Interval:            defaultInterval,
				DefaultMaxUsage:     25 * memory.GB,
				DefaultMaxBandwidth: 25 * memory.GB,
				DeleteTallies:       false,
			},
			ReportedRollup: reportedrollup.Config{

@@ -40,8 +40,8 @@ func TestProjectUsageStorage(t *testing.T) {
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Rollup.DefaultMaxUsage = 1 * memory.MB
				config.Rollup.DefaultMaxBandwidth = 1 * memory.MB
				config.Metainfo.ProjectLimits.DefaultMaxUsage = 1 * memory.MB
				config.Metainfo.ProjectLimits.DefaultMaxBandwidth = 1 * memory.MB
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

@@ -9,7 +9,6 @@ import (

	"go.uber.org/zap"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/sync2"

@@ -19,8 +18,6 @@ import (
// Config contains configurable values for rollup
type Config struct {
	Interval            time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"`
	DefaultMaxUsage     memory.Size   `help:"the default storage usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	DefaultMaxBandwidth memory.Size   `help:"the default bandwidth usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	DeleteTallies       bool          `help:"option for deleting tallies after they are rolled up" default:"true"`
}

@@ -297,8 +297,8 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
		peer.Accounting.ProjectUsage = accounting.NewService(
			peer.DB.ProjectAccounting(),
			peer.LiveAccounting.Cache,
			config.Rollup.DefaultMaxUsage,
			config.Rollup.DefaultMaxBandwidth,
			config.Metainfo.ProjectLimits.DefaultMaxUsage,
			config.Metainfo.ProjectLimits.DefaultMaxBandwidth,
			config.LiveAccounting.BandwidthCacheTTL,
		)
	}

@@ -35,6 +35,9 @@ type Projects interface {

	// UpdateRateLimit is a method for updating projects rate limit.
	UpdateRateLimit(ctx context.Context, id uuid.UUID, newLimit int) error

	// GetMaxBuckets is a method to get the maximum number of buckets allowed for the project
	GetMaxBuckets(ctx context.Context, id uuid.UUID) (int, error)
}

// Project is a database object that describes Project entity
@@ -46,8 +49,8 @@ type Project struct {
	PartnerID uuid.UUID `json:"partnerId"`
	OwnerID   uuid.UUID `json:"ownerId"`
	RateLimit *int      `json:"rateLimit"`

	CreatedAt time.Time `json:"createdAt"`
	MaxBuckets int       `json:"maxBuckets"`
	CreatedAt  time.Time `json:"createdAt"`
}

// ProjectInfo holds data needed to create/update Project

@@ -234,3 +234,16 @@ func TestProjectsList(t *testing.T) {
		})))
	})
}

func TestGetMaxBuckets(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		maxCount := 100
		consoleDB := db.Console()
		project, err := consoleDB.Projects().Insert(ctx, &console.Project{Name: "testproject1", MaxBuckets: maxCount})
		require.NoError(t, err)
		projectsDB := db.Console().Projects()
		max, err := projectsDB.GetMaxBuckets(ctx, project.ID)
		require.NoError(t, err)
		require.Equal(t, maxCount, max)
	})
}

@@ -247,8 +247,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
		peer.Accounting.ProjectUsage = accounting.NewService(
			peer.DB.ProjectAccounting(),
			peer.LiveAccounting.Cache,
			config.Rollup.DefaultMaxUsage,
			config.Rollup.DefaultMaxBandwidth,
			config.Metainfo.ProjectLimits.DefaultMaxUsage,
			config.Metainfo.ProjectLimits.DefaultMaxBandwidth,
			config.LiveAccounting.BandwidthCacheTTL,
		)
	}

@@ -46,6 +46,13 @@ type RateLimiterConfig struct {
	CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
}

// ProjectLimitConfig is a configuration struct for default project limits
type ProjectLimitConfig struct {
	MaxBuckets          int         `help:"max bucket count for a project." default:"100"`
	DefaultMaxUsage     memory.Size `help:"the default storage usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" releaseDefault:"50GB" devDefault:"200GB"`
}
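
Note (reviewer aside, not part of the commit): through the help/default tags these fields join the satellite's generated configuration. Assuming the usual dotted kebab-case key derivation (an inference from the struct path, not verified in this commit), the new default would surface roughly as:

    # illustrative config key, derived from Config.ProjectLimits.MaxBuckets
    metainfo.project-limits.max-buckets: 100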

// Config is a configuration struct that is everything you need to start a metainfo
type Config struct {
	DatabaseURL string `help:"the database connection string to use" default:"postgres://"`

@@ -58,6 +65,7 @@ type Config struct {
	RS            RSConfig             `help:"redundancy scheme configuration"`
	Loop          LoopConfig           `help:"loop configuration"`
	RateLimiter   RateLimiterConfig    `help:"rate limiter configuration"`
	ProjectLimits ProjectLimitConfig   `help:"project limit configuration"`
	PieceDeletion piecedeletion.Config `help:"piece deletion configuration"`
}

@@ -25,4 +25,6 @@ type BucketsDB interface {
	DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
	// List returns all buckets for a project
	ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error)
	// CountBuckets returns the number of buckets a project currently has
	CountBuckets(ctx context.Context, projectID uuid.UUID) (int, error)
}

@@ -49,6 +49,10 @@ func TestBasicBucketOperations(t *testing.T) {
		bucketsDB := db.Buckets()
		expectedBucket := newTestBucket("testbucket", project.ID)

		count, err := bucketsDB.CountBuckets(ctx, project.ID)
		require.NoError(t, err)
		require.Equal(t, 0, count)

		// CreateBucket
		_, err = bucketsDB.CreateBucket(ctx, expectedBucket)
		require.NoError(t, err)

@@ -64,6 +68,16 @@ func TestBasicBucketOperations(t *testing.T) {
		require.Equal(t, expectedBucket.DefaultRedundancyScheme, bucket.DefaultRedundancyScheme)
		require.Equal(t, expectedBucket.DefaultEncryptionParameters, bucket.DefaultEncryptionParameters)

		//CountBuckets
		count, err = bucketsDB.CountBuckets(ctx, project.ID)
		require.NoError(t, err)
		require.Equal(t, 1, count)
		_, err = bucketsDB.CreateBucket(ctx, newTestBucket("testbucket2", project.ID))
		require.NoError(t, err)
		count, err = bucketsDB.CountBuckets(ctx, project.ID)
		require.NoError(t, err)
		require.Equal(t, 2, count)

		// DeleteBucket
		err = bucketsDB.DeleteBucket(ctx, []byte("testbucket"), project.ID)
		require.NoError(t, err)

@@ -55,7 +55,7 @@ var (
	ErrNodeAlreadyExists = errs.Class("metainfo error: node already exists")
)

// APIKeys is api keys store methods used by endpoint
// APIKeys is api keys store methods used by endpoint.
//
// architecture: Database
type APIKeys interface {

@@ -124,7 +124,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
	}, nil
}

// Close closes resources
// Close closes resources.
func (endpoint *Endpoint) Close() error { return nil }

func calculateSpaceUsed(ptr *pb.Pointer) (segmentSize, totalStored int64) {

@@ -234,7 +234,7 @@ func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex int64, bu
	return storj.JoinPaths(entries...), nil
}

// ProjectInfo returns allowed ProjectInfo for the provided API key
// ProjectInfo returns allowed ProjectInfo for the provided API key.
func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -253,7 +253,7 @@ func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRe
	}, nil
}

// GetBucket returns a bucket
// GetBucket returns a bucket.
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -275,7 +275,7 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
	}

	// override RS to fit satellite settings
	convBucket, err := convertBucketToProto(ctx, bucket, endpoint.redundancyScheme())
	convBucket, err := convertBucketToProto(bucket, endpoint.redundancyScheme())
	if err != nil {
		return resp, err
	}

@@ -285,7 +285,7 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
	}, nil
}

// CreateBucket creates a new bucket
// CreateBucket creates a new bucket.
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -316,14 +316,30 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	bucket, err := convertProtoToBucket(req, keyInfo.ProjectID)
	// check if project has exceeded its allocated bucket limit
	maxBuckets, err := endpoint.projects.GetMaxBuckets(ctx, keyInfo.ProjectID)
	if err != nil {
		return nil, err
	}
	if maxBuckets == 0 {
		maxBuckets = endpoint.config.ProjectLimits.MaxBuckets
	}
	bucketCount, err := endpoint.metainfo.CountBuckets(ctx, keyInfo.ProjectID)
	if err != nil {
		return nil, err
	}
	if bucketCount >= maxBuckets {
		return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, fmt.Sprintf("number of allocated buckets (%d) exceeded", endpoint.config.ProjectLimits.MaxBuckets))
	}

	bucketReq, err := convertProtoToBucket(req, keyInfo.ProjectID)
	if err != nil {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
	}

	bucket, err = endpoint.metainfo.CreateBucket(ctx, bucket)
	bucket, err := endpoint.metainfo.CreateBucket(ctx, bucketReq)
	if err != nil {
		endpoint.log.Error("error while creating bucket", zap.String("bucketName", bucket.Name), zap.Error(err))
		endpoint.log.Error("error while creating bucket", zap.String("bucketName", bucketReq.Name), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create bucket")
	}

@@ -333,7 +349,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
	}

	// override RS to fit satellite settings
	convBucket, err := convertBucketToProto(ctx, bucket, endpoint.redundancyScheme())
	convBucket, err := convertBucketToProto(bucket, endpoint.redundancyScheme())
	if err != nil {
		endpoint.log.Error("error while converting bucket to proto", zap.String("bucketName", bucket.Name), zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create bucket")

@@ -344,7 +360,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
	}, nil
}

// DeleteBucket deletes a bucket
// DeleteBucket deletes a bucket.
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -404,7 +420,7 @@ func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDelete
		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	convBucket, err := convertBucketToProto(ctx, bucket, endpoint.redundancyScheme())
	convBucket, err := convertBucketToProto(bucket, endpoint.redundancyScheme())
	if err != nil {
		return nil, err
	}

@@ -412,7 +428,7 @@ func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDelete
	return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
}

// ListBuckets returns buckets in a project where the bucket name matches the request cursor
// ListBuckets returns buckets in a project where the bucket name matches the request cursor.
func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (resp *pb.BucketListResponse, err error) {
	defer mon.Task()(&ctx)(&err)
	action := macaroon.Action{

@@ -455,6 +471,16 @@ func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListReq
	}, nil
}

// CountBuckets returns the number of buckets a project currently has.
// TODO: add this to the uplink client side.
func (endpoint *Endpoint) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
	count, err = endpoint.metainfo.CountBuckets(ctx, projectID)
	if err != nil {
		return 0, err
	}
	return count, nil
}

func getAllowedBuckets(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ macaroon.AllowedBuckets, err error) {
	key, err := getAPIKey(ctx, header)
	if err != nil {

@@ -508,7 +534,7 @@ func convertProtoToBucket(req *pb.BucketCreateRequest, projectID uuid.UUID) (buc
	}, nil
}

func convertBucketToProto(ctx context.Context, bucket storj.Bucket, rs *pb.RedundancyScheme) (pbBucket *pb.Bucket, err error) {
func convertBucketToProto(bucket storj.Bucket, rs *pb.RedundancyScheme) (pbBucket *pb.Bucket, err error) {
	if bucket == (storj.Bucket{}) {
		return nil, nil
	}

@@ -544,7 +570,7 @@ func convertBucketToProto(ctx context.Context, bucket storj.Bucket, rs *pb.Redun
	return pbBucket, nil
}

// BeginObject begins object
// BeginObject begins object.
func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -614,8 +640,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
	}, nil
}

// CommitObject commits an object when all its segments have already been
// committed.
// CommitObject commits an object when all its segments have already been committed.
func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommitRequest) (resp *pb.ObjectCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -707,7 +732,7 @@ func (endpoint *Endpoint) commitObject(ctx context.Context, req *pb.ObjectCommit
	return &pb.ObjectCommitResponse{}, nil
}

// GetObject gets single object
// GetObject gets single object.
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -816,7 +841,7 @@ func (endpoint *Endpoint) getObject(ctx context.Context, projectID uuid.UUID, bu
	return object, nil
}

// ListObjects list objects according to specific parameters
// ListObjects list objects according to specific parameters.
func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListRequest) (resp *pb.ObjectListResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -964,7 +989,7 @@ func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectB
	}, nil
}

// FinishDeleteObject finishes object deletion
// FinishDeleteObject finishes object deletion.
func (endpoint *Endpoint) FinishDeleteObject(ctx context.Context, req *pb.ObjectFinishDeleteRequest) (resp *pb.ObjectFinishDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -998,7 +1023,7 @@ func (endpoint *Endpoint) FinishDeleteObject(ctx context.Context, req *pb.Object
	return &pb.ObjectFinishDeleteResponse{}, nil
}

// BeginSegment begins segment uploading
// BeginSegment begins segment uploading.
func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBeginRequest) (resp *pb.SegmentBeginResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1074,7 +1099,7 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
	}, nil
}

// CommitSegment commits segment after uploading
// CommitSegment commits segment after uploading.
func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentCommitRequest) (resp *pb.SegmentCommitResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1224,7 +1249,7 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
	}, nil
}

// MakeInlineSegment makes inline segment on satellite
// MakeInlineSegment makes inline segment on satellite.
func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.SegmentMakeInlineRequest) (resp *pb.SegmentMakeInlineResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1315,7 +1340,7 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
	return pointer, &pb.SegmentMakeInlineResponse{}, nil
}

// BeginDeleteSegment begins segment deletion process
// BeginDeleteSegment begins segment deletion process.
func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.SegmentBeginDeleteRequest) (resp *pb.SegmentBeginDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1373,7 +1398,7 @@ func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.Segmen
	}, nil
}

// FinishDeleteSegment finishes segment deletion process
// FinishDeleteSegment finishes segment deletion process.
func (endpoint *Endpoint) FinishDeleteSegment(ctx context.Context, req *pb.SegmentFinishDeleteRequest) (resp *pb.SegmentFinishDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1399,7 +1424,7 @@ func (endpoint *Endpoint) FinishDeleteSegment(ctx context.Context, req *pb.Segme
	return &pb.SegmentFinishDeleteResponse{}, nil
}

// ListSegments list object segments
// ListSegments list object segments.
func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListRequest) (resp *pb.SegmentListResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1443,14 +1468,14 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
	if streamMeta.NumberOfSegments > 0 {
		// use unencrypted number of segments
		// TODO cleanup int32 vs int64
		return endpoint.listSegmentsFromNumberOfSegments(ctx, int32(streamMeta.NumberOfSegments), req.CursorPosition.Index, limit)
		return endpoint.listSegmentsFromNumberOfSegments(int32(streamMeta.NumberOfSegments), req.CursorPosition.Index, limit)
	}

	// list segments by requesting each segment from cursor index to n until n segment is not found
	return endpoint.listSegmentsManually(ctx, keyInfo.ProjectID, streamID, req.CursorPosition.Index, limit)
}

func (endpoint *Endpoint) listSegmentsFromNumberOfSegments(ctx context.Context, numberOfSegments, cursorIndex, limit int32) (resp *pb.SegmentListResponse, err error) {
func (endpoint *Endpoint) listSegmentsFromNumberOfSegments(numberOfSegments, cursorIndex, limit int32) (resp *pb.SegmentListResponse, err error) {
	if numberOfSegments <= 0 {
		endpoint.log.Error(
			"Invalid number of segments; this function requires the value to be greater than 0",

@@ -1566,7 +1591,7 @@ func (endpoint *Endpoint) listSegmentsManually(ctx context.Context, projectID uu
	}, nil
}

// DownloadSegment returns data necessary to download segment
// DownloadSegment returns data necessary to download segment.
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
	defer mon.Task()(&ctx)(&err)

@@ -1734,17 +1759,17 @@ func (endpoint *Endpoint) getObjectNumberOfSegments(ctx context.Context, project
		return 0, err
	}

	meta := &pb.StreamMeta{}
	err = pb.Unmarshal(pointer.Metadata, meta)
	metaData := &pb.StreamMeta{}
	err = pb.Unmarshal(pointer.Metadata, metaData)
	if err != nil {
		endpoint.log.Error("error unmarshaling pointer metadata", zap.Error(err))
		return 0, rpcstatus.Error(rpcstatus.Internal, "unable to unmarshal metadata")
	}

	return meta.NumberOfSegments, nil
	return metaData.NumberOfSegments, nil
}

// sortLimits sorts order limits and fill missing ones with nil values
// sortLimits sorts order limits and fill missing ones with nil values.
func sortLimits(limits []*pb.AddressedOrderLimit, pointer *pb.Pointer) []*pb.AddressedOrderLimit {
	sorted := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal())
	for _, piece := range pointer.GetRemote().GetRemotePieces() {

@@ -5,6 +5,7 @@ package metainfo_test

import (
	"errors"
	"fmt"
	"io/ioutil"
	"sort"
	"strconv"

@@ -35,6 +36,21 @@ import (
	"storj.io/uplink/private/testuplink"
)

func TestMaxOutBuckets(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		limit := planet.Satellites[0].Config.Metainfo.ProjectLimits.MaxBuckets
		for i := 1; i <= limit; i++ {
			name := "test" + strconv.Itoa(i)
			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], name)
			require.NoError(t, err)
		}
		err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], fmt.Sprintf("test%d", limit+1))
		require.EqualError(t, err, fmt.Sprintf("uplink: bucket: metainfo error: number of allocated buckets (%d) exceeded", limit))
	})
}
||||
|
||||
func TestRevokeAccess(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
|
||||
|
@ -400,3 +400,9 @@ func (s *Service) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
return s.bucketsDB.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
|
||||
}
|
||||
|
||||
// CountBuckets returns the number of buckets a project currently has
|
||||
func (s *Service) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
return s.bucketsDB.CountBuckets(ctx, projectID)
|
||||
}
|
||||
|
@ -127,3 +127,28 @@ func hasDuplicates(pieces []*pb.RemotePiece) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func TestCountBuckets(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
saPeer := planet.Satellites[0]
|
||||
uplinkPeer := planet.Uplinks[0]
|
||||
projectID := planet.Uplinks[0].Projects[0].ID
|
||||
count, err := saPeer.Metainfo.Service.CountBuckets(ctx, projectID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, count)
|
||||
// Setup: create 2 test buckets
|
||||
err = uplinkPeer.CreateBucket(ctx, saPeer, "test1")
|
||||
require.NoError(t, err)
|
||||
count, err = saPeer.Metainfo.Service.CountBuckets(ctx, projectID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, count)
|
||||
|
||||
err = uplinkPeer.CreateBucket(ctx, saPeer, "test2")
|
||||
require.NoError(t, err)
|
||||
count, err = saPeer.Metainfo.Service.CountBuckets(ctx, projectID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, count)
|
||||
})
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ func (page *TransactionsPage) IDList() TransactionAndUserList {
|
||||
|
||||
// CreationTimes returns a map of creation times of page's transactions.
|
||||
func (page *TransactionsPage) CreationTimes() map[coinpayments.TransactionID]time.Time {
|
||||
var creationTimes map[coinpayments.TransactionID]time.Time
|
||||
creationTimes := make(map[coinpayments.TransactionID]time.Time)
|
||||
for _, tx := range page.Transactions {
|
||||
creationTimes[tx.ID] = tx.CreatedAt
|
||||
}
|
||||
|
@ -19,12 +19,12 @@ type bucketsDB struct {
|
||||
db *satelliteDB
|
||||
}
|
||||
|
||||
// Buckets returns database for interacting with buckets
|
||||
// Buckets returns database for interacting with buckets.
|
||||
func (db *satelliteDB) Buckets() metainfo.BucketsDB {
|
||||
return &bucketsDB{db: db}
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket
|
||||
// CreateBucket creates a new bucket.
|
||||
func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -62,7 +62,7 @@ func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ s
|
||||
return bucket, nil
|
||||
}
|
||||
|
||||
// GetBucket returns a bucket
|
||||
// GetBucket returns a bucket.
|
||||
func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
dbxBucket, err := db.db.Get_BucketMetainfo_By_ProjectId_And_Name(ctx,
|
||||
@ -78,7 +78,7 @@ func (db *bucketsDB) GetBucket(ctx context.Context, bucketName []byte, projectID
|
||||
return convertDBXtoBucket(dbxBucket)
|
||||
}
|
||||
|
||||
// UpdateBucket upates a bucket
|
||||
// UpdateBucket updates a bucket.
|
||||
func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -96,7 +96,7 @@ func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ s
|
||||
return convertDBXtoBucket(dbxBucket)
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket
|
||||
// DeleteBucket deletes a bucket.
|
||||
func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
deleted, err := db.db.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx,
|
||||
@ -112,7 +112,7 @@ func (db *bucketsDB) DeleteBucket(ctx context.Context, bucketName []byte, projec
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListBuckets returns a list of buckets for a project
|
||||
// ListBuckets returns a list of buckets for a project.
|
||||
func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -189,6 +189,15 @@ func (db *bucketsDB) ListBuckets(ctx context.Context, projectID uuid.UUID, listO
|
||||
return bucketList, nil
|
||||
}
|
||||
|
||||
// CountBuckets returns the number of buckets a project currently has.
|
||||
func (db *bucketsDB) CountBuckets(ctx context.Context, projectID uuid.UUID) (count int, err error) {
|
||||
count64, err := db.db.Count_BucketMetainfo_Name_By_ProjectId(ctx, dbx.BucketMetainfo_ProjectId(projectID[:]))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return int(count64), nil
|
||||
}
|
||||
|
||||
func convertDBXtoBucket(dbxBucket *dbx.BucketMetainfo) (bucket storj.Bucket, err error) {
|
||||
id, err := uuid.FromBytes(dbxBucket.Id)
|
||||
if err != nil {
|
||||
|
@ -310,6 +310,7 @@ model project (
|
||||
field usage_limit int64 ( updatable, default 0 )
|
||||
field bandwidth_limit int64 ( updatable, default 0 )
|
||||
field rate_limit int ( nullable, updatable )
|
||||
field max_buckets int ( updatable, default 0 )
|
||||
field partner_id blob ( nullable )
|
||||
field owner_id blob
|
||||
|
||||
@ -332,6 +333,11 @@ read one (
|
||||
select project.bandwidth_limit
|
||||
where project.id = ?
|
||||
)
|
||||
read one (
|
||||
select project.max_buckets
|
||||
where project.id = ?
|
||||
)
|
||||
|
||||
read all (
|
||||
select project
|
||||
)
|
||||
@ -927,7 +933,8 @@ create user_credit ()
|
||||
|
||||
model bucket_metainfo (
|
||||
key id
|
||||
unique name project_id
|
||||
unique name project_id //to remove later
|
||||
unique project_id name
|
||||
|
||||
field id blob
|
||||
field project_id project.id restrict
|
||||
@ -982,6 +989,11 @@ read limitoffset ( // After
|
||||
orderby asc bucket_metainfo.name
|
||||
)
|
||||
|
||||
read count (
|
||||
select bucket_metainfo.name
|
||||
where bucket_metainfo.project_id = ?
|
||||
)
|
||||
|
||||
//--- graceful exit progress ---//
|
||||
|
||||
model graceful_exit_progress (
|
||||
|
@ -515,6 +515,7 @@ CREATE TABLE projects (
|
||||
usage_limit bigint NOT NULL DEFAULT 0,
|
||||
bandwidth_limit bigint NOT NULL DEFAULT 0,
|
||||
rate_limit integer,
|
||||
max_buckets integer NOT NULL DEFAULT 0,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
@ -686,7 +687,8 @@ CREATE TABLE bucket_metainfos (
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( name, project_id )
|
||||
UNIQUE ( name, project_id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_invoice_stamps (
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
@ -1046,6 +1048,7 @@ CREATE TABLE projects (
|
||||
usage_limit bigint NOT NULL DEFAULT 0,
|
||||
bandwidth_limit bigint NOT NULL DEFAULT 0,
|
||||
rate_limit integer,
|
||||
max_buckets integer NOT NULL DEFAULT 0,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
@ -1217,7 +1220,8 @@ CREATE TABLE bucket_metainfos (
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( name, project_id )
|
||||
UNIQUE ( name, project_id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_invoice_stamps (
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
@ -5314,6 +5318,7 @@ type Project struct {
|
||||
UsageLimit int64
|
||||
BandwidthLimit int64
|
||||
RateLimit *int
|
||||
MaxBuckets int
|
||||
PartnerId []byte
|
||||
OwnerId []byte
|
||||
CreatedAt time.Time
|
||||
@ -5325,6 +5330,7 @@ type Project_Create_Fields struct {
|
||||
UsageLimit Project_UsageLimit_Field
|
||||
BandwidthLimit Project_BandwidthLimit_Field
|
||||
RateLimit Project_RateLimit_Field
|
||||
MaxBuckets Project_MaxBuckets_Field
|
||||
PartnerId Project_PartnerId_Field
|
||||
}
|
||||
|
||||
@ -5334,6 +5340,7 @@ type Project_Update_Fields struct {
|
||||
UsageLimit Project_UsageLimit_Field
|
||||
BandwidthLimit Project_BandwidthLimit_Field
|
||||
RateLimit Project_RateLimit_Field
|
||||
MaxBuckets Project_MaxBuckets_Field
|
||||
}
|
||||
|
||||
type Project_Id_Field struct {
|
||||
@ -5463,6 +5470,25 @@ func (f Project_RateLimit_Field) value() interface{} {
|
||||
|
||||
func (Project_RateLimit_Field) _Column() string { return "rate_limit" }
|
||||
|
||||
type Project_MaxBuckets_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
_value int
|
||||
}
|
||||
|
||||
func Project_MaxBuckets(v int) Project_MaxBuckets_Field {
|
||||
return Project_MaxBuckets_Field{_set: true, _value: v}
|
||||
}
|
||||
|
||||
func (f Project_MaxBuckets_Field) value() interface{} {
|
||||
if !f._set || f._null {
|
||||
return nil
|
||||
}
|
||||
return f._value
|
||||
}
|
||||
|
||||
func (Project_MaxBuckets_Field) _Column() string { return "max_buckets" }
|
||||
|
||||
type Project_PartnerId_Field struct {
|
||||
_set bool
|
||||
_null bool
|
||||
@ -9002,6 +9028,10 @@ type LeafSerialNumber_Row struct {
|
||||
LeafSerialNumber []byte
|
||||
}
|
||||
|
||||
type MaxBuckets_Row struct {
|
||||
MaxBuckets int
|
||||
}
|
||||
|
||||
type Paged_PendingSerialQueue_Continuation struct {
|
||||
_value_storage_node_id []byte
|
||||
_value_bucket_id []byte
|
||||
@ -9453,7 +9483,7 @@ func (obj *pgxImpl) Create_Project(ctx context.Context,
|
||||
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?")}
|
||||
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, __id_val, __name_val, __description_val, __rate_limit_val, __partner_id_val, __owner_id_val, __created_at_val)
|
||||
@ -9473,6 +9503,12 @@ func (obj *pgxImpl) Create_Project(ctx context.Context,
|
||||
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
|
||||
}
|
||||
|
||||
if optional.MaxBuckets._set {
|
||||
__values = append(__values, optional.MaxBuckets.value())
|
||||
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("max_buckets"))
|
||||
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
|
||||
}
|
||||
|
||||
if len(__optional_columns.SQLs) == 0 {
|
||||
if __columns.SQL == nil {
|
||||
__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
|
||||
@ -9485,7 +9521,7 @@ func (obj *pgxImpl) Create_Project(ctx context.Context,
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
project = &Project{}
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -11059,7 +11095,7 @@ func (obj *pgxImpl) Get_Project_By_Id(ctx context.Context,
|
||||
project *Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_id.value())
|
||||
@ -11068,7 +11104,7 @@ func (obj *pgxImpl) Get_Project_By_Id(ctx context.Context,
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
project = &Project{}
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return (*Project)(nil), obj.makeErr(err)
|
||||
}
|
||||
@ -11120,11 +11156,33 @@ func (obj *pgxImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
|
||||
|
||||
}
|
||||
|
||||
func (obj *pgxImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
|
||||
project_id Project_Id_Field) (
|
||||
row *MaxBuckets_Row, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.max_buckets FROM projects WHERE projects.id = ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_id.value())
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
row = &MaxBuckets_Row{}
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&row.MaxBuckets)
|
||||
if err != nil {
|
||||
return (*MaxBuckets_Row)(nil), obj.makeErr(err)
|
||||
}
|
||||
return row, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *pgxImpl) All_Project(ctx context.Context) (
|
||||
rows []*Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
|
||||
|
||||
var __values []interface{}
|
||||
|
||||
@ -11139,7 +11197,7 @@ func (obj *pgxImpl) All_Project(ctx context.Context) (
|
||||
|
||||
for __rows.Next() {
|
||||
project := &Project{}
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -11157,7 +11215,7 @@ func (obj *pgxImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx cont
|
||||
rows []*Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_created_at_less.value())
|
||||
@ -11173,7 +11231,7 @@ func (obj *pgxImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx cont
|
||||
|
||||
for __rows.Next() {
|
||||
project := &Project{}
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -11191,7 +11249,7 @@ func (obj *pgxImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Con
|
||||
rows []*Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_owner_id.value())
|
||||
@ -11207,7 +11265,7 @@ func (obj *pgxImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Con
|
||||
|
||||
for __rows.Next() {
|
||||
project := &Project{}
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -11225,7 +11283,7 @@ func (obj *pgxImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Na
|
||||
rows []*Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_member_member_id.value())
|
||||
@ -11241,7 +11299,7 @@ func (obj *pgxImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Na
|
||||
|
||||
for __rows.Next() {
|
||||
project := &Project{}
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -11260,7 +11318,7 @@ func (obj *pgxImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
|
||||
rows []*Project, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, project_created_at_less.value())
|
||||
@ -11278,7 +11336,7 @@ func (obj *pgxImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
|
||||
|
||||
for __rows.Next() {
|
||||
project := &Project{}
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -12372,6 +12430,28 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy
|
||||
|
||||
}
|
||||
|
||||
func (obj *pgxImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
|
||||
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
|
||||
count int64, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literal("SELECT COUNT(*) FROM bucket_metainfos WHERE bucket_metainfos.project_id = ?")
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, bucket_metainfo_project_id.value())
|
||||
|
||||
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, obj.makeErr(err)
|
||||
}
|
||||
|
||||
return count, nil
|
||||
|
||||
}
|
||||
|
||||
func (obj *pgxImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
||||
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
||||
graceful_exit_progress *GracefulExitProgress, err error) {
|
||||
@ -13762,7 +13842,7 @@ func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context,
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
var __sets = &__sqlbundle_Hole{}
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
|
||||
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
||||
var __values []interface{}
|
||||
@ -13793,6 +13873,11 @@ func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context,
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("rate_limit = ?"))
|
||||
}
|
||||
|
||||
if update.MaxBuckets._set {
|
||||
__values = append(__values, update.MaxBuckets.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("max_buckets = ?"))
|
||||
}
|
||||
|
||||
if len(__sets_sql.SQLs) == 0 {
|
||||
return nil, emptyUpdate()
|
||||
}
|
||||
@ -13806,7 +13891,7 @@ func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context,
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
project = &Project{}
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
@ -15964,7 +16049,7 @@ func (obj *pgxcockroachImpl) Create_Project(ctx context.Context,
|
||||
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?")}
|
||||
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}
|
||||
|
||||
var __values []interface{}
|
||||
__values = append(__values, __id_val, __name_val, __description_val, __rate_limit_val, __partner_id_val, __owner_id_val, __created_at_val)
|
||||
@ -15984,6 +16069,12 @@ func (obj *pgxcockroachImpl) Create_Project(ctx context.Context,
|
||||
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
|
||||
}
|
||||
|
||||
if optional.MaxBuckets._set {
|
||||
__values = append(__values, optional.MaxBuckets.value())
|
||||
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("max_buckets"))
|
||||
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
|
||||
}
|
||||
|
||||
if len(__optional_columns.SQLs) == 0 {
|
||||
if __columns.SQL == nil {
|
||||
__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
|
||||
@ -15996,7 +16087,7 @@ func (obj *pgxcockroachImpl) Create_Project(ctx context.Context,
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
project = &Project{}
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
||||
if err != nil {
|
||||
return nil, obj.makeErr(err)
|
||||
}
|
||||
@ -17570,7 +17661,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_Id(ctx context.Context,
project *Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")

var __values []interface{}
__values = append(__values, project_id.value())
@ -17579,7 +17670,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)

project = &Project{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return (*Project)(nil), obj.makeErr(err)
}
@ -17631,11 +17722,33 @@ func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Contex

}

func (obj *pgxcockroachImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.max_buckets FROM projects WHERE projects.id = ?")

var __values []interface{}
__values = append(__values, project_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

row = &MaxBuckets_Row{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&row.MaxBuckets)
if err != nil {
	return (*MaxBuckets_Row)(nil), obj.makeErr(err)
}
return row, nil

}

func (obj *pgxcockroachImpl) All_Project(ctx context.Context) (
rows []*Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects")

var __values []interface{}

@ -17650,7 +17763,7 @@ func (obj *pgxcockroachImpl) All_Project(ctx context.Context) (

for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return nil, obj.makeErr(err)
}
@ -17668,7 +17781,7 @@ func (obj *pgxcockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt
rows []*Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")

var __values []interface{}
__values = append(__values, project_created_at_less.value())
@ -17684,7 +17797,7 @@ func (obj *pgxcockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt

for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return nil, obj.makeErr(err)
}
@ -17702,7 +17815,7 @@ func (obj *pgxcockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx co
rows []*Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")

var __values []interface{}
__values = append(__values, project_owner_id.value())
@ -17718,7 +17831,7 @@ func (obj *pgxcockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx co

for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return nil, obj.makeErr(err)
}
@ -17736,7 +17849,7 @@ func (obj *pgxcockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_P
rows []*Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")

var __values []interface{}
__values = append(__values, project_member_member_id.value())
@ -17752,7 +17865,7 @@ func (obj *pgxcockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_P

for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return nil, obj.makeErr(err)
}
@ -17771,7 +17884,7 @@ func (obj *pgxcockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_Creat
rows []*Project, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")

var __values []interface{}
__values = append(__values, project_created_at_less.value())
@ -17789,7 +17902,7 @@ func (obj *pgxcockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_Creat

for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
	return nil, obj.makeErr(err)
}
@ -18883,6 +18996,28 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate

}

func (obj *pgxcockroachImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
count int64, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT COUNT(*) FROM bucket_metainfos WHERE bucket_metainfos.project_id = ?")

var __values []interface{}
__values = append(__values, bucket_metainfo_project_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&count)
if err != nil {
	return 0, obj.makeErr(err)
}

return count, nil

}

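Taken together, the two new accessors above give the satellite everything it needs to refuse bucket creation past a project's allocation, which is the point of this commit. A minimal sketch of that check, assuming it lives alongside the generated dbx code; checkBucketLimit and defaultMax are hypothetical names, and a stored value of 0 (the column default) is assumed to mean "unset, fall back to the satellite-wide default":

	package dbx

	import (
		"context"
		"fmt"
	)

	// checkBucketLimit is a sketch, not code from this commit. It reads the
	// project's max_buckets and the current bucket count via the generated
	// accessors and rejects creation once the limit is reached.
	func checkBucketLimit(ctx context.Context, db Methods, projectID []byte, defaultMax int) error {
		row, err := db.Get_Project_MaxBuckets_By_Id(ctx, Project_Id(projectID))
		if err != nil {
			return err
		}
		limit := row.MaxBuckets
		if limit == 0 {
			limit = defaultMax // assumed fallback to the configured default
		}
		count, err := db.Count_BucketMetainfo_Name_By_ProjectId(ctx, BucketMetainfo_ProjectId(projectID))
		if err != nil {
			return err
		}
		if count >= int64(limit) {
			return fmt.Errorf("number of allocated buckets (%d) exceeded", limit)
		}
		return nil
	}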
func (obj *pgxcockroachImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
graceful_exit_progress *GracefulExitProgress, err error) {
@ -20273,7 +20408,7 @@ func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{}

var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}

__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
@ -20304,6 +20439,11 @@ func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("rate_limit = ?"))
}

if update.MaxBuckets._set {
__values = append(__values, update.MaxBuckets.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("max_buckets = ?"))
}

if len(__sets_sql.SQLs) == 0 {
	return nil, emptyUpdate()
}
@ -20317,7 +20457,7 @@ func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)

project = &Project{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err == sql.ErrNoRows {
	return nil, nil
}
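The write path mirrors the read path: max_buckets joins the RETURNING clause of the UPDATE, so the returned Project already reflects the new value with no re-read. A sketch of a hypothetical call site (field-constructor names follow the dbx conventions seen elsewhere in this diff):

	// Sketch only: raise a project's bucket allocation to 2000.
	updated, err := db.Update_Project_By_Id(ctx,
		Project_Id(projectID[:]),
		Project_Update_Fields{
			MaxBuckets: Project_MaxBuckets(2000),
		},
	)
	if err != nil {
		return err
	}
	// Note: updated is nil when the project does not exist, because the
	// generated code above swallows sql.ErrNoRows.
	_ = updated.MaxBuckets // already 2000, via the RETURNING clause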
@ -22343,6 +22483,16 @@ func (rx *Rx) All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCe
return tx.All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx, user_credit_user_id, user_credit_expires_at_greater)
}

func (rx *Rx) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
count int64, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
	return
}
return tx.Count_BucketMetainfo_Name_By_ProjectId(ctx, bucket_metainfo_project_id)
}

func (rx *Rx) Count_UserCredit_By_ReferredBy(ctx context.Context,
user_credit_referred_by UserCredit_ReferredBy_Field) (
count int64, err error) {
@ -23396,6 +23546,16 @@ func (rx *Rx) Get_Project_By_Id(ctx context.Context,
return tx.Get_Project_By_Id(ctx, project_id)
}

func (rx *Rx) Get_Project_MaxBuckets_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
	return
}
return tx.Get_Project_MaxBuckets_By_Id(ctx, project_id)
}

func (rx *Rx) Get_Project_UsageLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *UsageLimit_Row, err error) {
@ -24094,6 +24254,10 @@ type Methods interface {
user_credit_expires_at_greater UserCredit_ExpiresAt_Field) (
rows []*UserCredit, err error)

Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
count int64, err error)

Count_UserCredit_By_ReferredBy(ctx context.Context,
user_credit_referred_by UserCredit_ReferredBy_Field) (
count int64, err error)
@ -24601,6 +24765,10 @@ type Methods interface {
project_id Project_Id_Field) (
project *Project, err error)

Get_Project_MaxBuckets_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error)

Get_Project_UsageLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *UsageLimit_Row, err error)

@ -242,6 +242,7 @@ CREATE TABLE projects (
usage_limit bigint NOT NULL DEFAULT 0,
bandwidth_limit bigint NOT NULL DEFAULT 0,
rate_limit integer,
max_buckets integer NOT NULL DEFAULT 0,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
@ -413,7 +414,8 @@ CREATE TABLE bucket_metainfos (
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id )
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_invoice_stamps (
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,

@ -242,6 +242,7 @@ CREATE TABLE projects (
usage_limit bigint NOT NULL DEFAULT 0,
bandwidth_limit bigint NOT NULL DEFAULT 0,
rate_limit integer,
max_buckets integer NOT NULL DEFAULT 0,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
@ -413,7 +414,8 @@ CREATE TABLE bucket_metainfos (
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id )
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_invoice_stamps (
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,

@ -154,7 +154,7 @@ func flattenMigration(m *migrate.Migration) (*migrate.Migration, error) {
var db tagsql.DB
var version int
var statements migrate.SQL
var steps = []*migrate.Step{}
var steps []*migrate.Step

pushMerged := func() {
if len(statements) == 0 {
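The flattenMigration change above is a small cleanup rather than a behavior change: append works on a nil slice, so the empty composite literal bought nothing and the plain zero-value declaration is the idiomatic form. A self-contained demonstration:

	package main

	import "fmt"

	func main() {
		var steps []string // zero value: nil, length 0
		steps = append(steps, "v117", "v118") // append allocates on first use
		fmt.Println(len(steps), steps == nil) // prints: 2 false
	}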
@ -1219,6 +1219,16 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
);
`},
},
{
DB: db.DB,
Description: "add max_buckets field to projects and an implicit index on bucket_metainfos project_id,name",
SeparateTx: true,
Version: 118,
Action: migrate.SQL{
`ALTER TABLE projects ADD COLUMN max_buckets INTEGER NOT NULL DEFAULT 0;`,
`ALTER TABLE bucket_metainfos ADD UNIQUE (project_id, name);`,
},
},
},
}
}

@ -96,7 +96,7 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project)
createFields.PartnerId = dbx.Project_PartnerId(project.PartnerID[:])
}
createFields.RateLimit = dbx.Project_RateLimit_Raw(project.RateLimit)
createFields.UsageLimit = dbx.Project_UsageLimit(0)
createFields.MaxBuckets = dbx.Project_MaxBuckets(project.MaxBuckets)

createdProject, err := projects.db.Create_Project(ctx,
dbx.Project_Id(projectID[:]),
@ -221,6 +221,7 @@ func projectFromDBX(ctx context.Context, project *dbx.Project) (_ *console.Proje
PartnerID: partnerID,
OwnerID: ownerID,
RateLimit: project.RateLimit,
MaxBuckets: project.MaxBuckets,
CreatedAt: project.CreatedAt,
}, nil
}
@ -245,3 +246,14 @@ func projectsFromDbxSlice(ctx context.Context, projectsDbx []*dbx.Project) (_ []

return projects, errs.Combine(errors...)
}

// GetMaxBuckets returns the maximum number of buckets allowed for the project.
func (projects *projects) GetMaxBuckets(ctx context.Context, id uuid.UUID) (maxBuckets int, err error) {
defer mon.Task()(&ctx)(&err)

dbxRow, err := projects.db.Get_Project_MaxBuckets_By_Id(ctx, dbx.Project_Id(id[:]))
if err != nil {
	return -1, err
}
return dbxRow.MaxBuckets, nil
}

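The snapshot file below pins the schema as it should look after migration step 118. As is the pattern for these testdata files, the migration tests run the steps against a fresh database and compare the resulting schema to the snapshot. A rough sketch of that check; loadSchemaFromSQL and migrateUpTo are hypothetical stand-ins for the real test plumbing:

	// Sketch only: verify that running migrations up to version 118
	// reproduces the v118 snapshot schema.
	expected, err := loadSchemaFromSQL(ctx, db, "testdata/postgres.v118.sql")
	require.NoError(t, err)

	actual, err := migrateUpTo(ctx, db, 118)
	require.NoError(t, err)

	require.Equal(t, expected, actual)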
satellite/satellitedb/testdata/postgres.v118.sql (new vendored file, 588 lines)
@ -0,0 +1,588 @@
|
||||
-- AUTOGENERATED BY storj.io/dbx
|
||||
-- DO NOT EDIT
|
||||
CREATE TABLE accounting_rollups (
|
||||
id bigserial NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
put_total bigint NOT NULL,
|
||||
get_total bigint NOT NULL,
|
||||
get_audit_total bigint NOT NULL,
|
||||
get_repair_total bigint NOT NULL,
|
||||
put_repair_total bigint NOT NULL,
|
||||
at_rest_total double precision NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE accounting_timestamps (
|
||||
name text NOT NULL,
|
||||
value timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( name )
|
||||
);
|
||||
CREATE TABLE audit_histories (
|
||||
node_id bytea NOT NULL,
|
||||
history bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollups (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
remote bigint NOT NULL,
|
||||
remote_segments_count integer NOT NULL,
|
||||
inline_segments_count integer NOT NULL,
|
||||
object_count integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE coinpayments_transactions (
|
||||
id text NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
address text NOT NULL,
|
||||
amount bytea NOT NULL,
|
||||
received bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
key text NOT NULL,
|
||||
timeout integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE consumed_serials (
|
||||
storage_node_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, serial_number )
|
||||
);
|
||||
CREATE TABLE coupons (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
description text NOT NULL,
|
||||
type integer NOT NULL,
|
||||
status integer NOT NULL,
|
||||
duration bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE coupon_usages (
|
||||
coupon_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
status integer NOT NULL,
|
||||
period timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( coupon_id, period )
|
||||
);
|
||||
CREATE TABLE credits (
|
||||
user_id bytea NOT NULL,
|
||||
transaction_id text NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( transaction_id )
|
||||
);
|
||||
CREATE TABLE credits_spendings (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
status integer NOT NULL,
|
||||
period timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE graceful_exit_progress (
|
||||
node_id bytea NOT NULL,
|
||||
bytes_transferred bigint NOT NULL,
|
||||
pieces_transferred bigint NOT NULL DEFAULT 0,
|
||||
pieces_failed bigint NOT NULL DEFAULT 0,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE graceful_exit_transfer_queue (
|
||||
node_id bytea NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
piece_num integer NOT NULL,
|
||||
root_piece_id bytea,
|
||||
durability_ratio double precision NOT NULL,
|
||||
queued_at timestamp with time zone NOT NULL,
|
||||
requested_at timestamp with time zone,
|
||||
last_failed_at timestamp with time zone,
|
||||
last_failed_code integer,
|
||||
failed_count integer,
|
||||
finished_at timestamp with time zone,
|
||||
order_limit_send_count integer NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY ( node_id, path, piece_num )
|
||||
);
|
||||
CREATE TABLE injuredsegments (
|
||||
path bytea NOT NULL,
|
||||
data bytea NOT NULL,
|
||||
attempted timestamp with time zone,
|
||||
num_healthy_pieces integer NOT NULL DEFAULT 52,
|
||||
PRIMARY KEY ( path )
|
||||
);
|
||||
CREATE TABLE irreparabledbs (
|
||||
segmentpath bytea NOT NULL,
|
||||
segmentdetail bytea NOT NULL,
|
||||
pieces_lost_count bigint NOT NULL,
|
||||
seg_damaged_unix_sec bigint NOT NULL,
|
||||
repair_attempt_count bigint NOT NULL,
|
||||
PRIMARY KEY ( segmentpath )
|
||||
);
|
||||
CREATE TABLE nodes (
|
||||
id bytea NOT NULL,
|
||||
address text NOT NULL DEFAULT '',
|
||||
last_net text NOT NULL,
|
||||
last_ip_port text,
|
||||
protocol integer NOT NULL DEFAULT 0,
|
||||
type integer NOT NULL DEFAULT 0,
|
||||
email text NOT NULL,
|
||||
wallet text NOT NULL,
|
||||
free_disk bigint NOT NULL DEFAULT -1,
|
||||
piece_count bigint NOT NULL DEFAULT 0,
|
||||
major bigint NOT NULL DEFAULT 0,
|
||||
minor bigint NOT NULL DEFAULT 0,
|
||||
patch bigint NOT NULL DEFAULT 0,
|
||||
hash text NOT NULL DEFAULT '',
|
||||
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
|
||||
release boolean NOT NULL DEFAULT false,
|
||||
latency_90 bigint NOT NULL DEFAULT 0,
|
||||
audit_success_count bigint NOT NULL DEFAULT 0,
|
||||
total_audit_count bigint NOT NULL DEFAULT 0,
|
||||
vetted_at timestamp with time zone,
|
||||
uptime_success_count bigint NOT NULL,
|
||||
total_uptime_count bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
contained boolean NOT NULL DEFAULT false,
|
||||
disqualified timestamp with time zone,
|
||||
suspended timestamp with time zone,
|
||||
unknown_audit_suspended timestamp with time zone,
|
||||
offline_suspended timestamp with time zone,
|
||||
under_review timestamp with time zone,
|
||||
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
exit_initiated_at timestamp with time zone,
|
||||
exit_loop_completed_at timestamp with time zone,
|
||||
exit_finished_at timestamp with time zone,
|
||||
exit_success boolean NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE node_api_versions (
|
||||
id bytea NOT NULL,
|
||||
api_version integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE nodes_offline_times (
|
||||
node_id bytea NOT NULL,
|
||||
tracked_at timestamp with time zone NOT NULL,
|
||||
seconds integer NOT NULL,
|
||||
PRIMARY KEY ( node_id, tracked_at )
|
||||
);
|
||||
CREATE TABLE offers (
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
award_credit_duration_days integer,
|
||||
invitee_credit_duration_days integer,
|
||||
redeemable_cap integer,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE peer_identities (
|
||||
node_id bytea NOT NULL,
|
||||
leaf_serial_number bytea NOT NULL,
|
||||
chain bytea NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_audits (
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint NOT NULL DEFAULT 0,
|
||||
bandwidth_limit bigint NOT NULL DEFAULT 0,
|
||||
rate_limit integer,
|
||||
max_buckets integer NOT NULL DEFAULT 0,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE project_bandwidth_rollups (
|
||||
project_id bytea NOT NULL,
|
||||
interval_month date NOT NULL,
|
||||
egress_allocated bigint NOT NULL,
|
||||
PRIMARY KEY ( project_id, interval_month )
|
||||
);
|
||||
CREATE TABLE registration_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE revocations (
|
||||
revoked bytea NOT NULL,
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_payments (
|
||||
id bigserial NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
period text NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
receipt text,
|
||||
notes text,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_paystubs (
|
||||
period text NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
codes text NOT NULL,
|
||||
usage_at_rest double precision NOT NULL,
|
||||
usage_get bigint NOT NULL,
|
||||
usage_put bigint NOT NULL,
|
||||
usage_get_repair bigint NOT NULL,
|
||||
usage_put_repair bigint NOT NULL,
|
||||
usage_get_audit bigint NOT NULL,
|
||||
comp_at_rest bigint NOT NULL,
|
||||
comp_get bigint NOT NULL,
|
||||
comp_put bigint NOT NULL,
|
||||
comp_get_repair bigint NOT NULL,
|
||||
comp_put_repair bigint NOT NULL,
|
||||
comp_get_audit bigint NOT NULL,
|
||||
surge_percent bigint NOT NULL,
|
||||
held bigint NOT NULL,
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY ( interval_end_time, node_id )
|
||||
);
|
||||
CREATE TABLE stripe_customers (
|
||||
user_id bytea NOT NULL,
|
||||
customer_id text NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( user_id ),
|
||||
UNIQUE ( customer_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_invoice_project_records (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
storage double precision NOT NULL,
|
||||
egress bigint NOT NULL,
|
||||
objects bigint NOT NULL,
|
||||
period_start timestamp with time zone NOT NULL,
|
||||
period_end timestamp with time zone NOT NULL,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( project_id, period_start, period_end )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
||||
tx_id text NOT NULL,
|
||||
rate bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
email text NOT NULL,
|
||||
normalized_email text NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE value_attributions (
|
||||
project_id bytea NOT NULL,
|
||||
bucket_name bytea NOT NULL,
|
||||
partner_id bytea NOT NULL,
|
||||
last_updated timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( project_id, bucket_name )
|
||||
);
|
||||
CREATE TABLE api_keys (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
head bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
secret bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( head ),
|
||||
UNIQUE ( name, project_id )
|
||||
);
|
||||
CREATE TABLE bucket_metainfos (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ),
|
||||
name bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
path_cipher integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
default_segment_size integer NOT NULL,
|
||||
default_encryption_cipher_suite integer NOT NULL,
|
||||
default_encryption_block_size integer NOT NULL,
|
||||
default_redundancy_algorithm integer NOT NULL,
|
||||
default_redundancy_share_size integer NOT NULL,
|
||||
default_redundancy_required_shares integer NOT NULL,
|
||||
default_redundancy_repair_shares integer NOT NULL,
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( name, project_id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_invoice_stamps (
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
invoice_id bytea NOT NULL,
|
||||
start_date timestamp with time zone NOT NULL,
|
||||
end_date timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( project_id, start_date, end_date ),
|
||||
UNIQUE ( invoice_id )
|
||||
);
|
||||
CREATE TABLE project_members (
|
||||
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( member_id, project_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
offer_id integer NOT NULL REFERENCES offers( id ),
|
||||
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
||||
type text NOT NULL,
|
||||
credits_earned_in_cents integer NOT NULL,
|
||||
credits_used_in_cents integer NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( id, offer_id )
|
||||
);
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
|
||||
|
||||
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00');
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
|
||||
|
||||
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
|
||||
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
|
||||
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
|
||||
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
|
||||
|
||||
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
|
||||
|
||||
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
|
||||
|
||||
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
|
||||
|
||||
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
|
||||
|
||||
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 0, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "credits" ("user_id", "transaction_id", "amount", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'transactionID', 10, '2019-06-01 08:28:24.267934+00');
INSERT INTO "credits_spendings" ("id", "user_id", "project_id", "amount", "status", "period", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\275|\\342N\\347\\014'::bytea, E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 0, 'epoch', '2019-06-01 09:28:24.267934+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('0', '\x0a0130120100', 52);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 30);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 51);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 40);
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', 0, 0, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
-- NEW DATA --
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', 0, 0, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
scripts/testdata/satellite-config.yaml.lock (vendored, 15 lines changed)
@ -358,6 +358,15 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# timeout for a single delete request
# metainfo.piece-deletion.request-timeout: 1m0s

# the default bandwidth usage limit
# metainfo.project-limits.default-max-bandwidth: 50.0 GB

# the default storage usage limit
# metainfo.project-limits.default-max-usage: 50.0 GB

# max bucket count for a project.
# metainfo.project-limits.max-buckets: 100

# number of projects to cache.
# metainfo.rate-limiter.cache-capacity: 10000
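For context, each metainfo.project-limits.* entry above binds to a field of a Go config struct on the satellite. A sketch of how such options are typically declared in this codebase's style follows; the help/default tag strings are illustrative, not verbatim source.

package metainfo

import "storj.io/common/memory"

// ProjectLimitConfig is a sketch of the config struct behind the
// metainfo.project-limits.* options; tag contents mirror the lock
// file comments above but are not copied from the real source.
type ProjectLimitConfig struct {
	MaxBuckets          int         `help:"max bucket count for a project." default:"100"`
	DefaultMaxUsage     memory.Size `help:"the default storage usage limit" default:"50.0GB"`
	DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" default:"50.0GB"`
}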
@ -583,12 +592,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# default queue batch size
# reported-rollup.queue-batch-size: 10000

# the default bandwidth usage limit
# rollup.default-max-bandwidth: 50.0 GB

# the default storage usage limit
# rollup.default-max-usage: 50.0 GB

# option for deleting tallies after they are rolled up
# rollup.delete-tallies: true