diff --git a/satellite/admin/project_test.go b/satellite/admin/project_test.go index 2ca69e0b5..59a3f5a49 100644 --- a/satellite/admin/project_test.go +++ b/satellite/admin/project_test.go @@ -45,7 +45,7 @@ func TestProjectGet(t *testing.T) { t.Run("OK", func(t *testing.T) { link := "http://" + address.String() + "/api/projects/" + project.ID.String() expected := fmt.Sprintf( - `{"id":"%s","name":"%s","description":"%s","partnerId":"%s","userAgent":null,"ownerId":"%s","rateLimit":null,"burstLimit":null,"maxBuckets":null,"createdAt":"%s","memberCount":0,"storageLimit":"25.00 GB","bandwidthLimit":"25.00 GB"}`, + `{"id":"%s","name":"%s","description":"%s","partnerId":"%s","userAgent":null,"ownerId":"%s","rateLimit":null,"burstLimit":null,"maxBuckets":null,"createdAt":"%s","memberCount":0,"storageLimit":"25.00 GB","bandwidthLimit":"25.00 GB","segmentLimit":140000}`, project.ID.String(), project.Name, project.Description, diff --git a/satellite/api.go b/satellite/api.go index 7b82bb346..28c53dc2d 100644 --- a/satellite/api.go +++ b/satellite/api.go @@ -316,7 +316,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, peer.ProjectLimits.Cache = accounting.NewProjectLimitCache(peer.DB.ProjectAccounting(), config.Console.Config.UsageLimits.Storage.Free, config.Console.Config.UsageLimits.Bandwidth.Free, - 1000000, // TODO this will be correctly populated with up coming change + config.Console.Config.UsageLimits.Segment.Free, config.ProjectLimit, ) } diff --git a/satellite/console/projects.go b/satellite/console/projects.go index cf03861c5..01c76ae6d 100644 --- a/satellite/console/projects.go +++ b/satellite/console/projects.go @@ -53,6 +53,7 @@ type Projects interface { type UsageLimitsConfig struct { Storage StorageLimitConfig Bandwidth BandwidthLimitConfig + Segment SegmentLimitConfig } // StorageLimitConfig is a configuration struct for default storage per-project usage limits. @@ -67,6 +68,12 @@ type BandwidthLimitConfig struct { Paid memory.Size `help:"the default paid-tier bandwidth usage limit" default:"100.00TB" testDefault:"25.00 GB"` } +// SegmentLimitConfig is a configuration struct for default segments per-project usage limits. +type SegmentLimitConfig struct { + Free int64 `help:"the default free-tier segment usage limit" default:"140000"` + Paid int64 `help:"the default paid-tier segment usage limit" default:"1000000"` +} + // Project is a database object that describes Project entity. type Project struct { ID uuid.UUID `json:"id"` @@ -83,6 +90,7 @@ type Project struct { MemberCount int `json:"memberCount"` StorageLimit *memory.Size `json:"storageLimit"` BandwidthLimit *memory.Size `json:"bandwidthLimit"` + SegmentLimit *int64 `json:"segmentLimit"` } // ProjectInfo holds data needed to create/update Project. @@ -91,6 +99,7 @@ type ProjectInfo struct { Description string `json:"description"` StorageLimit memory.Size `json:"project specific storage limit"` BandwidthLimit memory.Size `json:"project specific bandwidth limit"` + SegmentLimit int64 `json:"project specific segment limit"` CreatedAt time.Time `json:"createdAt"` } diff --git a/satellite/console/projectusagelimits.go b/satellite/console/projectusagelimits.go index 0e0b37ce8..257fb11ca 100644 --- a/satellite/console/projectusagelimits.go +++ b/satellite/console/projectusagelimits.go @@ -15,8 +15,9 @@ type ProjectUsageLimits struct { SegmentCount int64 `json:"segmentCount"` } -// UserProjectLimits holds a users storage and bandwidth limits for new projects. 
+// UserProjectLimits holds a user's storage, bandwidth, and segment limits for new projects. type UserProjectLimits struct { BandwidthLimit memory.Size `json:"bandwidthLimit"` StorageLimit memory.Size `json:"storageUsed"` + SegmentLimit int64 `json:"segmentLimit"` } diff --git a/satellite/console/service.go b/satellite/console/service.go index 5ac468f72..9fe63d03f 100644 --- a/satellite/console/service.go +++ b/satellite/console/service.go @@ -271,7 +271,9 @@ func (paymentService PaymentsService) AddCreditCard(ctx context.Context, creditC // put this user into the paid tier and convert projects to upgraded limits. err = paymentService.service.store.Users().UpdatePaidTier(ctx, auth.User.ID, true, paymentService.service.config.UsageLimits.Bandwidth.Paid, - paymentService.service.config.UsageLimits.Storage.Paid) + paymentService.service.config.UsageLimits.Storage.Paid, + paymentService.service.config.UsageLimits.Segment.Paid, + ) if err != nil { return Error.Wrap(err) } @@ -289,6 +291,10 @@ func (paymentService PaymentsService) AddCreditCard(ctx context.Context, creditC project.BandwidthLimit = new(memory.Size) *project.BandwidthLimit = paymentService.service.config.UsageLimits.Bandwidth.Paid } + if project.SegmentLimit == nil || *project.SegmentLimit < paymentService.service.config.UsageLimits.Segment.Paid { + project.SegmentLimit = new(int64) + *project.SegmentLimit = paymentService.service.config.UsageLimits.Segment.Paid + } err = paymentService.service.store.Projects().Update(ctx, &project) if err != nil { return Error.Wrap(err) } @@ -640,6 +646,7 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret R // TODO: move the project limits into the registration token. newUser.ProjectStorageLimit = s.config.UsageLimits.Storage.Free.Int64() newUser.ProjectBandwidthLimit = s.config.UsageLimits.Bandwidth.Free.Int64() + newUser.ProjectSegmentLimit = s.config.UsageLimits.Segment.Free u, err = tx.Users().Insert(ctx, newUser, @@ -1124,6 +1131,7 @@ func (s *Service) CreateProject(ctx context.Context, projectInfo ProjectInfo) (p UserAgent: auth.User.UserAgent, StorageLimit: &newProjectLimits.StorageLimit, BandwidthLimit: &newProjectLimits.BandwidthLimit, + SegmentLimit: &newProjectLimits.SegmentLimit, }, ) if err != nil { @@ -1204,7 +1212,10 @@ func (s *Service) UpdateProject(ctx context.Context, projectID uuid.UUID, projec if project.StorageLimit != nil && *project.StorageLimit == 0 { return nil, Error.New("current storage limit for project is set to 0 (updating disabled)") } - if projectInfo.StorageLimit <= 0 || projectInfo.BandwidthLimit <= 0 { + if project.SegmentLimit != nil && *project.SegmentLimit == 0 { + return nil, Error.New("current segment limit for project is set to 0 (updating disabled)") + } + if projectInfo.StorageLimit <= 0 || projectInfo.BandwidthLimit <= 0 || projectInfo.SegmentLimit <= 0 { return nil, Error.New("project limits must be greater than 0") } @@ -1216,6 +1227,10 @@ func (s *Service) UpdateProject(ctx context.Context, projectID uuid.UUID, projec return nil, Error.New("specified bandwidth limit exceeds allowed maximum for current tier") } + if projectInfo.SegmentLimit > s.config.UsageLimits.Segment.Paid { + return nil, Error.New("specified segment limit exceeds allowed maximum for current tier") + } + storageUsed, err := s.projectUsage.GetProjectStorageTotals(ctx, projectID) if err != nil { return nil, Error.Wrap(err) } @@ -1236,6 +1251,8 @@ func (s *Service) UpdateProject(ctx context.Context, projectID uuid.UUID, projec *project.StorageLimit = projectInfo.StorageLimit
project.BandwidthLimit = new(memory.Size) *project.BandwidthLimit = projectInfo.BandwidthLimit + project.SegmentLimit = new(int64) + *project.SegmentLimit = projectInfo.SegmentLimit } err = s.store.Projects().Update(ctx, project) @@ -1834,6 +1851,7 @@ func (s *Service) getUserProjectLimits(ctx context.Context, userID uuid.UUID) (_ return &UserProjectLimits{ StorageLimit: result.ProjectStorageLimit, BandwidthLimit: result.ProjectBandwidthLimit, + SegmentLimit: result.ProjectSegmentLimit, }, nil } diff --git a/satellite/console/service_test.go b/satellite/console/service_test.go index cddf9d32a..f43970381 100644 --- a/satellite/console/service_test.go +++ b/satellite/console/service_test.go @@ -64,6 +64,7 @@ func TestService(t *testing.T) { updatedDescription := "newDescription" updatedStorageLimit := memory.Size(100) updatedBandwidthLimit := memory.Size(100) + updatedSegmentLimit := int64(100) // user should be in free tier user, err := service.GetUser(ctx, up1Pro1.OwnerID) @@ -85,6 +86,7 @@ func TestService(t *testing.T) { Description: updatedDescription, StorageLimit: updatedStorageLimit, BandwidthLimit: updatedBandwidthLimit, + SegmentLimit: updatedSegmentLimit, }) require.NoError(t, err) require.NotEqual(t, up1Pro1.Name, updatedProject.Name) @@ -95,6 +97,8 @@ require.Equal(t, updatedStorageLimit, *updatedProject.StorageLimit) require.NotEqual(t, *up1Pro1.BandwidthLimit, *updatedProject.BandwidthLimit) require.Equal(t, updatedBandwidthLimit, *updatedProject.BandwidthLimit) + require.NotEqual(t, *up1Pro1.SegmentLimit, *updatedProject.SegmentLimit) + require.Equal(t, updatedSegmentLimit, *updatedProject.SegmentLimit) // Updating someone else project details should not work updatedProject, err = service.UpdateProject(authCtx1, up2Pro1.ID, console.ProjectInfo{ @@ -102,6 +106,7 @@ Description: "TestUpdate", StorageLimit: memory.Size(100), BandwidthLimit: memory.Size(100), + SegmentLimit: 100, }) require.Error(t, err) require.Nil(t, updatedProject) @@ -113,6 +118,7 @@ *size100 = memory.Size(100) up1Pro1.StorageLimit = size0 + up1Pro1.SegmentLimit = (*int64)(size0) err = sat.DB.Console().Projects().Update(ctx, up1Pro1) require.NoError(t, err) @@ -121,6 +127,7 @@ Description: "1 2 3", StorageLimit: memory.Size(123), BandwidthLimit: memory.Size(123), + SegmentLimit: 123, } updatedProject, err = service.UpdateProject(authCtx1, up1Pro1.ID, updateInfo) require.Error(t, err) @@ -128,6 +135,7 @@ up1Pro1.StorageLimit = size100 up1Pro1.BandwidthLimit = size0 + err = sat.DB.Console().Projects().Update(ctx, up1Pro1) require.NoError(t, err) @@ -137,6 +145,7 @@ up1Pro1.StorageLimit = size100 up1Pro1.BandwidthLimit = size100 + up1Pro1.SegmentLimit = (*int64)(size100) err = sat.DB.Console().Projects().Update(ctx, up1Pro1) require.NoError(t, err) @@ -148,6 +157,7 @@ require.NotNil(t, updatedProject.BandwidthLimit) require.Equal(t, updateInfo.StorageLimit, *updatedProject.StorageLimit) require.Equal(t, updateInfo.BandwidthLimit, *updatedProject.BandwidthLimit) + require.Equal(t, updateInfo.SegmentLimit, *updatedProject.SegmentLimit) }) t.Run("TestAddProjectMembers", func(t *testing.T) { @@ -307,6 +317,10 @@ func TestPaidTier(t *testing.T) { Free: 2 * memory.GB, Paid: 2 * memory.TB, }, + Segment: console.SegmentLimitConfig{ + Free: 10, + Paid: 50, + }, } testplanet.Run(t, testplanet.Config{ @@ -325,6 +339,7 @@ func
TestPaidTier(t *testing.T) { require.NoError(t, err) require.Equal(t, usageConfig.Storage.Free, *proj1.StorageLimit) require.Equal(t, usageConfig.Bandwidth.Free, *proj1.BandwidthLimit) + require.Equal(t, usageConfig.Segment.Free, *proj1.SegmentLimit) // user should be in free tier user, err := service.GetUser(ctx, proj1.OwnerID) @@ -352,12 +367,14 @@ func TestPaidTier(t *testing.T) { require.NoError(t, err) require.Equal(t, usageConfig.Storage.Paid, *proj1.StorageLimit) require.Equal(t, usageConfig.Bandwidth.Paid, *proj1.BandwidthLimit) + require.Equal(t, usageConfig.Segment.Paid, *proj1.SegmentLimit) // expect new project to be created with paid tier usage limits proj2, err := service.CreateProject(authCtx, console.ProjectInfo{Name: "Project 2"}) require.NoError(t, err) require.Equal(t, usageConfig.Storage.Paid, *proj2.StorageLimit) require.Equal(t, usageConfig.Bandwidth.Paid, *proj2.BandwidthLimit) + require.Equal(t, usageConfig.Segment.Paid, *proj2.SegmentLimit) }) } diff --git a/satellite/console/users.go b/satellite/console/users.go index 15b067f18..9004435ea 100644 --- a/satellite/console/users.go +++ b/satellite/console/users.go @@ -29,7 +29,7 @@ type Users interface { // Update is a method for updating user entity. Update(ctx context.Context, user *User) error // UpdatePaidTier sets whether the user is in the paid tier. - UpdatePaidTier(ctx context.Context, id uuid.UUID, paidTier bool, projectBandwidthLimit, projectStorageLimit memory.Size) error + UpdatePaidTier(ctx context.Context, id uuid.UUID, paidTier bool, projectBandwidthLimit, projectStorageLimit memory.Size, projectSegmentLimit int64) error // GetProjectLimit is a method to get the users project limit GetProjectLimit(ctx context.Context, id uuid.UUID) (limit int, err error) // GetUserProjectLimits is a method to get the users storage and bandwidth limits for new projects. @@ -98,6 +98,7 @@ func (user *CreateUser) IsValid() error { type ProjectLimits struct { ProjectBandwidthLimit memory.Size `json:"projectBandwidthLimit"` ProjectStorageLimit memory.Size `json:"projectStorageLimit"` + ProjectSegmentLimit int64 `json:"projectSegmentLimit"` } // AuthUser holds info for user authentication token requests.
@@ -139,6 +140,7 @@ type User struct { ProjectLimit int `json:"projectLimit"` ProjectStorageLimit int64 `json:"projectStorageLimit"` ProjectBandwidthLimit int64 `json:"projectBandwidthLimit"` + ProjectSegmentLimit int64 `json:"projectSegmentLimit"` PaidTier bool `json:"paidTier"` IsProfessional bool `json:"isProfessional"` diff --git a/satellite/console/users_test.go b/satellite/console/users_test.go index 196a4190c..0e3523f80 100644 --- a/satellite/console/users_test.go +++ b/satellite/console/users_test.go @@ -128,6 +128,7 @@ func TestUserUpdatePaidTier(t *testing.T) { password := "password" projectBandwidthLimit := memory.Size(50000000000) storageStorageLimit := memory.Size(50000000000) + segmentLimit := int64(100) newUser := &console.User{ ID: testrand.UUID(), FullName: fullName, @@ -144,7 +145,7 @@ func TestUserUpdatePaidTier(t *testing.T) { require.Equal(t, shortName, createdUser.ShortName) require.False(t, createdUser.PaidTier) - err = db.Console().Users().UpdatePaidTier(ctx, createdUser.ID, true, projectBandwidthLimit, storageStorageLimit) + err = db.Console().Users().UpdatePaidTier(ctx, createdUser.ID, true, projectBandwidthLimit, storageStorageLimit, segmentLimit) require.NoError(t, err) retrievedUser, err := db.Console().Users().Get(ctx, createdUser.ID) @@ -154,7 +155,7 @@ func TestUserUpdatePaidTier(t *testing.T) { require.Equal(t, shortName, retrievedUser.ShortName) require.True(t, retrievedUser.PaidTier) - err = db.Console().Users().UpdatePaidTier(ctx, createdUser.ID, false, projectBandwidthLimit, storageStorageLimit) + err = db.Console().Users().UpdatePaidTier(ctx, createdUser.ID, false, projectBandwidthLimit, storageStorageLimit, segmentLimit) require.NoError(t, err) retrievedUser, err = db.Console().Users().Get(ctx, createdUser.ID) diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index 2f78c3f57..d4b9ad655 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -291,6 +291,7 @@ model user ( field project_limit int ( updatable, default 0 ) field project_bandwidth_limit int64 ( updatable, default 0 ) field project_storage_limit int64 ( updatable, default 0 ) + field project_segment_limit int64 ( updatable, default 0 ) field paid_tier bool ( updatable, default false ) field position text ( updatable, nullable ) @@ -331,7 +332,7 @@ read one ( ) read one ( - select user.project_storage_limit user.project_bandwidth_limit + select user.project_storage_limit user.project_bandwidth_limit user.project_segment_limit where user.id = ? 
) diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index ffea3630a..41d103495 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -704,6 +704,7 @@ CREATE TABLE users ( project_limit integer NOT NULL DEFAULT 0, project_bandwidth_limit bigint NOT NULL DEFAULT 0, project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, paid_tier boolean NOT NULL DEFAULT false, position text, company_name text, @@ -1287,6 +1288,7 @@ CREATE TABLE users ( project_limit integer NOT NULL DEFAULT 0, project_bandwidth_limit bigint NOT NULL DEFAULT 0, project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, paid_tier boolean NOT NULL DEFAULT false, position text, company_name text, @@ -7946,6 +7948,7 @@ type User struct { ProjectLimit int ProjectBandwidthLimit int64 ProjectStorageLimit int64 + ProjectSegmentLimit int64 PaidTier bool Position *string CompanyName *string @@ -7969,6 +7972,7 @@ type User_Create_Fields struct { ProjectLimit User_ProjectLimit_Field ProjectBandwidthLimit User_ProjectBandwidthLimit_Field ProjectStorageLimit User_ProjectStorageLimit_Field + ProjectSegmentLimit User_ProjectSegmentLimit_Field PaidTier User_PaidTier_Field Position User_Position_Field CompanyName User_CompanyName_Field @@ -7993,6 +7997,7 @@ type User_Update_Fields struct { ProjectLimit User_ProjectLimit_Field ProjectBandwidthLimit User_ProjectBandwidthLimit_Field ProjectStorageLimit User_ProjectStorageLimit_Field + ProjectSegmentLimit User_ProjectSegmentLimit_Field PaidTier User_PaidTier_Field Position User_Position_Field CompanyName User_CompanyName_Field @@ -8293,6 +8298,25 @@ func (f User_ProjectStorageLimit_Field) value() interface{} { func (User_ProjectStorageLimit_Field) _Column() string { return "project_storage_limit" } +type User_ProjectSegmentLimit_Field struct { + _set bool + _null bool + _value int64 +} + +func User_ProjectSegmentLimit(v int64) User_ProjectSegmentLimit_Field { + return User_ProjectSegmentLimit_Field{_set: true, _value: v} +} + +func (f User_ProjectSegmentLimit_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_ProjectSegmentLimit_Field) _Column() string { return "project_segment_limit" } + type User_PaidTier_Field struct { _set bool _null bool @@ -10229,9 +10253,10 @@ type ProjectLimit_Row struct { ProjectLimit int } -type ProjectStorageLimit_ProjectBandwidthLimit_Row struct { +type ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row struct { ProjectStorageLimit int64 ProjectBandwidthLimit int64 + ProjectSegmentLimit int64 } type SegmentLimit_Row struct { @@ -10425,7 +10450,7 @@ func (obj *pgxImpl) Create_User(ctx context.Context, var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, 
users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} var __values []interface{} __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val) @@ -10451,6 +10476,12 @@ func (obj *pgxImpl) Create_User(ctx context.Context, __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) } + if optional.ProjectSegmentLimit._set { + __values = append(__values, optional.ProjectSegmentLimit.value()) + __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("project_segment_limit")) + __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) + } + if optional.PaidTier._set { __values = append(__values, optional.PaidTier.value()) __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("paid_tier")) @@ -10487,7 +10518,7 @@ func (obj *pgxImpl) Create_User(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, obj.makeErr(err) } @@ -11471,7 +11502,7 @@ func (obj *pgxImpl) All_User_By_NormalizedEmail(ctx context.Context, rows []*User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ?") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -11489,7 +11520,7 @@ func (obj *pgxImpl) All_User_By_NormalizedEmail(ctx context.Context, for __rows.Next() { user := &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, 
&user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, err } @@ -11516,7 +11547,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -11540,7 +11571,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, err } @@ -11574,7 +11605,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, 
users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -11583,7 +11614,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -11613,12 +11644,12 @@ func (obj *pgxImpl) Get_User_ProjectLimit_By_Id(ctx context.Context, } -func (obj *pgxImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx context.Context, +func (obj *pgxImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx context.Context, user_id User_Id_Field) ( - row *ProjectStorageLimit_ProjectBandwidthLimit_Row, err error) { + row *ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.project_storage_limit, users.project_bandwidth_limit FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.project_storage_limit, users.project_bandwidth_limit, users.project_segment_limit FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -11626,10 +11657,10 @@ func (obj *pgxImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_I var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) 
- row = &ProjectStorageLimit_ProjectBandwidthLimit_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectStorageLimit, &row.ProjectBandwidthLimit) + row = &ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row{} + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectStorageLimit, &row.ProjectBandwidthLimit, &row.ProjectSegmentLimit) if err != nil { - return (*ProjectStorageLimit_ProjectBandwidthLimit_Row)(nil), obj.makeErr(err) + return (*ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row)(nil), obj.makeErr(err) } return row, nil @@ -14772,7 +14803,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -14823,6 +14854,11 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_storage_limit = ?")) } + if update.ProjectSegmentLimit._set { + __values = append(__values, update.ProjectSegmentLimit.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_segment_limit = ?")) + } + if update.PaidTier._set { __values = append(__values, update.PaidTier.value()) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("paid_tier = ?")) @@ -14896,7 +14932,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err == sql.ErrNoRows { return nil, nil } @@ -16433,7 +16469,7 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} var __values []interface{} __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val) @@ -16459,6 +16495,12 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) } + if 
optional.ProjectSegmentLimit._set { + __values = append(__values, optional.ProjectSegmentLimit.value()) + __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("project_segment_limit")) + __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) + } + if optional.PaidTier._set { __values = append(__values, optional.PaidTier.value()) __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("paid_tier")) @@ -16495,7 +16537,7 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, obj.makeErr(err) } @@ -17479,7 +17521,7 @@ func (obj *pgxcockroachImpl) All_User_By_NormalizedEmail(ctx context.Context, rows []*User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ?") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -17497,7 +17539,7 @@ func (obj *pgxcockroachImpl) All_User_By_NormalizedEmail(ctx context.Context, for __rows.Next() { user := &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, 
&user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, err } @@ -17524,7 +17566,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.normalized_email = ? 
AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -17548,7 +17590,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return nil, err } @@ -17582,7 +17624,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -17591,7 +17633,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -17621,12 +17663,12 @@ func (obj *pgxcockroachImpl) Get_User_ProjectLimit_By_Id(ctx context.Context, } -func (obj *pgxcockroachImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx context.Context, +func (obj *pgxcockroachImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx context.Context, user_id User_Id_Field) ( - row *ProjectStorageLimit_ProjectBandwidthLimit_Row, err error) { + row *ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.project_storage_limit, users.project_bandwidth_limit FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.project_storage_limit, users.project_bandwidth_limit, users.project_segment_limit FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -17634,10 +17676,10 @@ func (obj *pgxcockroachImpl) Get_User_ProjectStorageLimit_User_ProjectBandwidthL var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) - row = &ProjectStorageLimit_ProjectBandwidthLimit_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectStorageLimit, &row.ProjectBandwidthLimit) + row = &ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row{} + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectStorageLimit, &row.ProjectBandwidthLimit, &row.ProjectSegmentLimit) if err != nil { - return (*ProjectStorageLimit_ProjectBandwidthLimit_Row)(nil), obj.makeErr(err) + return (*ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row)(nil), obj.makeErr(err) } return row, nil @@ -20780,7 +20822,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? 
RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -20831,6 +20873,11 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_storage_limit = ?")) } + if update.ProjectSegmentLimit._set { + __values = append(__values, update.ProjectSegmentLimit.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_segment_limit = ?")) + } + if update.PaidTier._set { __values = append(__values, update.PaidTier.value()) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("paid_tier = ?")) @@ -20904,7 +20951,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode) if err == sql.ErrNoRows { return nil, nil } @@ -23314,14 +23361,14 @@ func (rx *Rx) Get_User_ProjectLimit_By_Id(ctx context.Context, return tx.Get_User_ProjectLimit_By_Id(ctx, user_id) } -func (rx *Rx) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx context.Context, +func (rx *Rx) Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx context.Context, user_id User_Id_Field) ( - row *ProjectStorageLimit_ProjectBandwidthLimit_Row, err error) { + row *ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row, err error) { var tx *Tx if tx, err = rx.getTx(ctx); err != nil { return } - return tx.Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx, user_id) + return tx.Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx, user_id) } func (rx *Rx) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context, @@ -24241,9 +24288,9 @@ type Methods interface { user_id User_Id_Field) ( row *ProjectLimit_Row, err error) - Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx context.Context, + Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx context.Context, user_id User_Id_Field) ( - row *ProjectStorageLimit_ProjectBandwidthLimit_Row, err error) + row *ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row, err error) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context, value_attribution_project_id ValueAttribution_ProjectId_Field, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql index 6c36ef05d..547cae8cb 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql @@ -384,6 +384,7 @@ CREATE TABLE users ( project_limit integer NOT NULL DEFAULT 0, project_bandwidth_limit bigint NOT NULL DEFAULT 0, project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, paid_tier boolean NOT NULL DEFAULT false, position text, company_name text, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql index 6c36ef05d..547cae8cb 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql @@ -384,6 +384,7 @@ 
CREATE TABLE users ( project_limit integer NOT NULL DEFAULT 0, project_bandwidth_limit bigint NOT NULL DEFAULT 0, project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, paid_tier boolean NOT NULL DEFAULT false, position text, company_name text, diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index 4d28dc124..8e3b69226 100644 --- a/satellite/satellitedb/migrate.go +++ b/satellite/satellitedb/migrate.go @@ -1727,6 +1727,14 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration { `ALTER TABLE projects ADD COLUMN segment_limit bigint DEFAULT 1000000`, }, }, + { + DB: &db.migrationDB, + Description: "add project_segment_limit to the users table", + Version: 183, + Action: migrate.SQL{ + `ALTER TABLE users ADD COLUMN project_segment_limit bigint NOT NULL DEFAULT 0`, + }, + }, // NB: after updating testdata in `testdata`, run // `go generate` to update `migratez.go`. diff --git a/satellite/satellitedb/migratez.go b/satellite/satellitedb/migratez.go index d53fb2d5a..62e35810b 100755 --- a/satellite/satellitedb/migratez.go +++ b/satellite/satellitedb/migratez.go @@ -13,7 +13,7 @@ func (db *satelliteDB) testMigration() *migrate.Migration { { DB: &db.migrationDB, Description: "Testing setup", - Version: 182, + Version: 183, Action: migrate.SQL{`-- AUTOGENERATED BY storj.io/dbx -- DO NOT EDIT CREATE TABLE accounting_rollups ( @@ -402,6 +402,7 @@ CREATE TABLE users ( project_limit integer NOT NULL DEFAULT 0, project_storage_limit bigint NOT NULL DEFAULT 0, project_bandwidth_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, paid_tier boolean NOT NULL DEFAULT false, position text, company_name text, diff --git a/satellite/satellitedb/projects.go b/satellite/satellitedb/projects.go index 2d9e519fd..772b0ca95 100644 --- a/satellite/satellitedb/projects.go +++ b/satellite/satellitedb/projects.go @@ -106,6 +106,9 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project) if project.BandwidthLimit != nil { createFields.BandwidthLimit = dbx.Project_BandwidthLimit(project.BandwidthLimit.Int64()) } + if project.SegmentLimit != nil { + createFields.SegmentLimit = dbx.Project_SegmentLimit(*project.SegmentLimit) + } createFields.RateLimit = dbx.Project_RateLimit_Raw(project.RateLimit) createFields.MaxBuckets = dbx.Project_MaxBuckets_Raw(project.MaxBuckets) @@ -149,6 +152,9 @@ func (projects *projects) Update(ctx context.Context, project *console.Project) if project.BandwidthLimit != nil { updateFields.BandwidthLimit = dbx.Project_BandwidthLimit(project.BandwidthLimit.Int64()) } + if project.SegmentLimit != nil { + updateFields.SegmentLimit = dbx.Project_SegmentLimit(*project.SegmentLimit) + } _, err = projects.db.Update_Project_By_Id(ctx, dbx.Project_Id(project.ID[:]), @@ -354,6 +360,7 @@ func projectFromDBX(ctx context.Context, project *dbx.Project) (_ *console.Proje CreatedAt: project.CreatedAt, StorageLimit: (*memory.Size)(project.UsageLimit), BandwidthLimit: (*memory.Size)(project.BandwidthLimit), + SegmentLimit: project.SegmentLimit, }, nil } diff --git a/satellite/satellitedb/testdata/postgres.v183.sql b/satellite/satellitedb/testdata/postgres.v183.sql new file mode 100644 index 000000000..304af2db4 --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v183.sql @@ -0,0 +1,620 @@ +-- AUTOGENERATED BY storj.io/dbx +-- DO NOT EDIT +CREATE TABLE accounting_rollups ( + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT 
NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + PRIMARY KEY ( node_id, start_time ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_bandwidth_rollup_archives ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + total_bytes bigint NOT NULL DEFAULT 0, + inline bigint NOT NULL, + remote bigint NOT NULL, + total_segments_count integer NOT NULL DEFAULT 0, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE coinpayments_transactions ( + id text NOT NULL, + user_id bytea NOT NULL, + address text NOT NULL, + amount bytea NOT NULL, + received bytea NOT NULL, + status integer NOT NULL, + key text NOT NULL, + timeout integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE coupons ( + id bytea NOT NULL, + user_id bytea NOT NULL, + amount bigint NOT NULL, + description text NOT NULL, + type integer NOT NULL, + status integer NOT NULL, + duration bigint NOT NULL, + billing_periods bigint, + coupon_code_name text, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE coupon_codes ( + id bytea NOT NULL, + name text NOT NULL, + amount bigint NOT NULL, + description text NOT NULL, + type integer NOT NULL, + billing_periods bigint, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( name ) +); +CREATE TABLE coupon_usages ( + coupon_id bytea NOT NULL, + amount bigint NOT NULL, + status integer NOT NULL, + period timestamp with time zone NOT NULL, + PRIMARY KEY ( coupon_id, period ) +); +CREATE TABLE graceful_exit_progress ( + node_id bytea NOT NULL, + bytes_transferred bigint NOT NULL, + pieces_transferred bigint NOT NULL DEFAULT 0, + pieces_failed bigint NOT NULL DEFAULT 0, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE graceful_exit_segment_transfer_queue ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_num integer NOT NULL, + root_piece_id bytea, + durability_ratio double precision NOT NULL, + queued_at timestamp with time zone NOT NULL, + requested_at timestamp with time zone, + last_failed_at timestamp with time zone, + last_failed_code integer, + failed_count integer, + finished_at timestamp with time zone, + order_limit_send_count integer NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, stream_id, position, piece_num ) 
+); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL DEFAULT '', + last_net text NOT NULL, + last_ip_port text, + protocol integer NOT NULL DEFAULT 0, + type integer NOT NULL DEFAULT 0, + email text NOT NULL, + wallet text NOT NULL, + wallet_features text NOT NULL DEFAULT '', + free_disk bigint NOT NULL DEFAULT -1, + piece_count bigint NOT NULL DEFAULT 0, + major bigint NOT NULL DEFAULT 0, + minor bigint NOT NULL DEFAULT 0, + patch bigint NOT NULL DEFAULT 0, + hash text NOT NULL DEFAULT '', + timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00', + release boolean NOT NULL DEFAULT false, + latency_90 bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch', + last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch', + contained boolean NOT NULL DEFAULT false, + disqualified timestamp with time zone, + disqualification_reason integer, + suspended timestamp with time zone, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + exit_initiated_at timestamp with time zone, + exit_loop_completed_at timestamp with time zone, + exit_finished_at timestamp with time zone, + exit_success boolean NOT NULL DEFAULT false, + country_code text, + PRIMARY KEY ( id ) +); +CREATE TABLE node_api_versions ( + id bytea NOT NULL, + api_version integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE offers ( + id serial NOT NULL, + name text NOT NULL, + description text NOT NULL, + award_credit_in_cents integer NOT NULL DEFAULT 0, + invitee_credit_in_cents integer NOT NULL DEFAULT 0, + award_credit_duration_days integer, + invitee_credit_duration_days integer, + redeemable_cap integer, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + status integer NOT NULL, + type integer NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE peer_identities ( + node_id bytea NOT NULL, + leaf_serial_number bytea NOT NULL, + chain bytea NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint, + bandwidth_limit bigint, + segment_limit bigint DEFAULT 1000000, + rate_limit integer, + burst_limit integer, + max_buckets integer, + partner_id bytea, + user_agent bytea, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE project_bandwidth_daily_rollups ( + project_id bytea NOT NULL, + interval_day date NOT NULL, + egress_allocated bigint NOT NULL, + egress_settled bigint NOT NULL, + egress_dead bigint NOT NULL DEFAULT 0, + PRIMARY KEY ( project_id, interval_day ) +); +CREATE TABLE project_bandwidth_rollups ( + project_id bytea NOT NULL, + interval_month date NOT NULL, + egress_allocated bigint NOT NULL, + PRIMARY KEY ( project_id, interval_month ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE repair_queue ( + stream_id bytea NOT NULL, + position 
bigint NOT NULL, + attempted_at timestamp with time zone, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + segment_health double precision NOT NULL DEFAULT 1, + PRIMARY KEY ( stream_id, position ) +); +CREATE TABLE reputations ( + id bytea NOT NULL, + audit_success_count bigint NOT NULL DEFAULT 0, + total_audit_count bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + contained boolean NOT NULL DEFAULT false, + disqualified timestamp with time zone, + suspended timestamp with time zone, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + online_score double precision NOT NULL DEFAULT 1, + audit_history bytea NOT NULL, + audit_reputation_alpha double precision NOT NULL DEFAULT 1, + audit_reputation_beta double precision NOT NULL DEFAULT 0, + unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1, + unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0, + PRIMARY KEY ( id ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE revocations ( + revoked bytea NOT NULL, + api_key_id bytea NOT NULL, + PRIMARY KEY ( revoked ) +); +CREATE TABLE segment_pending_audits ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollup_archives ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollups_phase2 ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_payments ( + id bigserial NOT NULL, + created_at timestamp with time zone NOT NULL, + node_id bytea NOT NULL, + period text NOT NULL, + amount bigint NOT NULL, + receipt text, + notes text, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_paystubs ( + period text NOT NULL, + node_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + codes text NOT NULL, + usage_at_rest double precision NOT NULL, + usage_get bigint NOT NULL, + usage_put bigint NOT NULL, + usage_get_repair bigint NOT NULL, + usage_put_repair bigint NOT NULL, + usage_get_audit bigint NOT NULL, + comp_at_rest bigint NOT NULL, + comp_get bigint NOT NULL, + comp_put bigint NOT NULL, + comp_get_repair 
bigint NOT NULL, + comp_put_repair bigint NOT NULL, + comp_get_audit bigint NOT NULL, + surge_percent bigint NOT NULL, + held bigint NOT NULL, + owed bigint NOT NULL, + disposed bigint NOT NULL, + paid bigint NOT NULL, + distributed bigint NOT NULL, + PRIMARY KEY ( period, node_id ) +); +CREATE TABLE storagenode_storage_tallies ( + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( interval_end_time, node_id ) +); +CREATE TABLE stripe_customers ( + user_id bytea NOT NULL, + customer_id text NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE stripecoinpayments_invoice_project_records ( + id bytea NOT NULL, + project_id bytea NOT NULL, + storage double precision NOT NULL, + egress bigint NOT NULL, + objects bigint, + segments bigint, + period_start timestamp with time zone NOT NULL, + period_end timestamp with time zone NOT NULL, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, period_start, period_end ) +); +CREATE TABLE stripecoinpayments_tx_conversion_rates ( + tx_id text NOT NULL, + rate bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + normalized_email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + partner_id bytea, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + project_limit integer NOT NULL DEFAULT 0, + project_storage_limit bigint NOT NULL DEFAULT 0, + project_bandwidth_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, + paid_tier boolean NOT NULL DEFAULT false, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, + employee_count text, + have_sales_contact boolean NOT NULL DEFAULT false, + mfa_enabled boolean NOT NULL DEFAULT false, + mfa_secret_key text, + mfa_recovery_codes text, + signup_promo_code text, + PRIMARY KEY ( id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, + bucket_name bytea NOT NULL, + partner_id bytea NOT NULL, + user_agent bytea, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + partner_id bytea, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + partner_id bytea, + user_agent bytea, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + placement integer, + PRIMARY KEY ( 
id ), + UNIQUE ( project_id, name ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE stripecoinpayments_apply_balance_intents ( + tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE user_credits ( + id serial NOT NULL, + user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + offer_id integer NOT NULL REFERENCES offers( id ), + referred_by bytea REFERENCES users( id ) ON DELETE SET NULL, + type text NOT NULL, + credits_earned_in_cents integer NOT NULL, + credits_used_in_cents integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( id, offer_id ) +); +CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time ) ; +CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start ) ; +CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id ) ; +CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start ) ; +CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id ) ; +CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start ) ; +CREATE INDEX graceful_exit_segment_transfer_nid_dr_qa_fa_lfa_index ON graceful_exit_segment_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at ) ; +CREATE INDEX node_last_ip ON nodes ( last_net ) ; +CREATE INDEX nodes_dis_unk_off_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, offline_suspended, exit_finished_at, last_contact_success ) ; +CREATE INDEX nodes_type_last_cont_success_free_disk_ma_mi_patch_vetted_partial_index ON nodes ( type, last_contact_success, free_disk, major, minor, patch, vetted_at ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true AND nodes.last_net != '' ; +CREATE INDEX nodes_dis_unk_aud_exit_init_rel_type_last_cont_success_stored_index ON nodes ( disqualified, unknown_audit_suspended, exit_initiated_at, release, type, last_contact_success ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true ; +CREATE INDEX repair_queue_updated_at_index ON repair_queue ( updated_at ) ; +CREATE INDEX repair_queue_num_healthy_pieces_attempted_at_index ON repair_queue ( segment_health, attempted_at ) ; +CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start ) ; +CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start ) ; +CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period ) ; +CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id ) ; 
+CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id ) ; +CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id ) ; + +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14); +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14); + +-- MAIN DATA -- + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended","exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended","exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL,false); +INSERT INTO "nodes"("id", 
"address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended","exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL,false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended","exit_success", "vetted_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false, '2020-03-18 12:00:00.000000+00'); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended","exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NUll, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success") VALUES 
(E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false, 10, 50000000000, 50000000000, false); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit", "have_sales_contact") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\304\\313\\206\\311",'::bytea, 'Ian', 'Pires', '3email3@mail.test', '3EMAIL3@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-03-18 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 51, true, '1-50', 10, 50000000000, 50000000000, true); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\312",'::bytea, 'Campbell', 'Wright', '4email4@mail.test', '4EMAIL4@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-07-17 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 82, true, '1-50', 10, 50000000000, 50000000000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 5e11, 5e11, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00'); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES 
(E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00'); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 
E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "user_agent", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, NULL, '2019-02-14 08:07:31.028103+00'); + +INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00'); + +INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", 
"interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024); + +INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "billing_periods", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, 2, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "billing_periods", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\012'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, 2, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "billing_periods", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, 2, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00'); +INSERT INTO "coupon_codes" ("id", "name", "amount", "description", "type", "billing_periods", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'STORJ50', 50, '$50 for your first 5 months', 0, NULL, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupon_codes" ("id", "name", "amount", "description", "type", "billing_periods", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015'::bytea, 'STORJ75', 75, '$75 for your first 5 months', 0, 2, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00'); + +INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000); +INSERT INTO "project_bandwidth_daily_rollups"("project_id", "interval_day", egress_allocated, egress_settled, egress_dead) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2021-04-22', 10000, 5000, 0); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', 5e11, 5e11, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00'); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", 
"usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', 5e11, 5e11, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', 5e11, 5e11, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, NULL, 
E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL); + +INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); + +INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117); +INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117); + +INSERT INTO "reputations"("id", "audit_success_count", "total_audit_count", "created_at", "updated_at", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "online_score", "audit_history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', false, NULL, NULL, 50, 0, 1, 0, 1, '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "graceful_exit_segment_transfer_queue" ("node_id", "stream_id", "position", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 10 , 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "segment_pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "stream_id", position) VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, 
E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, '\x010101', 1); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\342U\\303\\312\\204",'::bytea, 'Noahson', 'William', '100email1@mail.test', '100EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false, 10, 100000000000000, 25000000000000, true); + +INSERT INTO "repair_queue" ("stream_id", "position", "attempted_at", "segment_health", "updated_at", "inserted_at") VALUES ('\x01', 1, null, 1, '2020-09-01 00:00:00.000000+00', '2021-09-01 00:00:00.000000+00'); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_limit", "project_bandwidth_limit", "project_storage_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\204",'::bytea, 'Noahson William', '101email1@mail.test', '101EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6g7h8"]', 3, 50000000000, 50000000000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "burst_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\251\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, 4000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\205",'::bytea, 'Felicia Smith', '99email1@mail.test', '99EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "segments", "period_start", "period_end", "state", "created_at") VALUES (E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success", "country_code") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2021-02-14 08:07:31.028103+00', '2021-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, NULL, false, 'DE'); +INSERT INTO "bucket_metainfos" ("id", 
"project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares", "placement") VALUES (E'\\144/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketotheruniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10, 1); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "disqualification_reason", "suspended", "exit_success", "country_code") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\017', '127.0.0.1:55517', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', false, '2021-10-13 08:07:31.108963+00', 0, NULL, false, NULL); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\267\\342U\\303\\312\\203",'::bytea, 'Jessica Thompson', '143email1@mail.test', '143EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-04 08:27:56.614594+00', true, 'mfa secret key', '["2b3c4d5e","f6a7e8e9"]', 'promo123', 3, '150000000000', '150000000000'); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Heather Jackson', '762email@mail.test', '762EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-05 03:22:39.614594+00', true, 'mfa secret key', '["5e4d3c2b","e9e8a7f6"]', 'promo123', 3, '100000000000000', '25000000000000'); + +-- NEW DATA -- \ No newline at end of file diff --git a/satellite/satellitedb/users.go b/satellite/satellitedb/users.go index 7a4262ecb..364034216 100644 --- a/satellite/satellitedb/users.go +++ b/satellite/satellitedb/users.go @@ -103,6 +103,9 @@ func (users *users) Insert(ctx context.Context, user *console.User) (_ *console. if user.ProjectBandwidthLimit != 0 { optional.ProjectBandwidthLimit = dbx.User_ProjectBandwidthLimit(user.ProjectBandwidthLimit) } + if user.ProjectSegmentLimit != 0 { + optional.ProjectSegmentLimit = dbx.User_ProjectSegmentLimit(user.ProjectSegmentLimit) + } if user.IsProfessional { optional.Position = dbx.User_Position(user.Position) optional.CompanyName = dbx.User_CompanyName(user.CompanyName) @@ -154,7 +157,7 @@ func (users *users) Update(ctx context.Context, user *console.User) (err error) } // UpdatePaidTier sets whether the user is in the paid tier. 
-func (users *users) UpdatePaidTier(ctx context.Context, id uuid.UUID, paidTier bool, projectBandwidthLimit, projectStorageLimit memory.Size) (err error) {
+func (users *users) UpdatePaidTier(ctx context.Context, id uuid.UUID, paidTier bool, projectBandwidthLimit, projectStorageLimit memory.Size, projectSegmentLimit int64) (err error) {
 	defer mon.Task()(&ctx)(&err)
 
 	_, err = users.db.Update_User_By_Id(
@@ -164,6 +167,7 @@ func (users *users) UpdatePaidTier(ctx context.Context, id uuid.UUID, paidTier b
 			PaidTier: dbx.User_PaidTier(paidTier),
 			ProjectBandwidthLimit: dbx.User_ProjectBandwidthLimit(projectBandwidthLimit.Int64()),
 			ProjectStorageLimit: dbx.User_ProjectStorageLimit(projectStorageLimit.Int64()),
+			ProjectSegmentLimit: dbx.User_ProjectSegmentLimit(projectSegmentLimit),
 		},
 	)
@@ -185,7 +189,7 @@ func (users *users) GetProjectLimit(ctx context.Context, id uuid.UUID) (limit in
 
 func (users *users) GetUserProjectLimits(ctx context.Context, id uuid.UUID) (limits *console.ProjectLimits, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	row, err := users.db.Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_By_Id(ctx, dbx.User_Id(id[:]))
+	row, err := users.db.Get_User_ProjectStorageLimit_User_ProjectBandwidthLimit_User_ProjectSegmentLimit_By_Id(ctx, dbx.User_Id(id[:]))
 	if err != nil {
 		return nil, err
 	}
@@ -204,6 +208,7 @@ func toUpdateUser(user *console.User) (*dbx.User_Update_Fields, error) {
 		ProjectLimit: dbx.User_ProjectLimit(user.ProjectLimit),
 		ProjectStorageLimit: dbx.User_ProjectStorageLimit(user.ProjectStorageLimit),
 		ProjectBandwidthLimit: dbx.User_ProjectBandwidthLimit(user.ProjectBandwidthLimit),
+		ProjectSegmentLimit: dbx.User_ProjectSegmentLimit(user.ProjectSegmentLimit),
 		PaidTier: dbx.User_PaidTier(user.PaidTier),
 		MfaEnabled: dbx.User_MfaEnabled(user.MFAEnabled),
 	}
@@ -253,6 +258,7 @@ func userFromDBX(ctx context.Context, user *dbx.User) (_ *console.User, err erro
 		ProjectLimit: user.ProjectLimit,
 		ProjectBandwidthLimit: user.ProjectBandwidthLimit,
 		ProjectStorageLimit: user.ProjectStorageLimit,
+		ProjectSegmentLimit: user.ProjectSegmentLimit,
 		PaidTier: user.PaidTier,
 		IsProfessional: user.IsProfessional,
 		HaveSalesContact: user.HaveSalesContact,
@@ -306,7 +312,7 @@ func userFromDBX(ctx context.Context, user *dbx.User) (_ *console.User, err erro
 }
 
 // limitsFromDBX is used for creating user project limits entity from autogenerated dbx.User struct.
-func limitsFromDBX(ctx context.Context, limits *dbx.ProjectStorageLimit_ProjectBandwidthLimit_Row) (_ *console.ProjectLimits, err error) {
+func limitsFromDBX(ctx context.Context, limits *dbx.ProjectStorageLimit_ProjectBandwidthLimit_ProjectSegmentLimit_Row) (_ *console.ProjectLimits, err error) {
 	defer mon.Task()(&ctx)(&err)
 	if limits == nil {
 		return nil, errs.New("user parameter is nil")
 	}
@@ -315,6 +321,7 @@ func limitsFromDBX(ctx context.Context, limits *dbx.ProjectStorageLimit_ProjectB
 	result := console.ProjectLimits{
 		ProjectBandwidthLimit: memory.Size(limits.ProjectBandwidthLimit),
 		ProjectStorageLimit: memory.Size(limits.ProjectStorageLimit),
+		ProjectSegmentLimit: limits.ProjectSegmentLimit,
 	}
 	return &result, nil
 }
diff --git a/scripts/testdata/satellite-config.yaml.lock b/scripts/testdata/satellite-config.yaml.lock
index 98fb616cb..794ff036a 100755
--- a/scripts/testdata/satellite-config.yaml.lock
+++ b/scripts/testdata/satellite-config.yaml.lock
@@ -28,6 +28,12 @@
 # the default paid-tier bandwidth usage limit
 # admin.console-config.usage-limits.bandwidth.paid: 100.00 TB
 
+# the default free-tier segment usage limit
+# admin.console-config.usage-limits.segment.free: 140000
+
+# the default paid-tier segment usage limit
+# admin.console-config.usage-limits.segment.paid: 1000000
+
 # the default free-tier storage usage limit
 # admin.console-config.usage-limits.storage.free: 150.00 GB
 
@@ -247,6 +253,12 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
 # the default paid-tier bandwidth usage limit
 # console.usage-limits.bandwidth.paid: 100.00 TB
 
+# the default free-tier segment usage limit
+# console.usage-limits.segment.free: 140000
+
+# the default paid-tier segment usage limit
+# console.usage-limits.segment.paid: 1000000
+
 # the default free-tier storage usage limit
 # console.usage-limits.storage.free: 150.00 GB
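
A note for reviewers on the pointer semantics this patch introduces: console.Project.SegmentLimit is a *int64, and projects.Insert and projects.Update above only write the column when the pointer is non-nil, so nil means "keep the stored default" (1000000 for projects, per the earlier migration). What follows is a minimal, self-contained Go sketch of the guarded "raise to the paid-tier default" pattern the upgrade path relies on; the helper name raiseLimit and the main driver are illustrative only and do not exist in this diff.

package main

import "fmt"

// raiseLimit returns the greater of the current limit and the paid-tier
// default, allocating a fresh value when the current limit is nil (unset).
// Illustrative helper only; not part of the satellite codebase.
func raiseLimit(current *int64, paidDefault int64) *int64 {
	if current == nil || *current < paidDefault {
		v := paidDefault
		return &v
	}
	return current
}

func main() {
	var unset *int64         // NULL column: the limit was never set
	free := int64(140000)    // the free-tier default from the config lock above
	custom := int64(2000000) // a limit an operator raised by hand

	fmt.Println(*raiseLimit(unset, 1000000))   // 1000000
	fmt.Println(*raiseLimit(&free, 1000000))   // 1000000
	fmt.Println(*raiseLimit(&custom, 1000000)) // 2000000
}

The edge case worth testing is the nil pointer: dereferencing an unset *int64 limit in order to assign through it would panic, so the upgrade path must allocate a new value (as the sketch does) before storing the paid-tier default.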