diff --git a/satellite/satellitedb/dbx/project.dbx b/satellite/satellitedb/dbx/project.dbx
index 93ba008e2..e9abe8983 100644
--- a/satellite/satellitedb/dbx/project.dbx
+++ b/satellite/satellitedb/dbx/project.dbx
@@ -39,9 +39,6 @@ model project (
 	field burst_limit int ( nullable, updatable )
 	// max_buckets is the maximum number of buckets that can be created for the project.
 	field max_buckets int ( nullable, updatable )
-	// partner_id is an UUID that refers to rewards.PartnersStaticDB.
-	// deprecated: use user_agent instead.
-	field partner_id blob ( nullable )
 	// user_agent is the referred partner who created the project.
 	field user_agent blob ( nullable )
 	// owner_id refers to the user UUID in user.id.
@@ -206,9 +203,6 @@ model api_key (
 	field name text (updatable)
 	// secret is the macaroon secret.
 	field secret blob
-	// partner_id is an UUID that refers to rewards.PartnersStaticDB.
-	// deprecated: use user_agent instead.
-	field partner_id blob (nullable)
 	// user_agent is the referred partner who created the project.
 	field user_agent blob (nullable)
 	// created_at indicates when the api key was added.
@@ -237,4 +231,4 @@ read one (
 	join project.id = api_key.project_id
 	where api_key.name = ?
 	where api_key.project_id = ?
-)
\ No newline at end of file
+)
diff --git a/satellite/satellitedb/dbx/project_bucket.dbx b/satellite/satellitedb/dbx/project_bucket.dbx
index 823f473d9..1a9d1e951 100644
--- a/satellite/satellitedb/dbx/project_bucket.dbx
+++ b/satellite/satellitedb/dbx/project_bucket.dbx
@@ -10,10 +10,6 @@ model bucket_metainfo (
 	// name is a unique name inside the project.
 	// it's a alphanumeric string similar to domain names.
 	field name blob
-	// partner_id is an UUID that refers to rewards.PartnersStaticDB.
-	// deprecated: use user_agent instead.
-	// note: this field is duplicated in value_attribution.project_id.
-	field partner_id blob (nullable, updatable)
 	// user_agent is the first User-Agent that was used to upload data,
 	// unless the user signed up with a specific partner.
 	// note: this field is duplicated in value_attribution.user_agent.
@@ -148,10 +144,6 @@ model value_attribution (
 	// This does not use the id, because we need the attribution to last
 	// beyond the lifetime of bucket_metainfo row.
 	field bucket_name blob
-	// partner_id is an UUID that refers to rewards.PartnersStaticDB.
-	// deprecated: use user_agent instead.
-	// note: this field is duplicated in bucket_metainfo.project_id.
-	field partner_id blob ( nullable )
 	// user_agent is the first User-Agent that was used to upload data.
 	// unless the user signed up with a specific partner.
 	// note: this field is duplicated in bucket_metainfo.user_agent.
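The .dbx changes above only affect code generation; actually dropping the columns from a deployed database would happen in a separate satellitedb migration step, which is not part of this diff. A minimal sketch of what such a step could look like, assuming the usual storj.io/private/migrate step shape (the version number, the db receiver, and the description are placeholders, not taken from this change):

	// Hypothetical companion migration step (not in this diff); version is a placeholder.
	&migrate.Step{
		DB:          &db.migrationDB,
		Description: "drop deprecated partner_id columns",
		Version:     230, // placeholder; the real value would follow the latest step
		Action: migrate.SQL{
			`ALTER TABLE projects DROP COLUMN partner_id;`,
			`ALTER TABLE users DROP COLUMN partner_id;`,
			`ALTER TABLE value_attributions DROP COLUMN partner_id;`,
			`ALTER TABLE api_keys DROP COLUMN partner_id;`,
			`ALTER TABLE bucket_metainfos DROP COLUMN partner_id;`,
		},
	},

The regenerated schema and code in satellitedb.dbx.go below assume those columns are gone.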
diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go
index 770cb2368..c03b0f6d7 100644
--- a/satellite/satellitedb/dbx/satellitedb.dbx.go
+++ b/satellite/satellitedb/dbx/satellitedb.dbx.go
@@ -545,7 +545,6 @@ CREATE TABLE projects (
 	rate_limit integer,
 	burst_limit integer,
 	max_buckets integer,
-	partner_id bytea,
 	user_agent bytea,
 	owner_id bytea NOT NULL,
 	salt bytea,
@@ -755,7 +754,6 @@ CREATE TABLE users (
 	short_name text,
 	password_hash bytea NOT NULL,
 	status integer NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	created_at timestamp with time zone NOT NULL,
 	project_limit integer NOT NULL DEFAULT 0,
@@ -792,7 +790,6 @@ CREATE TABLE user_settings (
 CREATE TABLE value_attributions (
 	project_id bytea NOT NULL,
 	bucket_name bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	last_updated timestamp with time zone NOT NULL,
 	PRIMARY KEY ( project_id, bucket_name )
@@ -820,7 +817,6 @@ CREATE TABLE api_keys (
 	head bytea NOT NULL,
 	name text NOT NULL,
 	secret bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	created_at timestamp with time zone NOT NULL,
 	PRIMARY KEY ( id ),
@@ -831,7 +827,6 @@ CREATE TABLE bucket_metainfos (
 	id bytea NOT NULL,
 	project_id bytea NOT NULL REFERENCES projects( id ),
 	name bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	path_cipher integer NOT NULL,
 	created_at timestamp with time zone NOT NULL,
@@ -1228,7 +1223,6 @@ CREATE TABLE projects (
 	rate_limit integer,
 	burst_limit integer,
 	max_buckets integer,
-	partner_id bytea,
 	user_agent bytea,
 	owner_id bytea NOT NULL,
 	salt bytea,
@@ -1438,7 +1432,6 @@ CREATE TABLE users (
 	short_name text,
 	password_hash bytea NOT NULL,
 	status integer NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	created_at timestamp with time zone NOT NULL,
 	project_limit integer NOT NULL DEFAULT 0,
@@ -1475,7 +1468,6 @@ CREATE TABLE user_settings (
 CREATE TABLE value_attributions (
 	project_id bytea NOT NULL,
 	bucket_name bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	last_updated timestamp with time zone NOT NULL,
 	PRIMARY KEY ( project_id, bucket_name )
@@ -1503,7 +1495,6 @@ CREATE TABLE api_keys (
 	head bytea NOT NULL,
 	name text NOT NULL,
 	secret bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	created_at timestamp with time zone NOT NULL,
 	PRIMARY KEY ( id ),
@@ -1514,7 +1505,6 @@ CREATE TABLE bucket_metainfos (
 	id bytea NOT NULL,
 	project_id bytea NOT NULL REFERENCES projects( id ),
 	name bytea NOT NULL,
-	partner_id bytea,
 	user_agent bytea,
 	path_cipher integer NOT NULL,
 	created_at timestamp with time zone NOT NULL,
@@ -5516,7 +5506,6 @@ type Project struct {
 	RateLimit *int
 	BurstLimit *int
 	MaxBuckets *int
-	PartnerId []byte
 	UserAgent []byte
 	OwnerId []byte
 	Salt []byte
@@ -5535,7 +5524,6 @@ type Project_Create_Fields struct {
 	RateLimit Project_RateLimit_Field
 	BurstLimit Project_BurstLimit_Field
 	MaxBuckets Project_MaxBuckets_Field
-	PartnerId Project_PartnerId_Field
 	UserAgent Project_UserAgent_Field
 	Salt Project_Salt_Field
 }
@@ -5904,38 +5892,6 @@ func (f Project_MaxBuckets_Field) value() interface{}
 
 func (Project_MaxBuckets_Field) _Column() string { return "max_buckets" }
 
-type Project_PartnerId_Field struct {
-	_set bool
-	_null bool
-	_value []byte
-}
-
-func Project_PartnerId(v []byte) Project_PartnerId_Field {
-	return Project_PartnerId_Field{_set: true, _value: v}
-}
-
-func Project_PartnerId_Raw(v []byte) Project_PartnerId_Field {
-	if v == nil {
-		return Project_PartnerId_Null()
-	}
-	return Project_PartnerId(v)
-}
-
-func Project_PartnerId_Null() Project_PartnerId_Field {
-	return Project_PartnerId_Field{_set: true, _null: true}
-}
-
-func (f Project_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
-
-func (f Project_PartnerId_Field) value() interface{} {
-	if !f._set || f._null {
-		return nil
-	}
-	return f._value
-}
-
-func (Project_PartnerId_Field) _Column() string { return "partner_id" }
-
 type Project_UserAgent_Field struct {
 	_set bool
 	_null bool
@@ -9252,7 +9208,6 @@ type User struct {
 	ShortName *string
 	PasswordHash []byte
 	Status int
-	PartnerId []byte
 	UserAgent []byte
 	CreatedAt time.Time
 	ProjectLimit int
@@ -9281,7 +9236,6 @@ func (User) _Table() string { return "users" }
 
 type User_Create_Fields struct {
 	ShortName User_ShortName_Field
-	PartnerId User_PartnerId_Field
 	UserAgent User_UserAgent_Field
 	ProjectLimit User_ProjectLimit_Field
 	ProjectBandwidthLimit User_ProjectBandwidthLimit_Field
@@ -9479,38 +9433,6 @@ func (f User_Status_Field) value() interface{}
 
 func (User_Status_Field) _Column() string { return "status" }
 
-type User_PartnerId_Field struct {
-	_set bool
-	_null bool
-	_value []byte
-}
-
-func User_PartnerId(v []byte) User_PartnerId_Field {
-	return User_PartnerId_Field{_set: true, _value: v}
-}
-
-func User_PartnerId_Raw(v []byte) User_PartnerId_Field {
-	if v == nil {
-		return User_PartnerId_Null()
-	}
-	return User_PartnerId(v)
-}
-
-func User_PartnerId_Null() User_PartnerId_Field {
-	return User_PartnerId_Field{_set: true, _null: true}
-}
-
-func (f User_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
-
-func (f User_PartnerId_Field) value() interface{} {
-	if !f._set || f._null {
-		return nil
-	}
-	return f._value
-}
-
-func (User_PartnerId_Field) _Column() string { return "partner_id" }
-
 type User_UserAgent_Field struct {
 	_set bool
 	_null bool
@@ -10276,7 +10198,6 @@ func (UserSettings_OnboardingStep_Field) _Column() string { return "onboarding_s
 type ValueAttribution struct {
 	ProjectId []byte
 	BucketName []byte
-	PartnerId []byte
 	UserAgent []byte
 	LastUpdated time.Time
 }
@@ -10284,7 +10205,6 @@ type ValueAttribution struct {
 
 func (ValueAttribution) _Table() string { return "value_attributions" }
 
 type ValueAttribution_Create_Fields struct {
-	PartnerId ValueAttribution_PartnerId_Field
 	UserAgent ValueAttribution_UserAgent_Field
 }
@@ -10329,38 +10249,6 @@ func (f ValueAttribution_BucketName_Field) value() interface{}
 
 func (ValueAttribution_BucketName_Field) _Column() string { return "bucket_name" }
 
-type ValueAttribution_PartnerId_Field struct {
-	_set bool
-	_null bool
-	_value []byte
-}
-
-func ValueAttribution_PartnerId(v []byte) ValueAttribution_PartnerId_Field {
-	return ValueAttribution_PartnerId_Field{_set: true, _value: v}
-}
-
-func ValueAttribution_PartnerId_Raw(v []byte) ValueAttribution_PartnerId_Field {
-	if v == nil {
-		return ValueAttribution_PartnerId_Null()
-	}
-	return ValueAttribution_PartnerId(v)
-}
-
-func ValueAttribution_PartnerId_Null() ValueAttribution_PartnerId_Field {
-	return ValueAttribution_PartnerId_Field{_set: true, _null: true}
-}
-
-func (f ValueAttribution_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
-
-func (f ValueAttribution_PartnerId_Field) value() interface{} {
-	if !f._set || f._null {
-		return nil
-	}
-	return f._value
-}
-
-func (ValueAttribution_PartnerId_Field) _Column() string { return "partner_id" }
-
 type ValueAttribution_UserAgent_Field struct {
 	_set bool
 	_null bool
@@ -10676,7 +10564,6 @@ type ApiKey struct {
 	Head []byte
 	Name string
 	Secret []byte
-	PartnerId []byte
 	UserAgent []byte
 	CreatedAt time.Time
 }
@@ -10684,7 +10571,6 @@ type ApiKey struct {
 
 func (ApiKey) _Table() string { return "api_keys" }
 
 type ApiKey_Create_Fields struct {
-	PartnerId ApiKey_PartnerId_Field
 	UserAgent ApiKey_UserAgent_Field
 }
@@ -10787,38 +10673,6 @@ func (f ApiKey_Secret_Field) value() interface{}
 
 func (ApiKey_Secret_Field) _Column() string { return "secret" }
 
-type ApiKey_PartnerId_Field struct {
-	_set bool
-	_null bool
-	_value []byte
-}
-
-func ApiKey_PartnerId(v []byte) ApiKey_PartnerId_Field {
-	return ApiKey_PartnerId_Field{_set: true, _value: v}
-}
-
-func ApiKey_PartnerId_Raw(v []byte) ApiKey_PartnerId_Field {
-	if v == nil {
-		return ApiKey_PartnerId_Null()
-	}
-	return ApiKey_PartnerId(v)
-}
-
-func ApiKey_PartnerId_Null() ApiKey_PartnerId_Field {
-	return ApiKey_PartnerId_Field{_set: true, _null: true}
-}
-
-func (f ApiKey_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
-
-func (f ApiKey_PartnerId_Field) value() interface{} {
-	if !f._set || f._null {
-		return nil
-	}
-	return f._value
-}
-
-func (ApiKey_PartnerId_Field) _Column() string { return "partner_id" }
-
 type ApiKey_UserAgent_Field struct {
 	_set bool
 	_null bool
@@ -10874,7 +10728,6 @@ type BucketMetainfo struct {
 	Id []byte
 	ProjectId []byte
 	Name []byte
-	PartnerId []byte
 	UserAgent []byte
 	PathCipher int
 	CreatedAt time.Time
@@ -10893,13 +10746,11 @@ type BucketMetainfo struct {
 func (BucketMetainfo) _Table() string { return "bucket_metainfos" }
 
 type BucketMetainfo_Create_Fields struct {
-	PartnerId BucketMetainfo_PartnerId_Field
 	UserAgent BucketMetainfo_UserAgent_Field
 	Placement BucketMetainfo_Placement_Field
 }
 
 type BucketMetainfo_Update_Fields struct {
-	PartnerId BucketMetainfo_PartnerId_Field
 	UserAgent BucketMetainfo_UserAgent_Field
 	DefaultSegmentSize BucketMetainfo_DefaultSegmentSize_Field
 	DefaultEncryptionCipherSuite BucketMetainfo_DefaultEncryptionCipherSuite_Field
@@ -10970,38 +10821,6 @@ func (f BucketMetainfo_Name_Field) value() interface{}
 
 func (BucketMetainfo_Name_Field) _Column() string { return "name" }
 
-type BucketMetainfo_PartnerId_Field struct {
-	_set bool
-	_null bool
-	_value []byte
-}
-
-func BucketMetainfo_PartnerId(v []byte) BucketMetainfo_PartnerId_Field {
-	return BucketMetainfo_PartnerId_Field{_set: true, _value: v}
-}
-
-func BucketMetainfo_PartnerId_Raw(v []byte) BucketMetainfo_PartnerId_Field {
-	if v == nil {
-		return BucketMetainfo_PartnerId_Null()
-	}
-	return BucketMetainfo_PartnerId(v)
-}
-
-func BucketMetainfo_PartnerId_Null() BucketMetainfo_PartnerId_Field {
-	return BucketMetainfo_PartnerId_Field{_set: true, _null: true}
-}
-
-func (f BucketMetainfo_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
-
-func (f BucketMetainfo_PartnerId_Field) value() interface{} {
-	if !f._set || f._null {
-		return nil
-	}
-	return f._value
-}
-
-func (BucketMetainfo_PartnerId_Field) _Column() string { return "partner_id" }
-
 type BucketMetainfo_UserAgent_Field struct {
 	_set bool
 	_null bool
@@ -12837,20 +12656,19 @@ func (obj *pgxImpl) Create_Project(ctx context.Context,
 	__rate_limit_val := optional.RateLimit.value()
 	__burst_limit_val := optional.BurstLimit.value()
 	__max_buckets_val := optional.MaxBuckets.value()
-	__partner_id_val := optional.PartnerId.value()
 	__user_agent_val := optional.UserAgent.value()
 	__owner_id_val := project_owner_id.value()
 	__salt_val := optional.Salt.value()
 	__created_at_val := __now
 
-	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, public_id, name, description, usage_limit, bandwidth_limit, user_specified_usage_limit, user_specified_bandwidth_limit, rate_limit, burst_limit, max_buckets, partner_id, user_agent, owner_id, salt, created_at")}
-	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
+	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, public_id, name, description, usage_limit, bandwidth_limit, user_specified_usage_limit, user_specified_bandwidth_limit, rate_limit, burst_limit, max_buckets, user_agent, owner_id, salt, created_at")}
+	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
 	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
 
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}}
+	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}}
 
 	var __values []interface{}
-	__values = append(__values, __id_val, __public_id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __user_specified_usage_limit_val, __user_specified_bandwidth_limit_val, __rate_limit_val, __burst_limit_val, __max_buckets_val, __partner_id_val, __user_agent_val, __owner_id_val, __salt_val, __created_at_val)
+	__values = append(__values, __id_val, __public_id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __user_specified_usage_limit_val, __user_specified_bandwidth_limit_val, __rate_limit_val, __burst_limit_val, __max_buckets_val, __user_agent_val, __owner_id_val, __salt_val, __created_at_val)
 
 	__optional_columns := __sqlbundle_Literals{Join: ", "}
 	__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@@ -12873,7 +12691,7 @@ func (obj *pgxImpl) Create_Project(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	project = &Project{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -12955,20 +12773,19 @@ func (obj *pgxImpl) Create_ApiKey(ctx context.Context,
 	__head_val := api_key_head.value()
 	__name_val := api_key_name.value()
 	__secret_val := api_key_secret.value()
-	__partner_id_val := optional.PartnerId.value()
 	__user_agent_val := optional.UserAgent.value()
 	__created_at_val := __now
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at")
 
 	var __values []interface{}
-	__values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __user_agent_val, __created_at_val)
+	__values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val)
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
 	api_key = &ApiKey{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.UserAgent, &api_key.CreatedAt)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -12998,7 +12815,6 @@ func (obj *pgxImpl) Create_BucketMetainfo(ctx context.Context,
 	__id_val := bucket_metainfo_id.value()
 	__project_id_val := bucket_metainfo_project_id.value()
 	__name_val := bucket_metainfo_name.value()
-	__partner_id_val := optional.PartnerId.value()
 	__user_agent_val := optional.UserAgent.value()
 	__path_cipher_val := bucket_metainfo_path_cipher.value()
 	__created_at_val := __now
@@ -13013,16 +12829,16 @@ func (obj *pgxImpl) Create_BucketMetainfo(ctx context.Context,
 	__default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value()
 	__placement_val := optional.Placement.value()
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")
 
 	var __values []interface{}
-	__values = append(__values, __id_val, __project_id_val, __name_val, __partner_id_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val)
+	__values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val)
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
 	bucket_metainfo = &BucketMetainfo{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -13040,20 +12856,19 @@ func (obj *pgxImpl) Create_ValueAttribution(ctx context.Context,
 	__now := obj.db.Hooks.Now().UTC()
 	__project_id_val := value_attribution_project_id.value()
 	__bucket_name_val := value_attribution_bucket_name.value()
-	__partner_id_val := optional.PartnerId.value()
 	__user_agent_val := optional.UserAgent.value()
 	__last_updated_val := __now
 
-	var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, user_agent, last_updated ) VALUES ( ?, ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.user_agent, value_attributions.last_updated")
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, user_agent, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.user_agent, value_attributions.last_updated")
 
 	var __values []interface{}
-	__values = append(__values, __project_id_val, __bucket_name_val, __partner_id_val, __user_agent_val, __last_updated_val)
+	__values = append(__values, __project_id_val, __bucket_name_val, __user_agent_val, __last_updated_val)
 
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
 
 	value_attribution = &ValueAttribution{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.UserAgent, &value_attribution.LastUpdated)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.UserAgent, &value_attribution.LastUpdated)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -13079,7 +12894,6 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
 	__short_name_val := optional.ShortName.value()
 	__password_hash_val := user_password_hash.value()
 	__status_val := int(0)
-	__partner_id_val := optional.PartnerId.value()
 	__user_agent_val := optional.UserAgent.value()
 	__created_at_val := __now
 	__position_val := optional.Position.value()
@@ -13094,14 +12908,14 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
 	__login_lockout_expiration_val := optional.LoginLockoutExpiration.value()
 	__signup_captcha_val := optional.SignupCaptcha.value()
 
-	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, user_agent, created_at, position, company_name, company_size, working_on, employee_count, mfa_secret_key, mfa_recovery_codes, signup_promo_code, failed_login_count, login_lockout_expiration, signup_captcha")}
-	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
+	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, user_agent, created_at, position, company_name, company_size, working_on, employee_count, mfa_secret_key, mfa_recovery_codes, signup_promo_code, failed_login_count, login_lockout_expiration, signup_captcha")}
+	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
 	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
 
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}}
+	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}}
 
 	var __values []interface{}
-	__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val, __failed_login_count_val, __login_lockout_expiration_val, __signup_captcha_val)
+	__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val, __failed_login_count_val, __login_lockout_expiration_val, __signup_captcha_val)
 
 	__optional_columns := __sqlbundle_Literals{Join: ", "}
 	__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@@ -13172,7 +12986,7 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	user = &User{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha)
 	if err != nil {
 		return nil, obj.makeErr(err)
 	}
@@ -15305,7 +15119,7 @@ func (obj *pgxImpl) Get_Project_By_PublicId(ctx context.Context,
 
 	var __cond_0 = &__sqlbundle_Condition{Left: "projects.public_id", Equal: true, Right: "?", Null: true}
 
-	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE "), __cond_0, __sqlbundle_Literal(" LIMIT 2")}}
+	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE "), __cond_0, __sqlbundle_Literal(" LIMIT 2")}}
 
 	var __values []interface{}
 	if !project_public_id.isnull() {
@@ -15332,7 +15146,7 @@ func (obj *pgxImpl) Get_Project_By_PublicId(ctx context.Context,
 		}
 
 		project = &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15366,7 +15180,7 @@ func (obj *pgxImpl) Get_Project_By_Id(ctx context.Context,
 	project *Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.id = ?")
 
 	var __values []interface{}
 	__values = append(__values, project_id.value())
@@ -15375,7 +15189,7 @@ func (obj *pgxImpl) Get_Project_By_Id(ctx context.Context,
 	obj.logStmt(__stmt, __values...)
 
 	project = &Project{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 	if err != nil {
 		return (*Project)(nil), obj.makeErr(err)
 	}
@@ -15541,7 +15355,7 @@ func (obj *pgxImpl) All_Project(ctx context.Context) (
 	rows []*Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects")
 
 	var __values []interface{}
 
@@ -15558,7 +15372,7 @@ func (obj *pgxImpl) All_Project(ctx context.Context) (
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15585,7 +15399,7 @@ func (obj *pgxImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx cont
 	rows []*Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
 
 	var __values []interface{}
 	__values = append(__values, project_created_at_less.value())
@@ -15603,7 +15417,7 @@ func (obj *pgxImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx cont
 
 	for __rows.Next() {
		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15630,7 +15444,7 @@ func (obj *pgxImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Con
 	rows []*Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
 
 	var __values []interface{}
 	__values = append(__values, project_owner_id.value())
@@ -15648,7 +15462,7 @@ func (obj *pgxImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Con
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15675,7 +15489,7 @@ func (obj *pgxImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Na
 	rows []*Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
 
 	var __values []interface{}
 	__values = append(__values, project_member_member_id.value())
@@ -15693,7 +15507,7 @@ func (obj *pgxImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Na
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15721,7 +15535,7 @@ func (obj *pgxImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
 	rows []*Project, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
 
 	var __values []interface{}
 	__values = append(__values, project_created_at_less.value())
@@ -15741,7 +15555,7 @@ func (obj *pgxImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
 
 	for __rows.Next() {
 		project := &Project{}
-		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
+		err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt)
 		if err != nil {
 			return nil, err
 		}
@@ -15927,7 +15741,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx context.Context
 	row *ApiKey_Project_PublicId_Row, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?")
 
 	var __values []interface{}
 	__values = append(__values, api_key_id.value())
@@ -15936,7 +15750,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx context.Context
 	obj.logStmt(__stmt, __values...)
 
 	row = &ApiKey_Project_PublicId_Row{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
 	if err != nil {
 		return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err)
 	}
@@ -15949,7 +15763,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Head(ctx context.Conte
 	row *ApiKey_Project_PublicId_Row, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?")
 
 	var __values []interface{}
 	__values = append(__values, api_key_head.value())
@@ -15958,7 +15772,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Head(ctx context.Conte
 	obj.logStmt(__stmt, __values...)
 
 	row = &ApiKey_Project_PublicId_Row{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
 	if err != nil {
 		return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err)
 	}
@@ -15972,7 +15786,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiKey_Projec
 	row *ApiKey_Project_PublicId_Row, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?")
 
 	var __values []interface{}
 	__values = append(__values, api_key_name.value(), api_key_project_id.value())
@@ -15981,7 +15795,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiKey_Projec
 	obj.logStmt(__stmt, __values...)
 
 	row = &ApiKey_Project_PublicId_Row{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId)
 	if err != nil {
 		return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err)
 	}
@@ -15995,7 +15809,7 @@ func (obj *pgxImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context
 	bucket_metainfo *BucketMetainfo, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
 
 	var __values []interface{}
 	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
@@ -16004,7 +15818,7 @@ func (obj *pgxImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context
 	obj.logStmt(__stmt, __values...)
 
 	bucket_metainfo = &BucketMetainfo{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
+	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
 	if err != nil {
 		return (*BucketMetainfo)(nil), obj.makeErr(err)
 	}
@@ -16133,7 +15947,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_
 	rows []*BucketMetainfo, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
 
 	var __values []interface{}
 	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())
@@ -16153,7 +15967,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_
 
 	for __rows.Next() {
 		bucket_metainfo := &BucketMetainfo{}
-		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
+		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
 		if err != nil {
 			return nil, err
 		}
@@ -16183,7 +15997,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy
 	rows []*BucketMetainfo, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
 
 	var __values []interface{}
 	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value())
@@ -16203,7 +16017,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy
 
 	for __rows.Next() {
 		bucket_metainfo := &BucketMetainfo{}
-		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
+		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement)
 		if err != nil {
 			return nil, err
 		}
@@ -16254,7 +16068,7 @@ func (obj *pgxImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context
 	value_attribution *ValueAttribution, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.user_agent, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.user_agent, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")
 
 	var __values []interface{}
 	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())
@@ -16263,7 +16077,7 @@ func (obj *pgxImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context
 	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
 	obj.logStmt(__stmt, __values...)
value_attribution = &ValueAttribution{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.UserAgent, &value_attribution.LastUpdated) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.UserAgent, &value_attribution.LastUpdated) if err != nil { return (*ValueAttribution)(nil), obj.makeErr(err) } @@ -16276,7 +16090,7 @@ func (obj *pgxImpl) All_User_By_NormalizedEmail(ctx context.Context, rows []*User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ?") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -16294,7 +16108,7 @@ func (obj *pgxImpl) All_User_By_NormalizedEmail(ctx context.Context, for __rows.Next() { user := &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, 
&user.SignupCaptcha) if err != nil { return nil, err } @@ -16321,7 +16135,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -16345,7 +16159,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return nil, err } @@ -16379,7 +16193,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, 
users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -16388,7 +16202,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -18214,7 +18028,7 @@ func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? 
RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -18283,7 +18097,7 @@ func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) project = &Project{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -18338,17 +18152,12 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Cont defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} var __args []interface{} - if update.PartnerId._set { - __values = append(__values, update.PartnerId.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?")) - } - if update.UserAgent._set { __values = append(__values, update.UserAgent.value()) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("user_agent = ?")) @@ -18417,7 +18226,7 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Cont obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err == sql.ErrNoRows { return nil, nil } @@ -18434,7 +18243,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? 
RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -18578,7 +18387,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err == sql.ErrNoRows { return nil, nil } @@ -20660,20 +20469,19 @@ func (obj *pgxcockroachImpl) Create_Project(ctx context.Context, __rate_limit_val := optional.RateLimit.value() __burst_limit_val := optional.BurstLimit.value() __max_buckets_val := optional.MaxBuckets.value() - __partner_id_val := optional.PartnerId.value() __user_agent_val := optional.UserAgent.value() __owner_id_val := project_owner_id.value() __salt_val := optional.Salt.value() __created_at_val := __now - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, public_id, name, 
description, usage_limit, bandwidth_limit, user_specified_usage_limit, user_specified_bandwidth_limit, rate_limit, burst_limit, max_buckets, partner_id, user_agent, owner_id, salt, created_at")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, public_id, name, description, usage_limit, bandwidth_limit, user_specified_usage_limit, user_specified_bandwidth_limit, rate_limit, burst_limit, max_buckets, user_agent, owner_id, salt, created_at")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO projects "), __clause, __sqlbundle_Literal(" RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} var __values []interface{} - __values = append(__values, __id_val, __public_id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __user_specified_usage_limit_val, __user_specified_bandwidth_limit_val, __rate_limit_val, __burst_limit_val, __max_buckets_val, __partner_id_val, __user_agent_val, __owner_id_val, __salt_val, __created_at_val) + __values = append(__values, __id_val, __public_id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __user_specified_usage_limit_val, __user_specified_bandwidth_limit_val, __rate_limit_val, __burst_limit_val, __max_buckets_val, __user_agent_val, __owner_id_val, __salt_val, __created_at_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -20696,7 +20504,7 @@ func (obj *pgxcockroachImpl) Create_Project(ctx context.Context, obj.logStmt(__stmt, __values...) 
project = &Project{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -20778,20 +20586,19 @@ func (obj *pgxcockroachImpl) Create_ApiKey(ctx context.Context, __head_val := api_key_head.value() __name_val := api_key_name.value() __secret_val := api_key_secret.value() - __partner_id_val := optional.PartnerId.value() __user_agent_val := optional.UserAgent.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at") var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __user_agent_val, __created_at_val) + __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val) var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) 
api_key = &ApiKey{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.UserAgent, &api_key.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -20821,7 +20628,6 @@ func (obj *pgxcockroachImpl) Create_BucketMetainfo(ctx context.Context, __id_val := bucket_metainfo_id.value() __project_id_val := bucket_metainfo_project_id.value() __name_val := bucket_metainfo_name.value() - __partner_id_val := optional.PartnerId.value() __user_agent_val := optional.UserAgent.value() __path_cipher_val := bucket_metainfo_path_cipher.value() __created_at_val := __now @@ -20836,16 +20642,16 @@ func (obj *pgxcockroachImpl) Create_BucketMetainfo(ctx context.Context, __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() __placement_val := optional.Placement.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement") var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __name_val, __partner_id_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val) + __values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val) var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err != nil { return nil, obj.makeErr(err) } @@ -20863,20 +20669,19 @@ func (obj *pgxcockroachImpl) Create_ValueAttribution(ctx context.Context, __now := obj.db.Hooks.Now().UTC() __project_id_val := value_attribution_project_id.value() __bucket_name_val := value_attribution_bucket_name.value() - __partner_id_val := optional.PartnerId.value() __user_agent_val := optional.UserAgent.value() __last_updated_val := __now - var 
__embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, user_agent, last_updated ) VALUES ( ?, ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.user_agent, value_attributions.last_updated") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, user_agent, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.user_agent, value_attributions.last_updated") var __values []interface{} - __values = append(__values, __project_id_val, __bucket_name_val, __partner_id_val, __user_agent_val, __last_updated_val) + __values = append(__values, __project_id_val, __bucket_name_val, __user_agent_val, __last_updated_val) var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) value_attribution = &ValueAttribution{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.UserAgent, &value_attribution.LastUpdated) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.UserAgent, &value_attribution.LastUpdated) if err != nil { return nil, obj.makeErr(err) } @@ -20902,7 +20707,6 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, __short_name_val := optional.ShortName.value() __password_hash_val := user_password_hash.value() __status_val := int(0) - __partner_id_val := optional.PartnerId.value() __user_agent_val := optional.UserAgent.value() __created_at_val := __now __position_val := optional.Position.value() @@ -20917,14 +20721,14 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, __login_lockout_expiration_val := optional.LoginLockoutExpiration.value() __signup_captcha_val := optional.SignupCaptcha.value() - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, user_agent, created_at, position, company_name, company_size, working_on, employee_count, mfa_secret_key, mfa_recovery_codes, signup_promo_code, failed_login_count, login_lockout_expiration, signup_captcha")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, user_agent, created_at, position, company_name, company_size, working_on, employee_count, mfa_secret_key, mfa_recovery_codes, signup_promo_code, failed_login_count, login_lockout_expiration, signup_captcha")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, 
users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} var __values []interface{} - __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val, __failed_login_count_val, __login_lockout_expiration_val, __signup_captcha_val) + __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __user_agent_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val, __mfa_secret_key_val, __mfa_recovery_codes_val, __signup_promo_code_val, __failed_login_count_val, __login_lockout_expiration_val, __signup_captcha_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -20995,7 +20799,7 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return nil, obj.makeErr(err) } @@ -23128,7 +22932,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_PublicId(ctx context.Context, var __cond_0 = &__sqlbundle_Condition{Left: "projects.public_id", Equal: true, Right: "?", Null: true} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE "), __cond_0, __sqlbundle_Literal(" LIMIT 2")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE "), __cond_0, __sqlbundle_Literal(" LIMIT 2")}} var __values []interface{} if !project_public_id.isnull() { @@ -23155,7 +22959,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_PublicId(ctx context.Context, } project = &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, 
&project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23189,7 +22993,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_Id(ctx context.Context, project *Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.id = ?") var __values []interface{} __values = append(__values, project_id.value()) @@ -23198,7 +23002,7 @@ func (obj *pgxcockroachImpl) Get_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) project = &Project{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return (*Project)(nil), obj.makeErr(err) } @@ -23364,7 +23168,7 @@ func (obj *pgxcockroachImpl) All_Project(ctx context.Context) ( rows []*Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects") var __values []interface{} @@ -23381,7 +23185,7 @@ func (obj *pgxcockroachImpl) All_Project(ctx context.Context) ( for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, 
&project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23408,7 +23212,7 @@ func (obj *pgxcockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt rows []*Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") var __values []interface{} __values = append(__values, project_created_at_less.value()) @@ -23426,7 +23230,7 @@ func (obj *pgxcockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23453,7 +23257,7 @@ func (obj *pgxcockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx co rows []*Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.owner_id = ? 
ORDER BY projects.created_at") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at") var __values []interface{} __values = append(__values, project_owner_id.value()) @@ -23471,7 +23275,7 @@ func (obj *pgxcockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx co for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23498,7 +23302,7 @@ func (obj *pgxcockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_P rows []*Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? 
ORDER BY projects.name") var __values []interface{} __values = append(__values, project_member_member_id.value()) @@ -23516,7 +23320,7 @@ func (obj *pgxcockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_P for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23544,7 +23348,7 @@ func (obj *pgxcockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_Creat rows []*Project, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, project_created_at_less.value()) @@ -23564,7 +23368,7 @@ func (obj *pgxcockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_Creat for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err != nil { return nil, err } @@ -23750,7 +23554,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx contex row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") var __values []interface{} __values = append(__values, api_key_id.value()) @@ -23759,7 +23563,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx contex obj.logStmt(__stmt, __values...) 
row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -23772,7 +23576,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Head(ctx cont row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") var __values []interface{} __values = append(__values, api_key_head.value()) @@ -23781,7 +23585,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Head(ctx cont obj.logStmt(__stmt, __values...) row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -23795,7 +23599,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiK row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") var __values []interface{} __values = append(__values, api_key_name.value(), api_key_project_id.value()) @@ -23804,7 +23608,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiK obj.logStmt(__stmt, __values...) 
row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.PartnerId, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -23818,7 +23622,7 @@ func (obj *pgxcockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx contex bucket_metainfo *BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) @@ -23827,7 +23631,7 @@ func (obj *pgxcockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx contex obj.logStmt(__stmt, __values...) 
bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err != nil { return (*BucketMetainfo)(nil), obj.makeErr(err) } @@ -23956,7 +23760,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) @@ -23976,7 +23780,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err != nil { return nil, err } @@ -24006,7 +23810,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value()) @@ -24026,7 +23830,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err != nil { return nil, err } @@ -24077,7 +23881,7 @@ func (obj *pgxcockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ct value_attribution *ValueAttribution, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.user_agent, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.user_agent, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?") var __values []interface{} __values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value()) @@ -24086,7 +23890,7 @@ func (obj *pgxcockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ct obj.logStmt(__stmt, __values...) 
value_attribution = &ValueAttribution{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.UserAgent, &value_attribution.LastUpdated) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.UserAgent, &value_attribution.LastUpdated) if err != nil { return (*ValueAttribution)(nil), obj.makeErr(err) } @@ -24099,7 +23903,7 @@ func (obj *pgxcockroachImpl) All_User_By_NormalizedEmail(ctx context.Context, rows []*User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ?") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -24117,7 +23921,7 @@ func (obj *pgxcockroachImpl) All_User_By_NormalizedEmail(ctx context.Context, for __rows.Next() { user := &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, 
&user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return nil, err } @@ -24144,7 +23948,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -24168,7 +23972,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return nil, err } @@ -24202,7 +24006,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, 
users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -24211,7 +24015,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -26037,7 +25841,7 @@ func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? 
RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.partner_id, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.public_id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.user_specified_usage_limit, projects.user_specified_bandwidth_limit, projects.segment_limit, projects.rate_limit, projects.burst_limit, projects.max_buckets, projects.user_agent, projects.owner_id, projects.salt, projects.created_at")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -26106,7 +25910,7 @@ func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) project = &Project{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.PartnerId, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.PublicId, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.UserSpecifiedUsageLimit, &project.UserSpecifiedBandwidthLimit, &project.SegmentLimit, &project.RateLimit, &project.BurstLimit, &project.MaxBuckets, &project.UserAgent, &project.OwnerId, &project.Salt, &project.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -26161,17 +25965,12 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx con defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} var __args []interface{} - if update.PartnerId._set { - __values = append(__values, update.PartnerId.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?")) - } - if update.UserAgent._set { __values = append(__values, update.UserAgent.value()) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("user_agent = ?")) @@ -26240,7 +26039,7 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx con obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) if err == sql.ErrNoRows { return nil, nil } @@ -26257,7 +26056,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? 
RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.user_agent, users.created_at, users.project_limit, users.project_bandwidth_limit, users.project_storage_limit, users.project_segment_limit, users.paid_tier, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count, users.have_sales_contact, users.mfa_enabled, users.mfa_secret_key, users.mfa_recovery_codes, users.signup_promo_code, users.verification_reminders, users.failed_login_count, users.login_lockout_expiration, users.signup_captcha")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -26401,7 +26200,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.UserAgent, &user.CreatedAt, &user.ProjectLimit, &user.ProjectBandwidthLimit, &user.ProjectStorageLimit, &user.ProjectSegmentLimit, &user.PaidTier, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount, &user.HaveSalesContact, &user.MfaEnabled, &user.MfaSecretKey, &user.MfaRecoveryCodes, &user.SignupPromoCode, &user.VerificationReminders, &user.FailedLoginCount, &user.LoginLockoutExpiration, &user.SignupCaptcha) if err == sql.ErrNoRows { return nil, nil } diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql index 42d14d519..14cf885e0 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql @@ -227,7 +227,6 @@ CREATE TABLE projects ( rate_limit integer, burst_limit integer, max_buckets integer, - partner_id bytea, user_agent bytea, owner_id bytea NOT NULL, salt bytea, @@ -437,7 +436,6 @@ CREATE TABLE users ( short_name text, 
password_hash bytea NOT NULL, status integer NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, @@ -474,7 +472,6 @@ CREATE TABLE user_settings ( CREATE TABLE value_attributions ( project_id bytea NOT NULL, bucket_name bytea NOT NULL, - partner_id bytea, user_agent bytea, last_updated timestamp with time zone NOT NULL, PRIMARY KEY ( project_id, bucket_name ) @@ -502,7 +499,6 @@ CREATE TABLE api_keys ( head bytea NOT NULL, name text NOT NULL, secret bytea NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ), @@ -513,7 +509,6 @@ CREATE TABLE bucket_metainfos ( id bytea NOT NULL, project_id bytea NOT NULL REFERENCES projects( id ), name bytea NOT NULL, - partner_id bytea, user_agent bytea, path_cipher integer NOT NULL, created_at timestamp with time zone NOT NULL, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql index 42d14d519..14cf885e0 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql @@ -227,7 +227,6 @@ CREATE TABLE projects ( rate_limit integer, burst_limit integer, max_buckets integer, - partner_id bytea, user_agent bytea, owner_id bytea NOT NULL, salt bytea, @@ -437,7 +436,6 @@ CREATE TABLE users ( short_name text, password_hash bytea NOT NULL, status integer NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, @@ -474,7 +472,6 @@ CREATE TABLE user_settings ( CREATE TABLE value_attributions ( project_id bytea NOT NULL, bucket_name bytea NOT NULL, - partner_id bytea, user_agent bytea, last_updated timestamp with time zone NOT NULL, PRIMARY KEY ( project_id, bucket_name ) @@ -502,7 +499,6 @@ CREATE TABLE api_keys ( head bytea NOT NULL, name text NOT NULL, secret bytea NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ), @@ -513,7 +509,6 @@ CREATE TABLE bucket_metainfos ( id bytea NOT NULL, project_id bytea NOT NULL REFERENCES projects( id ), name bytea NOT NULL, - partner_id bytea, user_agent bytea, path_cipher integer NOT NULL, created_at timestamp with time zone NOT NULL, diff --git a/satellite/satellitedb/dbx/user.dbx b/satellite/satellitedb/dbx/user.dbx index c9e0c65af..a3e067d7d 100644 --- a/satellite/satellitedb/dbx/user.dbx +++ b/satellite/satellitedb/dbx/user.dbx @@ -25,10 +25,6 @@ model user ( // status indicates whether the user is inactive=0, active=1, or deleted=2. See console.UserStatus for details. field status int ( updatable, autoinsert ) - // partner_id is an UUID that refers to rewards.PartnersStaticDB. - // deprecated: use user_agent instead. - // note: this field is duplicated in value_attribution.project_id. - field partner_id blob ( nullable ) // user_agent contains the partner parameter from registration. field user_agent blob ( nullable ) // created_at indicates when the user was created. 
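The .dbx models, generated Go, and generated SQL above all drop partner_id in lockstep; the migrate.go hunk below adds production migration step 236 so that deployed databases actually lose the five columns. Because DROP COLUMN is irreversible, a post-deploy sanity check can be worth having. A minimal sketch of such a check, assuming only a standard database/sql handle (the package and helper names are illustrative and not part of this change; information_schema.columns exists on both PostgreSQL and CockroachDB, matching the two generated schema files above):

package dbcheck

import (
	"context"
	"database/sql"
)

// remainingPartnerIDColumns lists every table that still carries a partner_id
// column. After migration step 236 has run it should return an empty slice.
// information_schema.columns is available on both PostgreSQL and CockroachDB.
func remainingPartnerIDColumns(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT table_name FROM information_schema.columns WHERE column_name = 'partner_id'`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var tables []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		tables = append(tables, name)
	}
	return tables, rows.Err()
}

Any table name this returns after the migration still carries the deprecated column and would indicate the step did not apply cleanly.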
diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index d515ac2dd..c3dc5481a 100644 --- a/satellite/satellitedb/migrate.go +++ b/satellite/satellitedb/migrate.go @@ -2363,6 +2363,18 @@ func (db *satelliteDB) ProductionMigration() *migrate.Migration { `ALTER TABLE project_invitations ADD COLUMN inviter_id bytea REFERENCES users( id ) ON DELETE SET NULL;`, }, }, + { + DB: &db.migrationDB, + Description: "drop partner_id columns", + Version: 236, + Action: migrate.SQL{ + `ALTER TABLE projects DROP COLUMN partner_id;`, + `ALTER TABLE users DROP COLUMN partner_id;`, + `ALTER TABLE api_keys DROP COLUMN partner_id;`, + `ALTER TABLE bucket_metainfos DROP COLUMN partner_id;`, + `ALTER TABLE value_attributions DROP COLUMN partner_id;`, + }, + }, // NB: after updating testdata in `testdata`, run // `go generate` to update `migratez.go`. }, diff --git a/satellite/satellitedb/migratez.go b/satellite/satellitedb/migratez.go index 009c94c27..79802c869 100755 --- a/satellite/satellitedb/migratez.go +++ b/satellite/satellitedb/migratez.go @@ -13,7 +13,7 @@ func (db *satelliteDB) testMigration() *migrate.Migration { { DB: &db.migrationDB, Description: "Testing setup", - Version: 235, + Version: 236, Action: migrate.SQL{`-- AUTOGENERATED BY storj.io/dbx -- DO NOT EDIT CREATE TABLE account_freeze_events ( @@ -243,7 +243,6 @@ CREATE TABLE projects ( rate_limit integer, burst_limit integer, max_buckets integer, - partner_id bytea, user_agent bytea, owner_id bytea NOT NULL, salt bytea, @@ -453,7 +452,6 @@ CREATE TABLE users ( short_name text, password_hash bytea NOT NULL, status integer NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, @@ -490,7 +488,6 @@ CREATE TABLE user_settings ( CREATE TABLE value_attributions ( project_id bytea NOT NULL, bucket_name bytea NOT NULL, - partner_id bytea, user_agent bytea, last_updated timestamp with time zone NOT NULL, PRIMARY KEY ( project_id, bucket_name ) @@ -518,7 +515,6 @@ CREATE TABLE api_keys ( head bytea NOT NULL, name text NOT NULL, secret bytea NOT NULL, - partner_id bytea, user_agent bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ), @@ -529,7 +525,6 @@ CREATE TABLE bucket_metainfos ( id bytea NOT NULL, project_id bytea NOT NULL REFERENCES projects( id ), name bytea NOT NULL, - partner_id bytea, user_agent bytea, path_cipher integer NOT NULL, created_at timestamp with time zone NOT NULL, diff --git a/satellite/satellitedb/testdata/postgres.v236.sql b/satellite/satellitedb/testdata/postgres.v236.sql new file mode 100644 index 000000000..fad5a8b15 --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v236.sql @@ -0,0 +1,756 @@ +-- AUTOGENERATED BY storj.io/dbx +-- DO NOT EDIT +CREATE TABLE account_freeze_events ( + user_id bytea NOT NULL, + event integer NOT NULL, + limits jsonb, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + PRIMARY KEY ( user_id, event ) +); +CREATE TABLE accounting_rollups ( + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + interval_end_time timestamp with time zone, + PRIMARY KEY ( node_id, start_time ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE 
TABLE billing_balances ( + user_id bytea NOT NULL, + balance bigint NOT NULL, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ) +); +CREATE TABLE billing_transactions ( + id bigserial NOT NULL, + user_id bytea NOT NULL, + amount bigint NOT NULL, + currency text NOT NULL, + description text NOT NULL, + source text NOT NULL, + status text NOT NULL, + type text NOT NULL, + metadata jsonb NOT NULL, + timestamp timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( project_id, bucket_name, interval_start, action ) +); +CREATE TABLE bucket_bandwidth_rollup_archives ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + total_bytes bigint NOT NULL DEFAULT 0, + inline bigint NOT NULL, + remote bigint NOT NULL, + total_segments_count integer NOT NULL DEFAULT 0, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE coinpayments_transactions ( + id text NOT NULL, + user_id bytea NOT NULL, + address text NOT NULL, + amount_numeric bigint NOT NULL, + received_numeric bigint NOT NULL, + status integer NOT NULL, + key text NOT NULL, + timeout integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE graceful_exit_progress ( + node_id bytea NOT NULL, + bytes_transferred bigint NOT NULL, + pieces_transferred bigint NOT NULL DEFAULT 0, + pieces_failed bigint NOT NULL DEFAULT 0, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE graceful_exit_segment_transfer_queue ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_num integer NOT NULL, + root_piece_id bytea, + durability_ratio double precision NOT NULL, + queued_at timestamp with time zone NOT NULL, + requested_at timestamp with time zone, + last_failed_at timestamp with time zone, + last_failed_code integer, + failed_count integer, + finished_at timestamp with time zone, + order_limit_send_count integer NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, stream_id, position, piece_num ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL DEFAULT '', + last_net text NOT NULL, + last_ip_port text, + country_code text, + protocol integer NOT NULL DEFAULT 0, + type integer NOT NULL DEFAULT 0, + email text NOT NULL, + wallet text NOT NULL, + wallet_features text NOT NULL DEFAULT '', + free_disk bigint NOT NULL DEFAULT -1, + piece_count bigint NOT NULL DEFAULT 0, + major bigint NOT NULL DEFAULT 0, + minor bigint NOT NULL DEFAULT 0, + patch bigint NOT NULL DEFAULT 0, + hash text NOT NULL DEFAULT '', + timestamp timestamp with time zone NOT NULL DEFAULT 
'0001-01-01 00:00:00+00', + release boolean NOT NULL DEFAULT false, + latency_90 bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch', + last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch', + disqualified timestamp with time zone, + disqualification_reason integer, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + exit_initiated_at timestamp with time zone, + exit_loop_completed_at timestamp with time zone, + exit_finished_at timestamp with time zone, + exit_success boolean NOT NULL DEFAULT false, + contained timestamp with time zone, + last_offline_email timestamp with time zone, + last_software_update_email timestamp with time zone, + noise_proto int, + noise_public_key bytea, + debounce_limit int NOT NULL DEFAULT 0, + PRIMARY KEY ( id ) +); +CREATE TABLE node_events ( + id bytea NOT NULL, + email text NOT NULL, + node_id bytea NOT NULL, + event integer NOT NULL, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_attempted timestamp with time zone, + email_sent timestamp with time zone, + PRIMARY KEY ( id ) +); +CREATE TABLE node_api_versions ( + id bytea NOT NULL, + api_version integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE oauth_clients ( + id bytea NOT NULL, + encrypted_secret bytea NOT NULL, + redirect_url text NOT NULL, + user_id bytea NOT NULL, + app_name text NOT NULL, + app_logo_url text NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE oauth_codes ( + client_id bytea NOT NULL, + user_id bytea NOT NULL, + scope text NOT NULL, + redirect_url text NOT NULL, + challenge text NOT NULL, + challenge_method text NOT NULL, + code text NOT NULL, + created_at timestamp with time zone NOT NULL, + expires_at timestamp with time zone NOT NULL, + claimed_at timestamp with time zone, + PRIMARY KEY ( code ) +); +CREATE TABLE oauth_tokens ( + client_id bytea NOT NULL, + user_id bytea NOT NULL, + scope text NOT NULL, + kind integer NOT NULL, + token bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( token ) +); +CREATE TABLE peer_identities ( + node_id bytea NOT NULL, + leaf_serial_number bytea NOT NULL, + chain bytea NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + public_id bytea, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint, + bandwidth_limit bigint, + user_specified_usage_limit bigint, + user_specified_bandwidth_limit bigint, + segment_limit bigint DEFAULT 1000000, + rate_limit integer, + burst_limit integer, + max_buckets integer, + user_agent bytea, + owner_id bytea NOT NULL, + salt bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE project_bandwidth_daily_rollups ( + project_id bytea NOT NULL, + interval_day date NOT NULL, + egress_allocated bigint NOT NULL, + egress_settled bigint NOT NULL, + egress_dead bigint NOT NULL DEFAULT 0, + PRIMARY KEY ( project_id, interval_day ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + 
created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE repair_queue ( + stream_id bytea NOT NULL, + position bigint NOT NULL, + attempted_at timestamp with time zone, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + segment_health double precision NOT NULL DEFAULT 1, + PRIMARY KEY ( stream_id, position ) +); +CREATE TABLE reputations ( + id bytea NOT NULL, + audit_success_count bigint NOT NULL DEFAULT 0, + total_audit_count bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + disqualified timestamp with time zone, + disqualification_reason integer, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + online_score double precision NOT NULL DEFAULT 1, + audit_history bytea NOT NULL, + audit_reputation_alpha double precision NOT NULL DEFAULT 1, + audit_reputation_beta double precision NOT NULL DEFAULT 0, + unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1, + unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0, + PRIMARY KEY ( id ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE reverification_audits ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_num integer NOT NULL, + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_attempt timestamp with time zone, + reverify_count bigint NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, stream_id, position ) +); +CREATE TABLE revocations ( + revoked bytea NOT NULL, + api_key_id bytea NOT NULL, + PRIMARY KEY ( revoked ) +); +CREATE TABLE segment_pending_audits ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollup_archives ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollups_phase2 ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_payments ( + id bigserial NOT NULL, + created_at timestamp with time zone NOT NULL, + node_id bytea NOT NULL, + period text NOT NULL, + amount bigint NOT NULL, + receipt text, + notes text, + PRIMARY KEY ( id ) 
+); +CREATE TABLE storagenode_paystubs ( + period text NOT NULL, + node_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + codes text NOT NULL, + usage_at_rest double precision NOT NULL, + usage_get bigint NOT NULL, + usage_put bigint NOT NULL, + usage_get_repair bigint NOT NULL, + usage_put_repair bigint NOT NULL, + usage_get_audit bigint NOT NULL, + comp_at_rest bigint NOT NULL, + comp_get bigint NOT NULL, + comp_put bigint NOT NULL, + comp_get_repair bigint NOT NULL, + comp_put_repair bigint NOT NULL, + comp_get_audit bigint NOT NULL, + surge_percent bigint NOT NULL, + held bigint NOT NULL, + owed bigint NOT NULL, + disposed bigint NOT NULL, + paid bigint NOT NULL, + distributed bigint NOT NULL, + PRIMARY KEY ( period, node_id ) +); +CREATE TABLE storagenode_storage_tallies ( + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( interval_end_time, node_id ) +); +CREATE TABLE storjscan_payments ( + block_hash bytea NOT NULL, + block_number bigint NOT NULL, + transaction bytea NOT NULL, + log_index integer NOT NULL, + from_address bytea NOT NULL, + to_address bytea NOT NULL, + token_value bigint NOT NULL, + usd_value bigint NOT NULL, + status text NOT NULL, + timestamp timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( block_hash, log_index ) +); +CREATE TABLE storjscan_wallets ( + user_id bytea NOT NULL, + wallet_address bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id, wallet_address ) +); +CREATE TABLE stripe_customers ( + user_id bytea NOT NULL, + customer_id text NOT NULL, + package_plan text, + purchased_package_at timestamp with time zone, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE stripecoinpayments_invoice_project_records ( + id bytea NOT NULL, + project_id bytea NOT NULL, + storage double precision NOT NULL, + egress bigint NOT NULL, + objects bigint, + segments bigint, + period_start timestamp with time zone NOT NULL, + period_end timestamp with time zone NOT NULL, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, period_start, period_end ) +); +CREATE TABLE stripecoinpayments_tx_conversion_rates ( + tx_id text NOT NULL, + rate_numeric double precision NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + normalized_email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + project_limit integer NOT NULL DEFAULT 0, + project_bandwidth_limit bigint NOT NULL DEFAULT 0, + project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, + paid_tier boolean NOT NULL DEFAULT false, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, + employee_count text, + have_sales_contact boolean NOT NULL DEFAULT false, + mfa_enabled boolean NOT NULL DEFAULT false, + mfa_secret_key text, + mfa_recovery_codes text, + signup_promo_code text, + verification_reminders integer NOT NULL DEFAULT 0, + failed_login_count integer, + login_lockout_expiration timestamp with time zone, + signup_captcha double precision, + PRIMARY KEY ( id 
) +); +CREATE TABLE user_settings ( + user_id bytea NOT NULL, + session_minutes integer, + passphrase_prompt boolean, + onboarding_start boolean NOT NULL DEFAULT true, + onboarding_end boolean NOT NULL DEFAULT true, + onboarding_step text, + PRIMARY KEY ( user_id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, + bucket_name bytea NOT NULL, + user_agent bytea, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE verification_audits ( + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + stream_id bytea NOT NULL, + position bigint NOT NULL, + expires_at timestamp with time zone, + encrypted_size integer NOT NULL, + PRIMARY KEY ( inserted_at, stream_id, position ) +); +CREATE TABLE webapp_sessions ( + id bytea NOT NULL, + user_id bytea NOT NULL, + ip_address text NOT NULL, + user_agent text NOT NULL, + status integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + user_agent bytea, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + placement integer, + PRIMARY KEY ( id ), + UNIQUE ( project_id, name ) +); +CREATE TABLE project_invitations ( + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + email text NOT NULL, + inviter_id bytea REFERENCES users( id ) ON DELETE SET NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, email ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE stripecoinpayments_apply_balance_intents ( + tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time ) ; +CREATE INDEX billing_transactions_timestamp_index ON billing_transactions ( timestamp ) ; +CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start ) ; +CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id ) ; +CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start ) ; +CREATE INDEX 
bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id ) ; +CREATE INDEX project_bandwidth_daily_rollup_interval_day_index ON project_bandwidth_daily_rollups ( interval_day ) ; +CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start ) ; +CREATE INDEX graceful_exit_segment_transfer_nid_dr_qa_fa_lfa_index ON graceful_exit_segment_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at ) ; +CREATE INDEX node_last_ip ON nodes ( last_net ) ; +CREATE INDEX nodes_dis_unk_off_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, offline_suspended, exit_finished_at, last_contact_success ) ; +CREATE INDEX nodes_type_last_cont_success_free_disk_ma_mi_patch_vetted_partial_index ON nodes ( type, last_contact_success, free_disk, major, minor, patch, vetted_at ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true AND nodes.last_net != '' ; +CREATE INDEX nodes_dis_unk_aud_exit_init_rel_type_last_cont_success_stored_index ON nodes ( disqualified, unknown_audit_suspended, exit_initiated_at, release, type, last_contact_success ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true ; +CREATE INDEX node_events_email_event_created_at_index ON node_events ( email, event, created_at ) WHERE node_events.email_sent is NULL ; +CREATE INDEX oauth_clients_user_id_index ON oauth_clients ( user_id ) ; +CREATE INDEX oauth_codes_user_id_index ON oauth_codes ( user_id ) ; +CREATE INDEX oauth_codes_client_id_index ON oauth_codes ( client_id ) ; +CREATE INDEX oauth_tokens_user_id_index ON oauth_tokens ( user_id ) ; +CREATE INDEX oauth_tokens_client_id_index ON oauth_tokens ( client_id ) ; +CREATE INDEX projects_public_id_index ON projects ( public_id ) ; +CREATE INDEX projects_owner_id_index ON projects ( owner_id ) ; +CREATE INDEX repair_queue_updated_at_index ON repair_queue ( updated_at ) ; +CREATE INDEX repair_queue_num_healthy_pieces_attempted_at_index ON repair_queue ( segment_health, attempted_at ) ; +CREATE INDEX reverification_audits_inserted_at_index ON reverification_audits ( inserted_at ) ; +CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start ) ; +CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start ) ; +CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period ) ; +CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id ) ; +CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id ) ; +CREATE INDEX storjscan_payments_block_number_log_index_index ON storjscan_payments ( block_number, log_index ) ; +CREATE INDEX storjscan_wallets_wallet_address_index ON storjscan_wallets ( wallet_address ) ; +CREATE INDEX webapp_sessions_user_id_index ON webapp_sessions ( user_id ) ; +CREATE INDEX users_email_status_index ON users ( normalized_email, status ) ; +CREATE INDEX project_invitations_project_id_index ON project_invitations ( project_id ) ; +CREATE INDEX project_invitations_email_index ON project_invitations ( email ) ; + +-- MAIN DATA -- + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", 
"get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "vetted_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2020-03-18 12:00:00.000000+00'); +INSERT 
INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NUll, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', false, 10, 50000000000, 50000000000, false, 150000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit", "have_sales_contact", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\304\\313\\206\\311",'::bytea, 'Ian', 'Pires', '3email3@mail.test', '3EMAIL3@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-03-18 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 51, true, '1-50', 10, 50000000000, 50000000000, true, 150000); +INSERT INTO "users"("id", "full_name", "short_name", 
"email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\312",'::bytea, 'Campbell', 'Wright', '4email4@mail.test', '4EMAIL4@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-07-17 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 82, true, '1-50', 10, 50000000000, 50000000000, 150000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL, 150000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00', 150000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00', 150000); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00'); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", 
"settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "user_agent", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, NULL, '2019-02-14 08:07:31.028103+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "graceful_exit_progress" 
("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00'); + +INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate_numeric", "created_at") VALUES ('tx_id', '1.929883831', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount_numeric", "received_numeric", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', 1411112222, 1311112222, 1, 'key', 60, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024); + +INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, 2000000, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00', 150000); + +INSERT INTO "project_bandwidth_daily_rollups"("project_id", "interval_day", egress_allocated, egress_settled, egress_dead) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2021-04-22', 10000, 5000, 0); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', 5e11, 5e11, NULL, 2000000, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00', 150000); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", 
"type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000, 150000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000, 150000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101, 150000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000); + +INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); + +INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", 
"interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117); +INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117); + +INSERT INTO "reputations"("id", "audit_success_count", "total_audit_count", "created_at", "updated_at", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "online_score", "audit_history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', NULL, 1000, 0, 1, 0, 1, '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "graceful_exit_segment_transfer_queue" ("node_id", "stream_id", "position", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 10 , 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "segment_pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "stream_id", position) VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, '\x010101', 1); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\342U\\303\\312\\204",'::bytea, 'Noahson', 'William', '100email1@mail.test', '100EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', false, 10, 100000000000000, 25000000000000, true, 100000000); + +INSERT INTO 
"repair_queue" ("stream_id", "position", "attempted_at", "segment_health", "updated_at", "inserted_at") VALUES ('\x01', 1, null, 1, '2020-09-01 00:00:00.000000+00', '2021-09-01 00:00:00.000000+00'); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\204",'::bytea, 'Noahson William', '101email1@mail.test', '101EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6g7h8"]', 3, 50000000000, 50000000000, 150000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\251\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\205",'::bytea, 'Felicia Smith', '99email1@mail.test', '99EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "segments", "period_start", "period_end", "state", "created_at") VALUES (E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2021-02-14 08:07:31.028103+00', '2021-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, 'DE'); +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares", "placement") VALUES (E'\\144/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketotheruniquename'::bytea, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10, 1); + +INSERT INTO "nodes"("id", "address", "last_net", 
"protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\017', '127.0.0.1:55517', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\267\\342U\\303\\312\\203",'::bytea, 'Jessica Thompson', '143email1@mail.test', '143EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-04 08:27:56.614594+00', true, 'mfa secret key', '["2b3c4d5e","f6a7e8e9"]', 'promo123', 3, '150000000000', '150000000000', 150000); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Heather Jackson', '762email@mail.test', '762EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-05 03:22:39.614594+00', true, 'mfa secret key', '["5e4d3c2b","e9e8a7f6"]', 'promo123', 3, '100000000000000', '25000000000000', 150000); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Michael Mint', '333email2@mail.test', '333EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-10-05 03:22:39.614594+00', true, 'mfa secret key', '["5e4d3c2c","e9e8a7f7"]', 'promo123', 3, '100000000000000', '25000000000000', 150000); + +INSERT INTO "oauth_clients"("id", "encrypted_secret", "redirect_url", "user_id", "app_name", "app_logo_url") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'610B723B-E1FF-4B1D-B372-521250690C6E'::bytea, 'https://example.test/callback/storj', E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Example App', 'https://example.test/logo.png'); + +INSERT INTO "oauth_codes"("client_id", "user_id", "scope", "redirect_url", "challenge", "challenge_method", "code", "created_at", "expires_at", "claimed_at") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'scope', 'http://localhost:12345/callback', 'challenge', 'challenge method', 'plaintext code', '2021-12-05 03:22:39.614594+00', '2021-12-05 03:22:39.614594+00', '2021-12-05 03:22:39.614594+00'); + +INSERT INTO "oauth_tokens"("client_id", "user_id", "scope", "kind", "token", "created_at", "expires_at") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'scope', 1, E'B9C93D5F-CBD7-4615-9184-E714CFE14365'::bytea, '2021-12-05 
03:22:39.614594+00', '2021-12-05 03:22:39.614594+00'); + +INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount_numeric", "received_numeric", "status", "key", "timeout", "created_at") VALUES ('different_tx_id_from_before', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', 125419938429, 1, 1, 'key', 60, '2021-07-28 20:24:11.932313-05'); +INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate_numeric", "created_at") VALUES ('different_tx_id_from_before', 3.14159265359, '2021-07-28 20:24:11.932313-05'); + +INSERT INTO "webapp_sessions"("id", "user_id", "ip_address", "user_agent", "status", "expires_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '127.0.0.1', 'Firefox', 0, '2019-02-14 08:28:24.614594+00'); + +INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit", "verification_reminders") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\304\\312\\205",'::bytea, 'Felicia Smith', '1testemail1@mail.test', '1TESTEMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000, 1); + +INSERT INTO "reputations"("id", "audit_success_count", "total_audit_count", "created_at", "updated_at", "disqualified", "disqualification_reason", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "online_score", "audit_history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\002', 2, 5, '2022-04-20 04:20:59.028103+00', '2022-04-20 04:21:09.028103+00', '2022-04-20 04:22:09.028103+00', 3, 50, 0, 1, 0, 1, '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "storjscan_wallets" ("user_id", "wallet_address", "created_at") VALUES (E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, E'\\343\\301\\042w\\222\\263Ci\\245\\312U\\304\\312\\202",'::bytea, '2021-07-28 20:04:11.932313+00'); + +INSERT INTO "storjscan_payments" ("block_hash", "block_number", "transaction", "log_index", "from_address", "to_address", "token_value", "usd_value", "status", "timestamp", "created_at") VALUES (E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 0, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 0, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 1, 1, 'example', '2022-04-20 04:22:09.028103+00', '2022-04-20 04:22:09.028103+00'); + +INSERT INTO "projects"("id", "public_id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\251\\247'::bytea, E'300\\273|\\342N\\347\\347\\363\\347\\363\\371>+F\\241\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000); + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", 
"get_repair_total", "put_repair_total", "at_rest_total", "interval_end_time") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-10 00:00:00+00', 2875, 5750, 8635, 11500, 0, 14375, '2019-02-10 23:00:00+00'); + +INSERT INTO "billing_transactions" ("id", "user_id", "amount", "currency", "description", "source", "status", "type", "metadata", "timestamp", "created_at") VALUES (1, E'\\363\\331\\032w\\212\\213Ci\\245\\322U\\314\\302\\202",'::bytea, 113219736213, 'usd', 'some_description', 'some_source', 'some_status', 'some_type', '{ "Wallet": "0x1234", "ReferenceID": "0987654321"}'::jsonb, '2021-07-28 19:14:11.932313+00', '2021-07-28 19:34:11.932323+00'); + +INSERT INTO "billing_balances" ("user_id", "balance", "last_updated") VALUES (E'\\363\\331\\032w\\222\\203Ci\\245\\312U\\304\\322\\212",'::bytea, 113219736213, '2021-07-28 19:34:11.932323+00'); + +INSERT INTO "projects"("id", "public_id", "name", "description", "usage_limit", "bandwidth_limit", "user_specified_usage_limit", "user_specified_bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit", "salt") VALUES (E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\252\\247'::bytea, E'300\\273|\\342N\\347\\347\\363\\347\\363\\371>+F\\241\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, NULL, NULL, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000, E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\252\\247'::bytea); + +INSERT INTO "users" ("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit", "verification_reminders", "signup_captcha") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\304\\312\\206",'::bytea, 'Harold Smith', '1testemail206@mail.test', '1TESTEMAIL206@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000, 1, 1); + +INSERT INTO "reverification_audits" ("node_id", "stream_id", "position", "piece_num", "inserted_at", "last_attempt", "reverify_count") VALUES (E'\\xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', E'\\x01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b', 1152921504606846976, 4, '2008-06-06 14:13:08.845574-07', '2009-08-23 02:19:52.922832-07', 5); + +INSERT INTO "node_events" ("id", "email", "node_id", "event", "created_at", "email_sent") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\017', 'test@storj.test', E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:28:24.614594+00', '2019-02-14 08:28:24.614594+00'); + +INSERT INTO "verification_audits" ("inserted_at", "stream_id", "position", "expires_at", "encrypted_size") VALUES ('2022-10-31 00:00:00.000000+00', E'\\xb5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c', 42949672970, NULL, 2147483647); +INSERT INTO "verification_audits" ("inserted_at", "stream_id", "position", "expires_at", "encrypted_size") VALUES ('2022-10-31 00:01:00.000000+00', E'\\x6e96e45029870a9b08cff2ed6ac840ccde3edce244327cc1bddefa1e555bc81f', 450971566185, '2023-01-01 23:59:59.999999+13', 12); + +INSERT INTO "nodes"("id", "address", 
"last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "contained") VALUES (E'\\342\\341\\363\\342>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2022-06-14 05:07:31.108963+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code", "last_offline_email") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\345\\017', '127.0.0.1:55517', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL, '2021-10-13 08:07:31.108963+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code", "last_software_update_email") VALUES (E'\\362\\341\\363\\371>+F\\256\\262\\300\\273|\\342N\\347\\017', '127.0.0.1:55517', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL, '2021-10-13 08:07:31.108963+00'); + +INSERT INTO "node_events"("id", "email", "node_id", "event", "created_at", "last_attempted", "email_sent") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 'test@storj.test', E'\\153\\313\\234\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:28:24.614594+00', '2020-02-14 08:28:24.614594+00', '2019-02-14 08:28:24.614594+00'); + +INSERT INTO "account_freeze_events"("user_id", "event", "limits", "created_at") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 0, '{"userLimits": {"storage": 100, "egress": 100}, "projectLimits": {"projectID0": {"storage": 100, "egress": 100}}}'::jsonb, '2019-02-14 08:28:24.614594+00'); + +INSERT INTO "user_settings"("user_id", "session_minutes", "passphrase_prompt", "onboarding_start", "onboarding_end", "onboarding_step") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 15, NULL, true, true, NULL); + +INSERT INTO "stripe_customers"("user_id", "customer_id", "package_plan", "purchased_package_at", "created_at") VALUES (E'\\363\\312\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id0', 'package-name', '2023-03-22 15:34:07.123456+00','2019-06-01 08:28:24.267934+00'); + +INSERT INTO "project_invitations"("project_id", "email", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300', '3EMAIL3@MAIL.TEST', '2023-04-24 00:00:00+00'); +INSERT INTO "project_invitations"("project_id", "email", "inviter_id", "created_at") VALUES 
(E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '3EMAIL3@MAIL.TEST', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",', '2023-05-09 00:00:00+00'); + +-- NEW DATA --
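Every table recreated in this snapshot now carries only user_agent, with no partner_id column anywhere in the schema, so a loaded copy of the testdata can be checked against the catalog directly. The query below is a hypothetical spot check, not part of the testdata file or of the migration framework:

-- Hypothetical spot check (not part of this testdata): after loading the
-- snapshot into PostgreSQL, no table should still expose a partner_id column.
SELECT table_name, column_name
FROM information_schema.columns
WHERE table_schema = 'public'
  AND column_name = 'partner_id';
-- Expected: zero rows; user_agent is the only attribution column left.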