diff --git a/pkg/storj/bucket.go b/pkg/storj/bucket.go index eed105bb5..f210fcf62 100644 --- a/pkg/storj/bucket.go +++ b/pkg/storj/bucket.go @@ -26,6 +26,7 @@ type Bucket struct { ID uuid.UUID Name string ProjectID uuid.UUID + PartnerID uuid.UUID Created time.Time PathCipher CipherSuite DefaultSegmentsSize int64 diff --git a/satellite/console/apikeys.go b/satellite/console/apikeys.go index e74e23a9f..333b4b617 100644 --- a/satellite/console/apikeys.go +++ b/satellite/console/apikeys.go @@ -30,6 +30,7 @@ type APIKeys interface { type APIKeyInfo struct { ID uuid.UUID `json:"id"` ProjectID uuid.UUID `json:"projectId"` + PartnerID uuid.UUID `json:"partnerId"` Name string `json:"name"` Secret []byte `json:"-"` CreatedAt time.Time `json:"createdAt"` diff --git a/satellite/console/projects.go b/satellite/console/projects.go index 444ab71dc..6f1f45aac 100644 --- a/satellite/console/projects.go +++ b/satellite/console/projects.go @@ -32,9 +32,10 @@ type Projects interface { type Project struct { ID uuid.UUID `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - UsageLimit int64 `json:"usageLimit"` + Name string `json:"name"` + Description string `json:"description"` + UsageLimit int64 `json:"usageLimit"` + PartnerID uuid.UUID `json:"partnerId"` CreatedAt time.Time `json:"createdAt"` } diff --git a/satellite/console/users.go b/satellite/console/users.go index 51f265443..61e14b387 100644 --- a/satellite/console/users.go +++ b/satellite/console/users.go @@ -86,7 +86,8 @@ type User struct { Email string `json:"email"` PasswordHash []byte `json:"passwordHash"` - Status UserStatus `json:"status"` + Status UserStatus `json:"status"` + PartnerID uuid.UUID `json:"partnerId"` CreatedAt time.Time `json:"createdAt"` } diff --git a/satellite/satellitedb/apikeys.go b/satellite/satellitedb/apikeys.go index 125e7292b..9d3e8b461 100644 --- a/satellite/satellitedb/apikeys.go +++ b/satellite/satellitedb/apikeys.go @@ -83,6 +83,9 @@ func (keys *apikeys) Create(ctx context.Context, head []byte, info console.APIKe dbx.ApiKey_Head(head), dbx.ApiKey_Name(info.Name), dbx.ApiKey_Secret(info.Secret), + dbx.ApiKey_Create_Fields{ + PartnerId: dbx.ApiKey_PartnerId(info.PartnerID[:]), + }, ) if err != nil { diff --git a/satellite/satellitedb/buckets.go b/satellite/satellitedb/buckets.go index fe0a53d1a..6a4c844e6 100644 --- a/satellite/satellitedb/buckets.go +++ b/satellite/satellitedb/buckets.go @@ -42,6 +42,9 @@ func (db *bucketsDB) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ s dbx.BucketMetainfo_DefaultRedundancyRepairShares(int(bucket.DefaultRedundancyScheme.RepairShares)), dbx.BucketMetainfo_DefaultRedundancyOptimalShares(int(bucket.DefaultRedundancyScheme.OptimalShares)), dbx.BucketMetainfo_DefaultRedundancyTotalShares(int(bucket.DefaultRedundancyScheme.TotalShares)), + dbx.BucketMetainfo_Create_Fields{ + PartnerId: dbx.BucketMetainfo_PartnerId(bucket.PartnerID[:]), + }, ) if err != nil { return storj.Bucket{}, storj.ErrBucket.Wrap(err) } diff --git a/satellite/satellitedb/buckets_test.go b/satellite/satellitedb/buckets_test.go new file mode 100644 index 000000000..455d66203 --- /dev/null +++ b/satellite/satellitedb/buckets_test.go @@ -0,0 +1,91 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package satellitedb_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "storj.io/storj/internal/testcontext" + "storj.io/storj/internal/testrand" + "storj.io/storj/pkg/storj" + "storj.io/storj/satellite" + "storj.io/storj/satellite/console" + "storj.io/storj/satellite/satellitedb/satellitedbtest" +) + +func TestUsers(t *testing.T) { + satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) { + ctx := testcontext.New(t) + defer ctx.Cleanup() + + consoleDB := db.Console() + + // password hash shared by the test users + userPassHash := testrand.Bytes(8) + + // create a user with partnerID + _, err := consoleDB.Users().Insert(ctx, &console.User{ + FullName: "John Doe", + Email: "john@mail.test", + PasswordHash: userPassHash, + Status: console.Active, + PartnerID: testrand.UUID(), + }) + require.NoError(t, err) + + // create a user with no partnerID + _, err = consoleDB.Users().Insert(ctx, &console.User{ + FullName: "John Doe", + Email: "john@mail.test", + PasswordHash: userPassHash, + Status: console.Active, + }) + require.NoError(t, err) + + // create a project with partnerID + _, err = consoleDB.Projects().Insert(ctx, &console.Project{ + ID: testrand.UUID(), + Name: "John Doe", + Description: "some description", + UsageLimit: int64(1000), + PartnerID: testrand.UUID(), + CreatedAt: time.Now(), + }) + require.NoError(t, err) + + // create a project with no partnerID + proj, err := consoleDB.Projects().Insert(ctx, &console.Project{ + ID: testrand.UUID(), + Name: "John Doe", + Description: "some description", + UsageLimit: int64(1000), + CreatedAt: time.Now(), + }) + require.NoError(t, err) + + // create an APIKey with no partnerID + _, err = consoleDB.APIKeys().Create(ctx, testrand.Bytes(8), console.APIKeyInfo{ + ID: testrand.UUID(), + ProjectID: proj.ID, + Name: "John Doe", + Secret: []byte("xyz"), + CreatedAt: time.Now(), + }) + require.NoError(t, err) + + // create a bucket with no partnerID + _, err = db.Buckets().CreateBucket(ctx, storj.Bucket{ + ID: testrand.UUID(), + Name: "testbucket", + ProjectID: proj.ID, + Created: time.Now(), + PathCipher: storj.EncAESGCM, + DefaultSegmentsSize: int64(100), + }) + require.NoError(t, err) + }) +} diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index a362ad436..7264162a8 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -213,7 +213,7 @@ model user ( field password_hash blob ( updatable ) field status int ( updatable, autoinsert ) - + field partner_id blob ( nullable ) field created_at timestamp ( autoinsert ) ) @@ -257,6 +257,7 @@ model project ( field name text field description text ( updatable ) field usage_limit int64 ( updatable ) + field partner_id blob ( nullable ) field created_at timestamp ( autoinsert ) ) @@ -379,6 +380,7 @@ model api_key ( field head blob field name text (updatable) field secret blob + field partner_id blob (nullable) field created_at timestamp (autoinsert) ) @@ -763,6 +765,7 @@ model bucket_metainfo ( field id blob field project_id project.id restrict field name blob + field partner_id blob (nullable) field path_cipher int diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index fe23be1c1..e1a6a7b43 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -409,6 +409,7 @@ CREATE TABLE projects ( name text NOT NULL, description text NOT NULL, usage_limit bigint NOT
NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ) ); @@ -457,6 +458,7 @@ CREATE TABLE users ( short_name text, password_hash bytea NOT NULL, status integer NOT NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ) ); @@ -473,6 +475,7 @@ CREATE TABLE api_keys ( head bytea NOT NULL, name text NOT NULL, secret bytea NOT NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ), UNIQUE ( head ), @@ -482,6 +485,7 @@ CREATE TABLE bucket_metainfos ( id bytea NOT NULL, project_id bytea NOT NULL REFERENCES projects( id ), name bytea NOT NULL, + partner_id bytea, path_cipher integer NOT NULL, created_at timestamp with time zone NOT NULL, default_segment_size integer NOT NULL, @@ -750,6 +754,7 @@ CREATE TABLE projects ( name TEXT NOT NULL, description TEXT NOT NULL, usage_limit INTEGER NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ) ); @@ -798,6 +803,7 @@ CREATE TABLE users ( short_name TEXT, password_hash BLOB NOT NULL, status INTEGER NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ) ); @@ -814,6 +820,7 @@ CREATE TABLE api_keys ( head BLOB NOT NULL, name TEXT NOT NULL, secret BLOB NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ), UNIQUE ( head ), @@ -823,6 +830,7 @@ CREATE TABLE bucket_metainfos ( id BLOB NOT NULL, project_id BLOB NOT NULL REFERENCES projects( id ), name BLOB NOT NULL, + partner_id BLOB, path_cipher INTEGER NOT NULL, created_at TIMESTAMP NOT NULL, default_segment_size INTEGER NOT NULL, @@ -3159,11 +3167,16 @@ type Project struct { Name string Description string UsageLimit int64 + PartnerId []byte CreatedAt time.Time } func (Project) _Table() string { return "projects" } +type Project_Create_Fields struct { + PartnerId Project_PartnerId_Field +} + type Project_Update_Fields struct { Description Project_Description_Field UsageLimit Project_UsageLimit_Field @@ -3245,6 +3258,38 @@ func (f Project_UsageLimit_Field) value() interface{} { func (Project_UsageLimit_Field) _Column() string { return "usage_limit" } +type Project_PartnerId_Field struct { + _set bool + _null bool + _value []byte +} + +func Project_PartnerId(v []byte) Project_PartnerId_Field { + return Project_PartnerId_Field{_set: true, _value: v} +} + +func Project_PartnerId_Raw(v []byte) Project_PartnerId_Field { + if v == nil { + return Project_PartnerId_Null() + } + return Project_PartnerId(v) +} + +func Project_PartnerId_Null() Project_PartnerId_Field { + return Project_PartnerId_Field{_set: true, _null: true} +} + +func (f Project_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f Project_PartnerId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (Project_PartnerId_Field) _Column() string { return "partner_id" } + type Project_CreatedAt_Field struct { _set bool _null bool @@ -3754,6 +3799,7 @@ type User struct { ShortName *string PasswordHash []byte Status int + PartnerId []byte CreatedAt time.Time } @@ -3761,6 +3807,7 @@ func (User) _Table() string { return "users" } type User_Create_Fields struct { ShortName User_ShortName_Field + PartnerId User_PartnerId_Field } type User_Update_Fields struct { @@ -3898,6 +3945,38 @@ func (f User_Status_Field) value() interface{} { func (User_Status_Field) _Column() string { return "status" } +type User_PartnerId_Field struct { + _set bool + _null bool + _value []byte +} + +func User_PartnerId(v 
[]byte) User_PartnerId_Field { + return User_PartnerId_Field{_set: true, _value: v} +} + +func User_PartnerId_Raw(v []byte) User_PartnerId_Field { + if v == nil { + return User_PartnerId_Null() + } + return User_PartnerId(v) +} + +func User_PartnerId_Null() User_PartnerId_Field { + return User_PartnerId_Field{_set: true, _null: true} +} + +func (f User_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f User_PartnerId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_PartnerId_Field) _Column() string { return "partner_id" } + type User_CreatedAt_Field struct { _set bool _null bool @@ -4012,11 +4091,16 @@ type ApiKey struct { Head []byte Name string Secret []byte + PartnerId []byte CreatedAt time.Time } func (ApiKey) _Table() string { return "api_keys" } +type ApiKey_Create_Fields struct { + PartnerId ApiKey_PartnerId_Field +} + type ApiKey_Update_Fields struct { Name ApiKey_Name_Field } @@ -4116,6 +4200,38 @@ func (f ApiKey_Secret_Field) value() interface{} { func (ApiKey_Secret_Field) _Column() string { return "secret" } +type ApiKey_PartnerId_Field struct { + _set bool + _null bool + _value []byte +} + +func ApiKey_PartnerId(v []byte) ApiKey_PartnerId_Field { + return ApiKey_PartnerId_Field{_set: true, _value: v} +} + +func ApiKey_PartnerId_Raw(v []byte) ApiKey_PartnerId_Field { + if v == nil { + return ApiKey_PartnerId_Null() + } + return ApiKey_PartnerId(v) +} + +func ApiKey_PartnerId_Null() ApiKey_PartnerId_Field { + return ApiKey_PartnerId_Field{_set: true, _null: true} +} + +func (f ApiKey_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f ApiKey_PartnerId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (ApiKey_PartnerId_Field) _Column() string { return "partner_id" } + type ApiKey_CreatedAt_Field struct { _set bool _null bool @@ -4139,6 +4255,7 @@ type BucketMetainfo struct { Id []byte ProjectId []byte Name []byte + PartnerId []byte PathCipher int CreatedAt time.Time DefaultSegmentSize int @@ -4154,6 +4271,10 @@ type BucketMetainfo struct { func (BucketMetainfo) _Table() string { return "bucket_metainfos" } +type BucketMetainfo_Create_Fields struct { + PartnerId BucketMetainfo_PartnerId_Field +} + type BucketMetainfo_Update_Fields struct { DefaultSegmentSize BucketMetainfo_DefaultSegmentSize_Field DefaultEncryptionCipherSuite BucketMetainfo_DefaultEncryptionCipherSuite_Field @@ -4223,6 +4344,38 @@ func (f BucketMetainfo_Name_Field) value() interface{} { func (BucketMetainfo_Name_Field) _Column() string { return "name" } +type BucketMetainfo_PartnerId_Field struct { + _set bool + _null bool + _value []byte +} + +func BucketMetainfo_PartnerId(v []byte) BucketMetainfo_PartnerId_Field { + return BucketMetainfo_PartnerId_Field{_set: true, _value: v} +} + +func BucketMetainfo_PartnerId_Raw(v []byte) BucketMetainfo_PartnerId_Field { + if v == nil { + return BucketMetainfo_PartnerId_Null() + } + return BucketMetainfo_PartnerId(v) +} + +func BucketMetainfo_PartnerId_Null() BucketMetainfo_PartnerId_Field { + return BucketMetainfo_PartnerId_Field{_set: true, _null: true} +} + +func (f BucketMetainfo_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f BucketMetainfo_PartnerId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (BucketMetainfo_PartnerId_Field) _Column() string { return "partner_id" } + type 
BucketMetainfo_PathCipher_Field struct { _set bool _null bool @@ -5513,15 +5666,16 @@ func (obj *postgresImpl) Create_User(ctx context.Context, __short_name_val := optional.ShortName.value() __password_hash_val := user_password_hash.value() __status_val := int(0) + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, full_name, short_name, password_hash, status, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, full_name, short_name, password_hash, status, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) user = &User{} - err = obj.driver.QueryRow(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __created_at_val).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -5557,7 +5711,8 @@ func (obj *postgresImpl) Create_Project(ctx context.Context, project_id Project_Id_Field, project_name Project_Name_Field, project_description Project_Description_Field, - project_usage_limit Project_UsageLimit_Field) ( + project_usage_limit Project_UsageLimit_Field, + optional Project_Create_Fields) ( project *Project, err error) { __now := obj.db.Hooks.Now().UTC() @@ -5565,15 +5720,16 @@ func (obj *postgresImpl) Create_Project(ctx context.Context, __name_val := project_name.value() __description_val := project_description.value() __usage_limit_val := project_usage_limit.value() + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ? 
) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __created_at_val) project = &Project{} - err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -5667,7 +5823,8 @@ func (obj *postgresImpl) Create_ApiKey(ctx context.Context, api_key_project_id ApiKey_ProjectId_Field, api_key_head ApiKey_Head_Field, api_key_name ApiKey_Name_Field, - api_key_secret ApiKey_Secret_Field) ( + api_key_secret ApiKey_Secret_Field, + optional ApiKey_Create_Fields) ( api_key *ApiKey, err error) { __now := obj.db.Hooks.Now().UTC() @@ -5676,15 +5833,16 @@ func (obj *postgresImpl) Create_ApiKey(ctx context.Context, __head_val := api_key_head.value() __name_val := api_key_name.value() __secret_val := api_key_secret.value() + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, created_at ) VALUES ( ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? 
) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __created_at_val).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -5991,13 +6149,15 @@ func (obj *postgresImpl) Create_BucketMetainfo(ctx context.Context, bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field, bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field, bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field, - bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field) ( + bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field, + optional BucketMetainfo_Create_Fields) ( bucket_metainfo *BucketMetainfo, err error) { __now := obj.db.Hooks.Now().UTC() __id_val := bucket_metainfo_id.value() __project_id_val := bucket_metainfo_project_id.value() __name_val := bucket_metainfo_name.value() + __partner_id_val := optional.PartnerId.value() __path_cipher_val := bucket_metainfo_path_cipher.value() __created_at_val := __now __default_segment_size_val := bucket_metainfo_default_segment_size.value() @@ -6010,13 +6170,13 @@ func (obj *postgresImpl) Create_BucketMetainfo(ctx context.Context, __default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value() __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) + obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __name_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, 
&bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -6330,7 +6490,7 @@ func (obj *postgresImpl) Get_User_By_Email_And_Status_Not_Number(ctx context.Con user_email User_Email_Field) ( user *User, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE users.email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.email = ? AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_email.value()) @@ -6352,7 +6512,7 @@ func (obj *postgresImpl) Get_User_By_Email_And_Status_Not_Number(ctx context.Con } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = __rows.Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6373,7 +6533,7 @@ func (obj *postgresImpl) Get_User_By_Id(ctx context.Context, user_id User_Id_Field) ( user *User, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -6382,7 +6542,7 @@ func (obj *postgresImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6415,7 +6575,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context, project_id Project_Id_Field) ( project *Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE projects.id = ?") var __values []interface{} __values = append(__values, project_id.value()) @@ -6424,7 +6584,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) project = &Project{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6435,7 +6595,7 @@ func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context, func (obj *postgresImpl) All_Project(ctx context.Context) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects") var __values []interface{} __values = append(__values) @@ -6451,7 +6611,7 @@ func (obj *postgresImpl) All_Project(ctx context.Context) ( for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6468,7 +6628,7 @@ func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx project_created_at_less Project_CreatedAt_Field) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE projects.created_at < ? 
ORDER BY projects.created_at") var __values []interface{} __values = append(__values, project_created_at_less.value()) @@ -6484,7 +6644,7 @@ func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6501,7 +6661,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje project_member_member_id ProjectMember_MemberId_Field) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name") var __values []interface{} __values = append(__values, project_member_member_id.value()) @@ -6517,7 +6677,7 @@ func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Proje for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6810,7 +6970,7 @@ func (obj *postgresImpl) Get_ApiKey_By_Id(ctx context.Context, api_key_id ApiKey_Id_Field) ( api_key *ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") var __values []interface{} __values = append(__values, api_key_id.value()) @@ -6819,7 +6979,7 @@ func (obj *postgresImpl) Get_ApiKey_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6831,7 +6991,7 @@ func (obj *postgresImpl) Get_ApiKey_By_Head(ctx context.Context, api_key_head ApiKey_Head_Field) ( api_key *ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.head = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?") var __values []interface{} __values = append(__values, api_key_head.value()) @@ -6840,7 +7000,7 @@ func (obj *postgresImpl) Get_ApiKey_By_Head(ctx context.Context, obj.logStmt(__stmt, __values...) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -6852,7 +7012,7 @@ func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Co api_key_project_id ApiKey_ProjectId_Field) ( rows []*ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? 
ORDER BY api_keys.name") var __values []interface{} __values = append(__values, api_key_project_id.value()) @@ -6868,7 +7028,7 @@ func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Co for __rows.Next() { api_key := &ApiKey{} - err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -7496,7 +7656,7 @@ func (obj *postgresImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Co bucket_metainfo_name BucketMetainfo_Name_Field) ( bucket_metainfo *BucketMetainfo, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) @@ -7505,7 +7665,7 @@ func (obj *postgresImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Co obj.logStmt(__stmt, __values...) 
bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -7519,7 +7679,7 @@ func (obj *postgresImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrE limit int, offset int64) ( rows []*BucketMetainfo, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) @@ -7537,7 +7697,7 @@ func (obj *postgresImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrE for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -7899,7 +8059,7 @@ func (obj *postgresImpl) Update_User_By_Id(ctx context.Context, user *User, err error) { var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -7943,7 +8103,7 @@ func (obj *postgresImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -7959,7 +8119,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context, project *Project, err error) { var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? 
RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -7988,7 +8148,7 @@ func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) project = &Project{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -8044,7 +8204,7 @@ func (obj *postgresImpl) Update_ApiKey_By_Id(ctx context.Context, api_key *ApiKey, err error) { var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ? RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ? RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -8068,7 +8228,7 @@ func (obj *postgresImpl) Update_ApiKey_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -9192,14 +9352,15 @@ func (obj *sqlite3Impl) Create_User(ctx context.Context, __short_name_val := optional.ShortName.value() __password_hash_val := user_password_hash.value() __status_val := int(0) + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, full_name, short_name, password_hash, status, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? )") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, full_name, short_name, password_hash, status, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? 
)") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) - __res, err := obj.driver.Exec(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __created_at_val) + __res, err := obj.driver.Exec(__stmt, __id_val, __email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) if err != nil { return nil, obj.makeErr(err) } @@ -9242,7 +9403,8 @@ func (obj *sqlite3Impl) Create_Project(ctx context.Context, project_id Project_Id_Field, project_name Project_Name_Field, project_description Project_Description_Field, - project_usage_limit Project_UsageLimit_Field) ( + project_usage_limit Project_UsageLimit_Field, + optional Project_Create_Fields) ( project *Project, err error) { __now := obj.db.Hooks.Now().UTC() @@ -9250,14 +9412,15 @@ func (obj *sqlite3Impl) Create_Project(ctx context.Context, __name_val := project_name.value() __description_val := project_description.value() __usage_limit_val := project_usage_limit.value() + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, created_at ) VALUES ( ?, ?, ?, ?, ? )") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ? )") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __created_at_val) - __res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __created_at_val) + __res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __created_at_val) if err != nil { return nil, obj.makeErr(err) } @@ -9364,7 +9527,8 @@ func (obj *sqlite3Impl) Create_ApiKey(ctx context.Context, api_key_project_id ApiKey_ProjectId_Field, api_key_head ApiKey_Head_Field, api_key_name ApiKey_Name_Field, - api_key_secret ApiKey_Secret_Field) ( + api_key_secret ApiKey_Secret_Field, + optional ApiKey_Create_Fields) ( api_key *ApiKey, err error) { __now := obj.db.Hooks.Now().UTC() @@ -9373,14 +9537,15 @@ func (obj *sqlite3Impl) Create_ApiKey(ctx context.Context, __head_val := api_key_head.value() __name_val := api_key_name.value() __secret_val := api_key_secret.value() + __partner_id_val := optional.PartnerId.value() __created_at_val := __now - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, created_at ) VALUES ( ?, ?, ?, ?, ?, ? )") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? 
)") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __created_at_val) + obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val) - __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __created_at_val) + __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val) if err != nil { return nil, obj.makeErr(err) } @@ -9721,13 +9886,15 @@ func (obj *sqlite3Impl) Create_BucketMetainfo(ctx context.Context, bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field, bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field, bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field, - bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field) ( + bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field, + optional BucketMetainfo_Create_Fields) ( bucket_metainfo *BucketMetainfo, err error) { __now := obj.db.Hooks.Now().UTC() __id_val := bucket_metainfo_id.value() __project_id_val := bucket_metainfo_project_id.value() __name_val := bucket_metainfo_name.value() + __partner_id_val := optional.PartnerId.value() __path_cipher_val := bucket_metainfo_path_cipher.value() __created_at_val := __now __default_segment_size_val := bucket_metainfo_default_segment_size.value() @@ -9740,12 +9907,12 @@ func (obj *sqlite3Impl) Create_BucketMetainfo(ctx context.Context, __default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value() __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) + obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) - __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __name_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) + __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) if err != nil { return nil, obj.makeErr(err) } @@ -10063,7 +10230,7 @@ func (obj *sqlite3Impl) Get_User_By_Email_And_Status_Not_Number(ctx context.Cont user_email User_Email_Field) ( user *User, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE users.email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.email = ? 
AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_email.value()) @@ -10085,7 +10252,7 @@ func (obj *sqlite3Impl) Get_User_By_Email_And_Status_Not_Number(ctx context.Cont } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = __rows.Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10106,7 +10273,7 @@ func (obj *sqlite3Impl) Get_User_By_Id(ctx context.Context, user_id User_Id_Field) ( user *User, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -10115,7 +10282,7 @@ func (obj *sqlite3Impl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) user = &User{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10148,7 +10315,7 @@ func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context, project_id Project_Id_Field) ( project *Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE projects.id = ?") var __values []interface{} __values = append(__values, project_id.value()) @@ -10157,7 +10324,7 @@ func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
project = &Project{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10168,7 +10335,7 @@ func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context, func (obj *sqlite3Impl) All_Project(ctx context.Context) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects") var __values []interface{} __values = append(__values) @@ -10184,7 +10351,7 @@ func (obj *sqlite3Impl) All_Project(ctx context.Context) ( for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10201,7 +10368,7 @@ func (obj *sqlite3Impl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx project_created_at_less Project_CreatedAt_Field) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") var __values []interface{} __values = append(__values, project_created_at_less.value()) @@ -10217,7 +10384,7 @@ func (obj *sqlite3Impl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10234,7 +10401,7 @@ func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Projec project_member_member_id ProjectMember_MemberId_Field) ( rows []*Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? 
ORDER BY projects.name") var __values []interface{} __values = append(__values, project_member_member_id.value()) @@ -10250,7 +10417,7 @@ func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Projec for __rows.Next() { project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10543,7 +10710,7 @@ func (obj *sqlite3Impl) Get_ApiKey_By_Id(ctx context.Context, api_key_id ApiKey_Id_Field) ( api_key *ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") var __values []interface{} __values = append(__values, api_key_id.value()) @@ -10552,7 +10719,7 @@ func (obj *sqlite3Impl) Get_ApiKey_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10564,7 +10731,7 @@ func (obj *sqlite3Impl) Get_ApiKey_By_Head(ctx context.Context, api_key_head ApiKey_Head_Field) ( api_key *ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.head = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?") var __values []interface{} __values = append(__values, api_key_head.value()) @@ -10573,7 +10740,7 @@ func (obj *sqlite3Impl) Get_ApiKey_By_Head(ctx context.Context, obj.logStmt(__stmt, __values...) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -10585,7 +10752,7 @@ func (obj *sqlite3Impl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Con api_key_project_id ApiKey_ProjectId_Field) ( rows []*ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? 
ORDER BY api_keys.name") var __values []interface{} __values = append(__values, api_key_project_id.value()) @@ -10601,7 +10768,7 @@ func (obj *sqlite3Impl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Con for __rows.Next() { api_key := &ApiKey{} - err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -11229,7 +11396,7 @@ func (obj *sqlite3Impl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Con bucket_metainfo_name BucketMetainfo_Name_Field) ( bucket_metainfo *BucketMetainfo, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) @@ -11238,7 +11405,7 @@ func (obj *sqlite3Impl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Con obj.logStmt(__stmt, __values...) 
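	// partner_id is declared nullable in the dbx model, so the generated struct
	// field is a []byte: scanning a SQL NULL below leaves BucketMetainfo.PartnerId
	// nil rather than materializing a 16-byte zero UUID.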
bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -11252,7 +11419,7 @@ func (obj *sqlite3Impl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEq limit int, offset int64) ( rows []*BucketMetainfo, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) @@ -11270,7 +11437,7 @@ func (obj *sqlite3Impl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEq for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -11721,12 +11888,12 @@ func (obj *sqlite3Impl) Update_User_By_Id(ctx context.Context, return nil, obj.makeErr(err) } - var __embed_stmt_get = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?") + var __embed_stmt_get = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?") var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) obj.logStmt("(IMPLIED) "+__stmt_get, __args...) - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -11776,12 +11943,12 @@ func (obj *sqlite3Impl) Update_Project_By_Id(ctx context.Context, return nil, obj.makeErr(err) } - var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE projects.id = ?") + var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE projects.id = ?") var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -11876,12 +12043,12 @@ func (obj *sqlite3Impl) Update_ApiKey_By_Id(ctx context.Context, return nil, obj.makeErr(err) } - var __embed_stmt_get = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") + var __embed_stmt_get = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) obj.logStmt("(IMPLIED) "+__stmt_get, __args...) - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err == sql.ErrNoRows { return nil, nil } @@ -12613,13 +12780,13 @@ func (obj *sqlite3Impl) getLastUser(ctx context.Context, pk int64) ( user *User, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.created_at FROM users WHERE _rowid_ = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE _rowid_ = ?") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, pk) user = &User{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.CreatedAt) + err = obj.driver.QueryRow(__stmt, pk).Scan(&user.Id, &user.Email, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -12649,13 +12816,13 @@ func (obj *sqlite3Impl) getLastProject(ctx context.Context, pk int64) ( project *Project, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.created_at FROM projects WHERE _rowid_ = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.created_at FROM projects WHERE _rowid_ = ?") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, pk) project = &Project{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.CreatedAt) + err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -12721,13 +12888,13 @@ func (obj *sqlite3Impl) getLastApiKey(ctx context.Context, pk int64) ( api_key *ApiKey, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, 
api_keys.secret, api_keys.created_at FROM api_keys WHERE _rowid_ = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE _rowid_ = ?") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, pk) api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.CreatedAt) + err = obj.driver.QueryRow(__stmt, pk).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) if err != nil { return nil, obj.makeErr(err) } @@ -12919,13 +13086,13 @@ func (obj *sqlite3Impl) getLastBucketMetainfo(ctx context.Context, pk int64) ( bucket_metainfo *BucketMetainfo, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE _rowid_ = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE _rowid_ = ?") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, pk) bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) + err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, 
&bucket_metainfo.DefaultRedundancyTotalShares) if err != nil { return nil, obj.makeErr(err) } @@ -13473,13 +13640,14 @@ func (rx *Rx) Create_ApiKey(ctx context.Context, api_key_project_id ApiKey_ProjectId_Field, api_key_head ApiKey_Head_Field, api_key_name ApiKey_Name_Field, - api_key_secret ApiKey_Secret_Field) ( + api_key_secret ApiKey_Secret_Field, + optional ApiKey_Create_Fields) ( api_key *ApiKey, err error) { var tx *Tx if tx, err = rx.getTx(ctx); err != nil { return } - return tx.Create_ApiKey(ctx, api_key_id, api_key_project_id, api_key_head, api_key_name, api_key_secret) + return tx.Create_ApiKey(ctx, api_key_id, api_key_project_id, api_key_head, api_key_name, api_key_secret, optional) } @@ -13496,13 +13664,14 @@ func (rx *Rx) Create_BucketMetainfo(ctx context.Context, bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field, bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field, bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field, - bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field) ( + bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field, + optional BucketMetainfo_Create_Fields) ( bucket_metainfo *BucketMetainfo, err error) { var tx *Tx if tx, err = rx.getTx(ctx); err != nil { return } - return tx.Create_BucketMetainfo(ctx, bucket_metainfo_id, bucket_metainfo_project_id, bucket_metainfo_name, bucket_metainfo_path_cipher, bucket_metainfo_default_segment_size, bucket_metainfo_default_encryption_cipher_suite, bucket_metainfo_default_encryption_block_size, bucket_metainfo_default_redundancy_algorithm, bucket_metainfo_default_redundancy_share_size, bucket_metainfo_default_redundancy_required_shares, bucket_metainfo_default_redundancy_repair_shares, bucket_metainfo_default_redundancy_optimal_shares, bucket_metainfo_default_redundancy_total_shares) + return tx.Create_BucketMetainfo(ctx, bucket_metainfo_id, bucket_metainfo_project_id, bucket_metainfo_name, bucket_metainfo_path_cipher, bucket_metainfo_default_segment_size, bucket_metainfo_default_encryption_cipher_suite, bucket_metainfo_default_encryption_block_size, bucket_metainfo_default_redundancy_algorithm, bucket_metainfo_default_redundancy_share_size, bucket_metainfo_default_redundancy_required_shares, bucket_metainfo_default_redundancy_repair_shares, bucket_metainfo_default_redundancy_optimal_shares, bucket_metainfo_default_redundancy_total_shares, optional) } @@ -13650,13 +13819,14 @@ func (rx *Rx) Create_Project(ctx context.Context, project_id Project_Id_Field, project_name Project_Name_Field, project_description Project_Description_Field, - project_usage_limit Project_UsageLimit_Field) ( + project_usage_limit Project_UsageLimit_Field, + optional Project_Create_Fields) ( project *Project, err error) { var tx *Tx if tx, err = rx.getTx(ctx); err != nil { return } - return tx.Create_Project(ctx, project_id, project_name, project_description, project_usage_limit) + return tx.Create_Project(ctx, project_id, project_name, project_description, project_usage_limit, optional) } @@ -14576,7 +14746,8 @@ type Methods interface { api_key_project_id ApiKey_ProjectId_Field, api_key_head ApiKey_Head_Field, api_key_name ApiKey_Name_Field, - api_key_secret ApiKey_Secret_Field) ( + api_key_secret ApiKey_Secret_Field, + optional ApiKey_Create_Fields) ( api_key *ApiKey, err error) Create_BucketMetainfo(ctx 
context.Context, @@ -14592,7 +14763,8 @@ type Methods interface { bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field, bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field, bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field, - bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field) ( + bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field, + optional BucketMetainfo_Create_Fields) ( bucket_metainfo *BucketMetainfo, err error) Create_BucketStorageTally(ctx context.Context, @@ -14690,7 +14862,8 @@ type Methods interface { project_id Project_Id_Field, project_name Project_Name_Field, project_description Project_Description_Field, - project_usage_limit Project_UsageLimit_Field) ( + project_usage_limit Project_UsageLimit_Field, + optional Project_Create_Fields) ( project *Project, err error) Create_ProjectInvoiceStamp(ctx context.Context, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql b/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql index 3304f5a25..385bae84d 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql @@ -137,6 +137,7 @@ CREATE TABLE projects ( name text NOT NULL, description text NOT NULL, usage_limit bigint NOT NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ) ); @@ -185,6 +186,7 @@ CREATE TABLE users ( short_name text, password_hash bytea NOT NULL, status integer NOT NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ) ); @@ -201,6 +203,7 @@ CREATE TABLE api_keys ( head bytea NOT NULL, name text NOT NULL, secret bytea NOT NULL, + partner_id bytea, created_at timestamp with time zone NOT NULL, PRIMARY KEY ( id ), UNIQUE ( head ), @@ -210,6 +213,7 @@ CREATE TABLE bucket_metainfos ( id bytea NOT NULL, project_id bytea NOT NULL REFERENCES projects( id ), name bytea NOT NULL, + partner_id bytea, path_cipher integer NOT NULL, created_at timestamp with time zone NOT NULL, default_segment_size integer NOT NULL, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql b/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql index fab12923f..4f726359f 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql @@ -137,6 +137,7 @@ CREATE TABLE projects ( name TEXT NOT NULL, description TEXT NOT NULL, usage_limit INTEGER NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ) ); @@ -185,6 +186,7 @@ CREATE TABLE users ( short_name TEXT, password_hash BLOB NOT NULL, status INTEGER NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ) ); @@ -201,6 +203,7 @@ CREATE TABLE api_keys ( head BLOB NOT NULL, name TEXT NOT NULL, secret BLOB NOT NULL, + partner_id BLOB, created_at TIMESTAMP NOT NULL, PRIMARY KEY ( id ), UNIQUE ( head ), @@ -210,6 +213,7 @@ CREATE TABLE bucket_metainfos ( id BLOB NOT NULL, project_id BLOB NOT NULL REFERENCES projects( id ), name BLOB NOT NULL, + partner_id BLOB, path_cipher INTEGER NOT NULL, created_at TIMESTAMP NOT NULL, default_segment_size INTEGER NOT NULL, diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index 21b2b6490..6930d7876 100644 --- a/satellite/satellitedb/migrate.go +++ 
b/satellite/satellitedb/migrate.go @@ -1009,6 +1009,16 @@ func (db *DB) PostgresMigration() *migrate.Migration { `CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );`, }, }, + { + Description: "Add partner id field to support OSPP", + Version: 45, + Action: migrate.SQL{ + `ALTER TABLE projects ADD COLUMN partner_id BYTEA`, + `ALTER TABLE users ADD COLUMN partner_id BYTEA`, + `ALTER TABLE api_keys ADD COLUMN partner_id BYTEA`, + `ALTER TABLE bucket_metainfos ADD COLUMN partner_id BYTEA`, + }, + }, }, } } diff --git a/satellite/satellitedb/projects.go b/satellite/satellitedb/projects.go index d069668b4..09d0ffba7 100644 --- a/satellite/satellitedb/projects.go +++ b/satellite/satellitedb/projects.go @@ -80,6 +80,9 @@ func (projects *projects) Insert(ctx context.Context, project *console.Project) dbx.Project_Name(project.Name), dbx.Project_Description(project.Description), dbx.Project_UsageLimit(0), + dbx.Project_Create_Fields{ + PartnerId: dbx.Project_PartnerId(project.PartnerID[:]), + }, ) if err != nil { diff --git a/satellite/satellitedb/testdata/postgres.v45.sql b/satellite/satellitedb/testdata/postgres.v45.sql new file mode 100644 index 000000000..a3989f0e2 --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v45.sql @@ -0,0 +1,347 @@ +-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1 +-- DO NOT EDIT +CREATE TABLE accounting_rollups ( + id bigserial NOT NULL, + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp NOT NULL, + inline bigint NOT NULL, + remote bigint NOT NULL, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE bucket_usages ( + id bytea NOT NULL, + bucket_id bytea NOT NULL, + rollup_end_time timestamp with time zone NOT NULL, + remote_stored_data bigint NOT NULL, + inline_stored_data bigint NOT NULL, + remote_segments integer NOT NULL, + inline_segments integer NOT NULL, + objects integer NOT NULL, + metadata_size bigint NOT NULL, + repair_egress bigint NOT NULL, + get_egress bigint NOT NULL, + audit_egress bigint NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE certRecords ( + publickey bytea NOT NULL, + id bytea NOT NULL, + update_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE injuredsegments ( + path bytea NOT NULL, + data bytea NOT NULL, + attempted timestamp, + PRIMARY KEY ( path ) +); +CREATE TABLE irreparabledbs ( + segmentpath bytea NOT NULL, + segmentdetail bytea NOT NULL, + pieces_lost_count bigint NOT NULL, + seg_damaged_unix_sec bigint NOT NULL, + repair_attempt_count bigint NOT NULL, 
+ PRIMARY KEY ( segmentpath ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL, + last_net text NOT NULL, + protocol integer NOT NULL, + type integer NOT NULL, + email text NOT NULL, + wallet text NOT NULL, + free_bandwidth bigint NOT NULL, + free_disk bigint NOT NULL, + major bigint NOT NULL, + minor bigint NOT NULL, + patch bigint NOT NULL, + hash text NOT NULL, + timestamp timestamp with time zone NOT NULL, + release boolean NOT NULL, + latency_90 bigint NOT NULL, + audit_success_count bigint NOT NULL, + total_audit_count bigint NOT NULL, + uptime_success_count bigint NOT NULL, + total_uptime_count bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + last_contact_success timestamp with time zone NOT NULL, + last_contact_failure timestamp with time zone NOT NULL, + contained boolean NOT NULL, + disqualified timestamp with time zone, + audit_reputation_alpha double precision NOT NULL, + audit_reputation_beta double precision NOT NULL, + uptime_reputation_alpha double precision NOT NULL, + uptime_reputation_beta double precision NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE offers ( + id serial NOT NULL, + name text NOT NULL, + description text NOT NULL, + award_credit_in_cents integer NOT NULL, + invitee_credit_in_cents integer NOT NULL, + award_credit_duration_days integer, + invitee_credit_duration_days integer, + redeemable_cap integer, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + status integer NOT NULL, + type integer NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE serial_numbers ( + id serial NOT NULL, + serial_number bytea NOT NULL, + bucket_id bytea NOT NULL, + expires_at timestamp NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_storage_tallies ( + id bigserial NOT NULL, + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, + bucket_name bytea NOT NULL, + partner_id bytea 
NOT NULL, + last_updated timestamp NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + partner_id bytea, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( name, project_id ) +); +CREATE TABLE project_invoice_stamps ( + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + invoice_id bytea NOT NULL, + start_date timestamp with time zone NOT NULL, + end_date timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, start_date, end_date ), + UNIQUE ( invoice_id ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE used_serials ( + serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE, + storage_node_id bytea NOT NULL, + PRIMARY KEY ( serial_number_id, storage_node_id ) +); +CREATE TABLE user_credits ( + id serial NOT NULL, + user_id bytea NOT NULL REFERENCES users( id ), + offer_id integer NOT NULL REFERENCES offers( id ), + referred_by bytea REFERENCES users( id ), + credits_earned_in_cents integer NOT NULL, + credits_used_in_cents integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE user_payments ( + user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + customer_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE project_payments ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + payer_id bytea NOT NULL REFERENCES user_payments( user_id ) ON DELETE CASCADE, + payment_method_id bytea NOT NULL, + is_default boolean NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds ); +CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time ); +CREATE INDEX node_last_ip ON nodes ( last_net ); +CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number ); +CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at ); +CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, 
interval_start, interval_seconds ); + +--- + +INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES 
(E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, '2019-02-14 08:28:24.254934+00'); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00'); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, '2019-02-14 08:28:24.636949+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10); + +INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "bucket_usages" ("id", "bucket_id", 
"rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00'); +INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count") VALUES 
(E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1); + +INSERT INTO "offers" ("name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "status", "type") VALUES ('testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0); +INSERT INTO "offers" ("name","description","award_credit_in_cents","invitee_credit_in_cents","expires_at","created_at","status","type") VALUES ('Default free credit offer','Is active when no active free credit offer',300,0,'2119-03-14 08:28:24.636949+00','2019-07-14 08:28:24.636949+00',1,1); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "user_payments" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00'); + +INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "project_payments" ("id", "project_id", 
"payer_id", "payment_method_id", "is_default","created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, true, '2019-06-01 08:28:24.267934+00'); diff --git a/satellite/satellitedb/users.go b/satellite/satellitedb/users.go index 6cd4ded80..cec1111b7 100644 --- a/satellite/satellitedb/users.go +++ b/satellite/satellitedb/users.go @@ -56,6 +56,7 @@ func (users *users) Insert(ctx context.Context, user *console.User) (_ *console. dbx.User_PasswordHash(user.PasswordHash), dbx.User_Create_Fields{ ShortName: dbx.User_ShortName(user.ShortName), + PartnerId: dbx.User_PartnerId(user.PartnerID[:]), }, )