diff --git a/satellite/orders/orders_test.go b/satellite/orders/orders_test.go index 8db564699..31c523788 100644 --- a/satellite/orders/orders_test.go +++ b/satellite/orders/orders_test.go @@ -290,7 +290,7 @@ func BenchmarkOrders(b *testing.B) { ctx := testcontext.New(b) defer ctx.Cleanup() - counts := []int{50, 100, 250, 500, 999} //sqlite limit of 999 + counts := []int{50, 100, 250, 500, 1000} for _, c := range counts { c := c satellitedbtest.Bench(b, func(b *testing.B, db satellite.DB) { diff --git a/satellite/repair/queue/queue2_test.go b/satellite/repair/queue/queue2_test.go index 4be09c159..f5c1c7af6 100644 --- a/satellite/repair/queue/queue2_test.go +++ b/satellite/repair/queue/queue2_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -74,15 +72,6 @@ func TestOrder(t *testing.T) { // TODO: remove dependency on *dbx.DB dbAccess := db.(interface{ TestDBAccess() *dbx.DB }).TestDBAccess() - var timeConvertPrefix string - switch d := dbAccess.DB.Driver().(type) { - case *sqlite3.SQLiteDriver: - timeConvertPrefix = "datetime(" - case *pq.Driver: - timeConvertPrefix = "timezone('utc', " - default: - t.Errorf("Unsupported database type %t", d) - } err := dbAccess.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error { updateList := []struct { @@ -94,7 +83,7 @@ func TestOrder(t *testing.T) { {olderRepairPath, time.Now().Add(-3 * time.Hour)}, } for _, item := range updateList { - res, err := tx.Tx.ExecContext(ctx, dbAccess.Rebind(`UPDATE injuredsegments SET attempted = `+timeConvertPrefix+`?) WHERE path = ?`), item.attempted, item.path) + res, err := tx.Tx.ExecContext(ctx, dbAccess.Rebind(`UPDATE injuredsegments SET attempted = timezone('utc', ?) 
WHERE path = ?`), item.attempted, item.path) if err != nil { return err } diff --git a/satellite/satellitedb/attribution.go b/satellite/satellitedb/attribution.go index 8b82fc481..f95209ec2 100644 --- a/satellite/satellitedb/attribution.go +++ b/satellite/satellitedb/attribution.go @@ -6,11 +6,8 @@ package satellitedb import ( "context" "database/sql" - "fmt" "time" - "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "github.com/skyrings/skyring-common/tools/uuid" "github.com/zeebo/errs" @@ -47,7 +44,7 @@ const ( -- If there are more than 1 records within the hour, only the latest will be considered SELECT va.partner_id, - %v as hours, + date_trunc('hour', bst.interval_start) as hours, bst.project_id, bst.bucket_name, MAX(bst.interval_start) as max_interval @@ -109,9 +106,6 @@ const ( o.project_id, o.bucket_name; ` - // DB specific date/time truncations - slHour = "datetime(strftime('%Y-%m-%dT%H:00:00', bst.interval_start))" - pqHour = "date_trunc('hour', bst.interval_start)" ) type attributionDB struct { @@ -156,17 +150,7 @@ func (keys *attributionDB) Insert(ctx context.Context, info *attribution.Info) ( func (keys *attributionDB) QueryAttribution(ctx context.Context, partnerID uuid.UUID, start time.Time, end time.Time) (_ []*attribution.CSVRow, err error) { defer mon.Task()(&ctx)(&err) - var query string - switch t := keys.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - query = fmt.Sprintf(valueAttrQuery, slHour) - case *pq.Driver: - query = fmt.Sprintf(valueAttrQuery, pqHour) - default: - return nil, Error.New("Unsupported database %t", t) - } - - rows, err := keys.db.DB.QueryContext(ctx, keys.db.Rebind(query), partnerID[:], start.UTC(), end.UTC(), partnerID[:], start.UTC(), end.UTC()) + rows, err := keys.db.DB.QueryContext(ctx, keys.db.Rebind(valueAttrQuery), partnerID[:], start.UTC(), end.UTC(), partnerID[:], start.UTC(), end.UTC()) if err != nil { return nil, Error.Wrap(err) } diff --git a/satellite/satellitedb/database.go 
b/satellite/satellitedb/database.go index 4c910ad68..305c485b4 100644 --- a/satellite/satellitedb/database.go +++ b/satellite/satellitedb/database.go @@ -76,12 +76,6 @@ func (db *DB) CreateSchema(schema string) error { // should not be used outside of migration tests. func (db *DB) TestDBAccess() *dbx.DB { return db.db } -// TestDBAccess for raw database access, -// should not be used outside of tests. -func (db *locked) TestDBAccess() *dbx.DB { - return db.db.(interface{ TestDBAccess() *dbx.DB }).TestDBAccess() -} - // DropSchema drops the named schema func (db *DB) DropSchema(schema string) error { return pgutil.DropSchema(db.db, schema) diff --git a/satellite/satellitedb/dbx/gen.go b/satellite/satellitedb/dbx/gen.go index 4b9e16d04..eca083f9d 100644 --- a/satellite/satellitedb/dbx/gen.go +++ b/satellite/satellitedb/dbx/gen.go @@ -10,8 +10,8 @@ import ( "github.com/zeebo/errs" ) -//go:generate dbx.v1 schema -d postgres -d sqlite3 satellitedb.dbx . -//go:generate dbx.v1 golang -d postgres -d sqlite3 satellitedb.dbx . +//go:generate dbx.v1 schema -d postgres satellitedb.dbx . +//go:generate dbx.v1 golang -d postgres satellitedb.dbx . 
func init() { // catch dbx errors diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index ccbec61cf..fdd974cf0 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -17,9 +17,6 @@ import ( "unicode" "github.com/lib/pq" - - "github.com/mattn/go-sqlite3" - "math/rand" ) // Prevent conditional imports from causing build failures @@ -144,8 +141,6 @@ func Open(driver, source string) (db *DB, err error) { switch driver { case "postgres": sql_db, err = openpostgres(source) - case "sqlite3": - sql_db, err = opensqlite3(source) default: return nil, unsupportedDriver(driver) } @@ -170,8 +165,6 @@ func Open(driver, source string) (db *DB, err error) { switch driver { case "postgres": db.dbMethods = newpostgres(db) - case "sqlite3": - db.dbMethods = newsqlite3(db) default: return nil, unsupportedDriver(driver) } @@ -612,384 +605,6 @@ func postgresLogStmt(stmt string, args ...interface{}) { } } -type sqlite3Impl struct { - db *DB - dialect __sqlbundle_sqlite3 - driver driver -} - -func (obj *sqlite3Impl) Rebind(s string) string { - return obj.dialect.Rebind(s) -} - -func (obj *sqlite3Impl) logStmt(stmt string, args ...interface{}) { - sqlite3LogStmt(stmt, args...) 
-} - -func (obj *sqlite3Impl) makeErr(err error) error { - constraint, ok := obj.isConstraintError(err) - if ok { - return constraintViolation(err, constraint) - } - return makeErr(err) -} - -type sqlite3DB struct { - db *DB - *sqlite3Impl -} - -func newsqlite3(db *DB) *sqlite3DB { - return &sqlite3DB{ - db: db, - sqlite3Impl: &sqlite3Impl{ - db: db, - driver: db.DB, - }, - } -} - -func (obj *sqlite3DB) Schema() string { - return `CREATE TABLE accounting_rollups ( - id INTEGER NOT NULL, - node_id BLOB NOT NULL, - start_time TIMESTAMP NOT NULL, - put_total INTEGER NOT NULL, - get_total INTEGER NOT NULL, - get_audit_total INTEGER NOT NULL, - get_repair_total INTEGER NOT NULL, - put_repair_total INTEGER NOT NULL, - at_rest_total REAL NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE accounting_timestamps ( - name TEXT NOT NULL, - value TIMESTAMP NOT NULL, - PRIMARY KEY ( name ) -); -CREATE TABLE bucket_bandwidth_rollups ( - bucket_name BLOB NOT NULL, - project_id BLOB NOT NULL, - interval_start TIMESTAMP NOT NULL, - interval_seconds INTEGER NOT NULL, - action INTEGER NOT NULL, - inline INTEGER NOT NULL, - allocated INTEGER NOT NULL, - settled INTEGER NOT NULL, - PRIMARY KEY ( bucket_name, project_id, interval_start, action ) -); -CREATE TABLE bucket_storage_tallies ( - bucket_name BLOB NOT NULL, - project_id BLOB NOT NULL, - interval_start TIMESTAMP NOT NULL, - inline INTEGER NOT NULL, - remote INTEGER NOT NULL, - remote_segments_count INTEGER NOT NULL, - inline_segments_count INTEGER NOT NULL, - object_count INTEGER NOT NULL, - metadata_size INTEGER NOT NULL, - PRIMARY KEY ( bucket_name, project_id, interval_start ) -); -CREATE TABLE bucket_usages ( - id BLOB NOT NULL, - bucket_id BLOB NOT NULL, - rollup_end_time TIMESTAMP NOT NULL, - remote_stored_data INTEGER NOT NULL, - inline_stored_data INTEGER NOT NULL, - remote_segments INTEGER NOT NULL, - inline_segments INTEGER NOT NULL, - objects INTEGER NOT NULL, - metadata_size INTEGER NOT NULL, - repair_egress INTEGER NOT 
NULL, - get_egress INTEGER NOT NULL, - audit_egress INTEGER NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE coinpayments_transactions ( - id TEXT NOT NULL, - user_id BLOB NOT NULL, - address TEXT NOT NULL, - amount BLOB NOT NULL, - received BLOB NOT NULL, - status INTEGER NOT NULL, - key TEXT NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE graceful_exit_progress ( - node_id BLOB NOT NULL, - bytes_transferred INTEGER NOT NULL, - pieces_transferred INTEGER NOT NULL, - pieces_failed INTEGER NOT NULL, - updated_at TIMESTAMP NOT NULL, - PRIMARY KEY ( node_id ) -); -CREATE TABLE graceful_exit_transfer_queue ( - node_id BLOB NOT NULL, - path BLOB NOT NULL, - piece_num INTEGER NOT NULL, - durability_ratio REAL NOT NULL, - queued_at TIMESTAMP NOT NULL, - requested_at TIMESTAMP, - last_failed_at TIMESTAMP, - last_failed_code INTEGER, - failed_count INTEGER, - finished_at TIMESTAMP, - PRIMARY KEY ( node_id, path ) -); -CREATE TABLE injuredsegments ( - path BLOB NOT NULL, - data BLOB NOT NULL, - attempted TIMESTAMP, - PRIMARY KEY ( path ) -); -CREATE TABLE irreparabledbs ( - segmentpath BLOB NOT NULL, - segmentdetail BLOB NOT NULL, - pieces_lost_count INTEGER NOT NULL, - seg_damaged_unix_sec INTEGER NOT NULL, - repair_attempt_count INTEGER NOT NULL, - PRIMARY KEY ( segmentpath ) -); -CREATE TABLE nodes ( - id BLOB NOT NULL, - address TEXT NOT NULL, - last_net TEXT NOT NULL, - protocol INTEGER NOT NULL, - type INTEGER NOT NULL, - email TEXT NOT NULL, - wallet TEXT NOT NULL, - free_bandwidth INTEGER NOT NULL, - free_disk INTEGER NOT NULL, - piece_count INTEGER NOT NULL, - major INTEGER NOT NULL, - minor INTEGER NOT NULL, - patch INTEGER NOT NULL, - hash TEXT NOT NULL, - timestamp TIMESTAMP NOT NULL, - release INTEGER NOT NULL, - latency_90 INTEGER NOT NULL, - audit_success_count INTEGER NOT NULL, - total_audit_count INTEGER NOT NULL, - uptime_success_count INTEGER NOT NULL, - total_uptime_count INTEGER NOT NULL, - created_at TIMESTAMP NOT NULL, 
- updated_at TIMESTAMP NOT NULL, - last_contact_success TIMESTAMP NOT NULL, - last_contact_failure TIMESTAMP NOT NULL, - contained INTEGER NOT NULL, - disqualified TIMESTAMP, - audit_reputation_alpha REAL NOT NULL, - audit_reputation_beta REAL NOT NULL, - uptime_reputation_alpha REAL NOT NULL, - uptime_reputation_beta REAL NOT NULL, - exit_initiated_at TIMESTAMP, - exit_loop_completed_at TIMESTAMP, - exit_finished_at TIMESTAMP, - exit_success INTEGER NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE offers ( - id INTEGER NOT NULL, - name TEXT NOT NULL, - description TEXT NOT NULL, - award_credit_in_cents INTEGER NOT NULL, - invitee_credit_in_cents INTEGER NOT NULL, - award_credit_duration_days INTEGER, - invitee_credit_duration_days INTEGER, - redeemable_cap INTEGER, - expires_at TIMESTAMP NOT NULL, - created_at TIMESTAMP NOT NULL, - status INTEGER NOT NULL, - type INTEGER NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE peer_identities ( - node_id BLOB NOT NULL, - leaf_serial_number BLOB NOT NULL, - chain BLOB NOT NULL, - updated_at TIMESTAMP NOT NULL, - PRIMARY KEY ( node_id ) -); -CREATE TABLE pending_audits ( - node_id BLOB NOT NULL, - piece_id BLOB NOT NULL, - stripe_index INTEGER NOT NULL, - share_size INTEGER NOT NULL, - expected_share_hash BLOB NOT NULL, - reverify_count INTEGER NOT NULL, - path BLOB NOT NULL, - PRIMARY KEY ( node_id ) -); -CREATE TABLE projects ( - id BLOB NOT NULL, - name TEXT NOT NULL, - description TEXT NOT NULL, - usage_limit INTEGER NOT NULL, - partner_id BLOB, - owner_id BLOB NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE registration_tokens ( - secret BLOB NOT NULL, - owner_id BLOB, - project_limit INTEGER NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( secret ), - UNIQUE ( owner_id ) -); -CREATE TABLE reset_password_tokens ( - secret BLOB NOT NULL, - owner_id BLOB NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( secret ), - UNIQUE ( owner_id ) -); -CREATE TABLE serial_numbers ( 
- id INTEGER NOT NULL, - serial_number BLOB NOT NULL, - bucket_id BLOB NOT NULL, - expires_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE storagenode_bandwidth_rollups ( - storagenode_id BLOB NOT NULL, - interval_start TIMESTAMP NOT NULL, - interval_seconds INTEGER NOT NULL, - action INTEGER NOT NULL, - allocated INTEGER NOT NULL, - settled INTEGER NOT NULL, - PRIMARY KEY ( storagenode_id, interval_start, action ) -); -CREATE TABLE storagenode_storage_tallies ( - id INTEGER NOT NULL, - node_id BLOB NOT NULL, - interval_end_time TIMESTAMP NOT NULL, - data_total REAL NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE stripe_customers ( - user_id BLOB NOT NULL, - customer_id TEXT NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( user_id ), - UNIQUE ( customer_id ) -); -CREATE TABLE users ( - id BLOB NOT NULL, - email TEXT NOT NULL, - normalized_email TEXT NOT NULL, - full_name TEXT NOT NULL, - short_name TEXT, - password_hash BLOB NOT NULL, - status INTEGER NOT NULL, - partner_id BLOB, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ) -); -CREATE TABLE value_attributions ( - project_id BLOB NOT NULL, - bucket_name BLOB NOT NULL, - partner_id BLOB NOT NULL, - last_updated TIMESTAMP NOT NULL, - PRIMARY KEY ( project_id, bucket_name ) -); -CREATE TABLE api_keys ( - id BLOB NOT NULL, - project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, - head BLOB NOT NULL, - name TEXT NOT NULL, - secret BLOB NOT NULL, - partner_id BLOB, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ), - UNIQUE ( head ), - UNIQUE ( name, project_id ) -); -CREATE TABLE bucket_metainfos ( - id BLOB NOT NULL, - project_id BLOB NOT NULL REFERENCES projects( id ), - name BLOB NOT NULL, - partner_id BLOB, - path_cipher INTEGER NOT NULL, - created_at TIMESTAMP NOT NULL, - default_segment_size INTEGER NOT NULL, - default_encryption_cipher_suite INTEGER NOT NULL, - default_encryption_block_size INTEGER NOT NULL, - default_redundancy_algorithm INTEGER NOT NULL, - 
default_redundancy_share_size INTEGER NOT NULL, - default_redundancy_required_shares INTEGER NOT NULL, - default_redundancy_repair_shares INTEGER NOT NULL, - default_redundancy_optimal_shares INTEGER NOT NULL, - default_redundancy_total_shares INTEGER NOT NULL, - PRIMARY KEY ( id ), - UNIQUE ( name, project_id ) -); -CREATE TABLE project_invoice_stamps ( - project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, - invoice_id BLOB NOT NULL, - start_date TIMESTAMP NOT NULL, - end_date TIMESTAMP NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( project_id, start_date, end_date ), - UNIQUE ( invoice_id ) -); -CREATE TABLE project_members ( - member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE, - project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( member_id, project_id ) -); -CREATE TABLE used_serials ( - serial_number_id INTEGER NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE, - storage_node_id BLOB NOT NULL, - PRIMARY KEY ( serial_number_id, storage_node_id ) -); -CREATE TABLE user_credits ( - id INTEGER NOT NULL, - user_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE, - offer_id INTEGER NOT NULL REFERENCES offers( id ), - referred_by BLOB REFERENCES users( id ) ON DELETE SET NULL, - type TEXT NOT NULL, - credits_earned_in_cents INTEGER NOT NULL, - credits_used_in_cents INTEGER NOT NULL, - expires_at TIMESTAMP NOT NULL, - created_at TIMESTAMP NOT NULL, - PRIMARY KEY ( id ) -); -CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds ); -CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time ); -CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted ); -CREATE INDEX node_last_ip ON nodes ( last_net ); -CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number ); -CREATE INDEX 
serial_numbers_expires_at_index ON serial_numbers ( expires_at ); -CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );` -} - -func (obj *sqlite3DB) wrapTx(tx *sql.Tx) txMethods { - return &sqlite3Tx{ - dialectTx: dialectTx{tx: tx}, - sqlite3Impl: &sqlite3Impl{ - db: obj.db, - driver: tx, - }, - } -} - -type sqlite3Tx struct { - dialectTx - *sqlite3Impl -} - -func sqlite3LogStmt(stmt string, args ...interface{}) { - // TODO: render placeholders - if Logger != nil { - out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args)) - Logger(out) - } -} - type pretty []interface{} func (p pretty) Format(f fmt.State, c rune) { @@ -10423,4782 +10038,6 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) } -func (obj *sqlite3Impl) Create_ValueAttribution(ctx context.Context, - value_attribution_project_id ValueAttribution_ProjectId_Field, - value_attribution_bucket_name ValueAttribution_BucketName_Field, - value_attribution_partner_id ValueAttribution_PartnerId_Field) ( - value_attribution *ValueAttribution, err error) { - - __now := obj.db.Hooks.Now().UTC() - __project_id_val := value_attribution_project_id.value() - __bucket_name_val := value_attribution_bucket_name.value() - __partner_id_val := value_attribution_partner_id.value() - __last_updated_val := __now.UTC() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, last_updated ) VALUES ( ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val) - - __res, err := obj.driver.Exec(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastValueAttribution(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_PendingAudits(ctx context.Context, - pending_audits_node_id PendingAudits_NodeId_Field, - pending_audits_piece_id PendingAudits_PieceId_Field, - pending_audits_stripe_index PendingAudits_StripeIndex_Field, - pending_audits_share_size PendingAudits_ShareSize_Field, - pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field, - pending_audits_reverify_count PendingAudits_ReverifyCount_Field, - pending_audits_path PendingAudits_Path_Field) ( - pending_audits *PendingAudits, err error) { - __node_id_val := pending_audits_node_id.value() - __piece_id_val := pending_audits_piece_id.value() - __stripe_index_val := pending_audits_stripe_index.value() - __share_size_val := pending_audits_share_size.value() - __expected_share_hash_val := pending_audits_expected_share_hash.value() - __reverify_count_val := pending_audits_reverify_count.value() - __path_val := pending_audits_path.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO pending_audits ( node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count, path ) VALUES ( ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val) - - __res, err := obj.driver.Exec(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastPendingAudits(ctx, __pk) - -} - -func (obj *sqlite3Impl) CreateNoReturn_Irreparabledb(ctx context.Context, - irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, - irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field, - irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field, - irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field, - irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) ( - err error) { - __segmentpath_val := irreparabledb_segmentpath.value() - __segmentdetail_val := irreparabledb_segmentdetail.value() - __pieces_lost_count_val := irreparabledb_pieces_lost_count.value() - __seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value() - __repair_attempt_count_val := irreparabledb_repair_attempt_count.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val) - - _, err = obj.driver.Exec(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_AccountingTimestamps(ctx context.Context, - accounting_timestamps_name AccountingTimestamps_Name_Field, - accounting_timestamps_value AccountingTimestamps_Value_Field) ( - err error) { - __name_val := accounting_timestamps_name.value() - __value_val := accounting_timestamps_value.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __name_val, __value_val) - - _, err = obj.driver.Exec(__stmt, __name_val, __value_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_AccountingRollup(ctx context.Context, - accounting_rollup_node_id AccountingRollup_NodeId_Field, - accounting_rollup_start_time AccountingRollup_StartTime_Field, - accounting_rollup_put_total AccountingRollup_PutTotal_Field, - accounting_rollup_get_total AccountingRollup_GetTotal_Field, - accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field, - accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field, - accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field, - accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) ( - err error) { - __node_id_val := accounting_rollup_node_id.value() - __start_time_val := accounting_rollup_start_time.value() - __put_total_val := accounting_rollup_put_total.value() - __get_total_val := accounting_rollup_get_total.value() - 
__get_audit_total_val := accounting_rollup_get_audit_total.value() - __get_repair_total_val := accounting_rollup_get_repair_total.value() - __put_repair_total_val := accounting_rollup_put_repair_total.value() - __at_rest_total_val := accounting_rollup_at_rest_total.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val) - - _, err = obj.driver.Exec(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_Node(ctx context.Context, - node_id Node_Id_Field, - node_address Node_Address_Field, - node_last_net Node_LastNet_Field, - node_protocol Node_Protocol_Field, - node_type Node_Type_Field, - node_email Node_Email_Field, - node_wallet Node_Wallet_Field, - node_free_bandwidth Node_FreeBandwidth_Field, - node_free_disk Node_FreeDisk_Field, - node_major Node_Major_Field, - node_minor Node_Minor_Field, - node_patch Node_Patch_Field, - node_hash Node_Hash_Field, - node_timestamp Node_Timestamp_Field, - node_release Node_Release_Field, - node_latency_90 Node_Latency90_Field, - node_audit_success_count Node_AuditSuccessCount_Field, - node_total_audit_count Node_TotalAuditCount_Field, - node_uptime_success_count Node_UptimeSuccessCount_Field, - node_total_uptime_count Node_TotalUptimeCount_Field, - node_last_contact_success Node_LastContactSuccess_Field, - node_last_contact_failure Node_LastContactFailure_Field, - node_contained 
Node_Contained_Field, - node_audit_reputation_alpha Node_AuditReputationAlpha_Field, - node_audit_reputation_beta Node_AuditReputationBeta_Field, - node_uptime_reputation_alpha Node_UptimeReputationAlpha_Field, - node_uptime_reputation_beta Node_UptimeReputationBeta_Field, - node_exit_success Node_ExitSuccess_Field, - optional Node_Create_Fields) ( - err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := node_id.value() - __address_val := node_address.value() - __last_net_val := node_last_net.value() - __protocol_val := node_protocol.value() - __type_val := node_type.value() - __email_val := node_email.value() - __wallet_val := node_wallet.value() - __free_bandwidth_val := node_free_bandwidth.value() - __free_disk_val := node_free_disk.value() - __piece_count_val := int64(0) - __major_val := node_major.value() - __minor_val := node_minor.value() - __patch_val := node_patch.value() - __hash_val := node_hash.value() - __timestamp_val := node_timestamp.value() - __release_val := node_release.value() - __latency_90_val := node_latency_90.value() - __audit_success_count_val := node_audit_success_count.value() - __total_audit_count_val := node_total_audit_count.value() - __uptime_success_count_val := node_uptime_success_count.value() - __total_uptime_count_val := node_total_uptime_count.value() - __created_at_val := __now - __updated_at_val := __now - __last_contact_success_val := node_last_contact_success.value() - __last_contact_failure_val := node_last_contact_failure.value() - __contained_val := node_contained.value() - __disqualified_val := optional.Disqualified.value() - __audit_reputation_alpha_val := node_audit_reputation_alpha.value() - __audit_reputation_beta_val := node_audit_reputation_beta.value() - __uptime_reputation_alpha_val := node_uptime_reputation_alpha.value() - __uptime_reputation_beta_val := node_uptime_reputation_beta.value() - __exit_initiated_at_val := optional.ExitInitiatedAt.value() - __exit_loop_completed_at_val := 
optional.ExitLoopCompletedAt.value() - __exit_finished_at_val := optional.ExitFinishedAt.value() - __exit_success_val := node_exit_success.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, last_net, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val) - - _, err = obj.driver.Exec(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, 
__total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) Create_User(ctx context.Context, - user_id User_Id_Field, - user_email User_Email_Field, - user_normalized_email User_NormalizedEmail_Field, - user_full_name User_FullName_Field, - user_password_hash User_PasswordHash_Field, - optional User_Create_Fields) ( - user *User, err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := user_id.value() - __email_val := user_email.value() - __normalized_email_val := user_normalized_email.value() - __full_name_val := user_full_name.value() - __short_name_val := optional.ShortName.value() - __password_hash_val := user_password_hash.value() - __status_val := int(0) - __partner_id_val := optional.PartnerId.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastUser(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_Project(ctx context.Context, - project_id Project_Id_Field, - project_name Project_Name_Field, - project_description Project_Description_Field, - project_usage_limit Project_UsageLimit_Field, - project_owner_id Project_OwnerId_Field, - optional Project_Create_Fields) ( - project *Project, err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := project_id.value() - __name_val := project_name.value() - __description_val := project_description.value() - __usage_limit_val := project_usage_limit.value() - __partner_id_val := optional.PartnerId.value() - __owner_id_val := project_owner_id.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastProject(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_ProjectInvoiceStamp(ctx context.Context, - project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field, - project_invoice_stamp_invoice_id ProjectInvoiceStamp_InvoiceId_Field, - project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field, - project_invoice_stamp_end_date ProjectInvoiceStamp_EndDate_Field, - project_invoice_stamp_created_at ProjectInvoiceStamp_CreatedAt_Field) ( - project_invoice_stamp *ProjectInvoiceStamp, err error) { - __project_id_val := project_invoice_stamp_project_id.value() - __invoice_id_val := project_invoice_stamp_invoice_id.value() - __start_date_val := project_invoice_stamp_start_date.value() - __end_date_val := project_invoice_stamp_end_date.value() - __created_at_val := project_invoice_stamp_created_at.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invoice_stamps ( project_id, invoice_id, start_date, end_date, created_at ) VALUES ( ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastProjectInvoiceStamp(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_ProjectMember(ctx context.Context, - project_member_member_id ProjectMember_MemberId_Field, - project_member_project_id ProjectMember_ProjectId_Field) ( - project_member *ProjectMember, err error) { - - __now := obj.db.Hooks.Now().UTC() - __member_id_val := project_member_member_id.value() - __project_id_val := project_member_project_id.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __member_id_val, __project_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __member_id_val, __project_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastProjectMember(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_ApiKey(ctx context.Context, - api_key_id ApiKey_Id_Field, - api_key_project_id ApiKey_ProjectId_Field, - api_key_head ApiKey_Head_Field, - api_key_name ApiKey_Name_Field, - api_key_secret ApiKey_Secret_Field, - optional ApiKey_Create_Fields) ( - api_key *ApiKey, err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := api_key_id.value() - __project_id_val := api_key_project_id.value() - __head_val := api_key_head.value() - __name_val := api_key_name.value() - __secret_val := api_key_secret.value() - __partner_id_val := optional.PartnerId.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastApiKey(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_BucketUsage(ctx context.Context, - bucket_usage_id BucketUsage_Id_Field, - bucket_usage_bucket_id BucketUsage_BucketId_Field, - bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field, - bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field, - bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field, - bucket_usage_remote_segments BucketUsage_RemoteSegments_Field, - bucket_usage_inline_segments BucketUsage_InlineSegments_Field, - bucket_usage_objects BucketUsage_Objects_Field, - bucket_usage_metadata_size BucketUsage_MetadataSize_Field, - bucket_usage_repair_egress BucketUsage_RepairEgress_Field, - bucket_usage_get_egress BucketUsage_GetEgress_Field, - bucket_usage_audit_egress BucketUsage_AuditEgress_Field) ( - bucket_usage *BucketUsage, err error) { - __id_val := bucket_usage_id.value() - __bucket_id_val := bucket_usage_bucket_id.value() - __rollup_end_time_val := bucket_usage_rollup_end_time.value() - __remote_stored_data_val := bucket_usage_remote_stored_data.value() - __inline_stored_data_val := bucket_usage_inline_stored_data.value() - __remote_segments_val := bucket_usage_remote_segments.value() - __inline_segments_val := bucket_usage_inline_segments.value() - __objects_val := bucket_usage_objects.value() - __metadata_size_val := bucket_usage_metadata_size.value() - __repair_egress_val := bucket_usage_repair_egress.value() - __get_egress_val := bucket_usage_get_egress.value() - 
__audit_egress_val := bucket_usage_audit_egress.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_usages ( id, bucket_id, rollup_end_time, remote_stored_data, inline_stored_data, remote_segments, inline_segments, objects, metadata_size, repair_egress, get_egress, audit_egress ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastBucketUsage(ctx, __pk) - -} - -func (obj *sqlite3Impl) CreateNoReturn_SerialNumber(ctx context.Context, - serial_number_serial_number SerialNumber_SerialNumber_Field, - serial_number_bucket_id SerialNumber_BucketId_Field, - serial_number_expires_at SerialNumber_ExpiresAt_Field) ( - err error) { - __serial_number_val := serial_number_serial_number.value() - __bucket_id_val := serial_number_bucket_id.value() - __expires_at_val := serial_number_expires_at.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO serial_numbers ( serial_number, bucket_id, expires_at ) VALUES ( ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val) - - _, err = obj.driver.Exec(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_UsedSerial(ctx context.Context, - used_serial_serial_number_id UsedSerial_SerialNumberId_Field, - used_serial_storage_node_id UsedSerial_StorageNodeId_Field) ( - err error) { - __serial_number_id_val := used_serial_serial_number_id.value() - __storage_node_id_val := used_serial_storage_node_id.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO used_serials ( serial_number_id, storage_node_id ) VALUES ( ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __serial_number_id_val, __storage_node_id_val) - - _, err = obj.driver.Exec(__stmt, __serial_number_id_val, __storage_node_id_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_BucketStorageTally(ctx context.Context, - bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field, - bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field, - bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field, - bucket_storage_tally_inline BucketStorageTally_Inline_Field, - bucket_storage_tally_remote BucketStorageTally_Remote_Field, - bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field, - bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field, - bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field, - bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) ( - err error) { - __bucket_name_val := bucket_storage_tally_bucket_name.value() - __project_id_val := bucket_storage_tally_project_id.value() - __interval_start_val := 
bucket_storage_tally_interval_start.value() - __inline_val := bucket_storage_tally_inline.value() - __remote_val := bucket_storage_tally_remote.value() - __remote_segments_count_val := bucket_storage_tally_remote_segments_count.value() - __inline_segments_count_val := bucket_storage_tally_inline_segments_count.value() - __object_count_val := bucket_storage_tally_object_count.value() - __metadata_size_val := bucket_storage_tally_metadata_size.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val) - - _, err = obj.driver.Exec(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_StoragenodeStorageTally(ctx context.Context, - storagenode_storage_tally_node_id StoragenodeStorageTally_NodeId_Field, - storagenode_storage_tally_interval_end_time StoragenodeStorageTally_IntervalEndTime_Field, - storagenode_storage_tally_data_total StoragenodeStorageTally_DataTotal_Field) ( - err error) { - __node_id_val := storagenode_storage_tally_node_id.value() - __interval_end_time_val := storagenode_storage_tally_interval_end_time.value() - __data_total_val := storagenode_storage_tally_data_total.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_storage_tallies ( node_id, interval_end_time, data_total ) VALUES ( ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val) - - _, err = obj.driver.Exec(__stmt, __node_id_val, __interval_end_time_val, __data_total_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_PeerIdentity(ctx context.Context, - peer_identity_node_id PeerIdentity_NodeId_Field, - peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field, - peer_identity_chain PeerIdentity_Chain_Field) ( - err error) { - - __now := obj.db.Hooks.Now().UTC() - __node_id_val := peer_identity_node_id.value() - __leaf_serial_number_val := peer_identity_leaf_serial_number.value() - __chain_val := peer_identity_chain.value() - __updated_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val) - - _, err = obj.driver.Exec(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) Create_RegistrationToken(ctx context.Context, - registration_token_secret RegistrationToken_Secret_Field, - registration_token_project_limit RegistrationToken_ProjectLimit_Field, - optional RegistrationToken_Create_Fields) ( - registration_token *RegistrationToken, err error) { - - __now := obj.db.Hooks.Now().UTC() - __secret_val := registration_token_secret.value() - __owner_id_val := optional.OwnerId.value() - __project_limit_val := registration_token_project_limit.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastRegistrationToken(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_ResetPasswordToken(ctx context.Context, - reset_password_token_secret ResetPasswordToken_Secret_Field, - reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) ( - reset_password_token *ResetPasswordToken, err error) { - - __now := obj.db.Hooks.Now().UTC() - __secret_val := reset_password_token_secret.value() - __owner_id_val := reset_password_token_owner_id.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO reset_password_tokens ( secret, owner_id, created_at ) VALUES ( ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __secret_val, __owner_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __secret_val, __owner_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastResetPasswordToken(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_Offer(ctx context.Context, - offer_name Offer_Name_Field, - offer_description Offer_Description_Field, - offer_award_credit_in_cents Offer_AwardCreditInCents_Field, - offer_invitee_credit_in_cents Offer_InviteeCreditInCents_Field, - offer_expires_at Offer_ExpiresAt_Field, - offer_status Offer_Status_Field, - offer_type Offer_Type_Field, - optional Offer_Create_Fields) ( - offer *Offer, err error) { - - __now := obj.db.Hooks.Now().UTC() - __name_val := offer_name.value() - __description_val := offer_description.value() - __award_credit_in_cents_val := offer_award_credit_in_cents.value() - __invitee_credit_in_cents_val := offer_invitee_credit_in_cents.value() - __award_credit_duration_days_val := optional.AwardCreditDurationDays.value() - __invitee_credit_duration_days_val := optional.InviteeCreditDurationDays.value() - __redeemable_cap_val := optional.RedeemableCap.value() - __expires_at_val := offer_expires_at.value() - __created_at_val := __now - __status_val := offer_status.value() - __type_val := offer_type.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO offers ( name, description, award_credit_in_cents, invitee_credit_in_cents, award_credit_duration_days, invitee_credit_duration_days, redeemable_cap, expires_at, created_at, status, type ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val) - - __res, err := obj.driver.Exec(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastOffer(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_UserCredit(ctx context.Context, - user_credit_user_id UserCredit_UserId_Field, - user_credit_offer_id UserCredit_OfferId_Field, - user_credit_type UserCredit_Type_Field, - user_credit_credits_earned_in_cents UserCredit_CreditsEarnedInCents_Field, - user_credit_expires_at UserCredit_ExpiresAt_Field, - optional UserCredit_Create_Fields) ( - user_credit *UserCredit, err error) { - - __now := obj.db.Hooks.Now().UTC() - __user_id_val := user_credit_user_id.value() - __offer_id_val := user_credit_offer_id.value() - __referred_by_val := optional.ReferredBy.value() - __type_val := user_credit_type.value() - __credits_earned_in_cents_val := user_credit_credits_earned_in_cents.value() - __credits_used_in_cents_val := int(0) - __expires_at_val := user_credit_expires_at.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO user_credits ( user_id, offer_id, referred_by, type, credits_earned_in_cents, credits_used_in_cents, expires_at, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastUserCredit(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_BucketMetainfo(ctx context.Context, - bucket_metainfo_id BucketMetainfo_Id_Field, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name BucketMetainfo_Name_Field, - bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field, - bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field, - bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field, - bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field, - bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field, - bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field, - bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field, - bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field, - bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field, - bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field, - optional BucketMetainfo_Create_Fields) ( - bucket_metainfo *BucketMetainfo, err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := bucket_metainfo_id.value() - 
__project_id_val := bucket_metainfo_project_id.value() - __name_val := bucket_metainfo_name.value() - __partner_id_val := optional.PartnerId.value() - __path_cipher_val := bucket_metainfo_path_cipher.value() - __created_at_val := __now - __default_segment_size_val := bucket_metainfo_default_segment_size.value() - __default_encryption_cipher_suite_val := bucket_metainfo_default_encryption_cipher_suite.value() - __default_encryption_block_size_val := bucket_metainfo_default_encryption_block_size.value() - __default_redundancy_algorithm_val := bucket_metainfo_default_redundancy_algorithm.value() - __default_redundancy_share_size_val := bucket_metainfo_default_redundancy_share_size.value() - __default_redundancy_required_shares_val := bucket_metainfo_default_redundancy_required_shares.value() - __default_redundancy_repair_shares_val := bucket_metainfo_default_redundancy_repair_shares.value() - __default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value() - __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastBucketMetainfo(ctx, __pk) - -} - -func (obj *sqlite3Impl) CreateNoReturn_GracefulExitProgress(ctx context.Context, - graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field, - graceful_exit_progress_bytes_transferred GracefulExitProgress_BytesTransferred_Field) ( - err error) { - - __now := obj.db.Hooks.Now().UTC() - __node_id_val := graceful_exit_progress_node_id.value() - __bytes_transferred_val := graceful_exit_progress_bytes_transferred.value() - __pieces_transferred_val := int64(0) - __pieces_failed_val := int64(0) - __updated_at_val := __now.UTC() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_progress ( node_id, bytes_transferred, pieces_transferred, pieces_failed, updated_at ) VALUES ( ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val) - - _, err = obj.driver.Exec(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) CreateNoReturn_GracefulExitTransferQueue(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field, - graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field, - graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field, - graceful_exit_transfer_queue_durability_ratio GracefulExitTransferQueue_DurabilityRatio_Field, - optional GracefulExitTransferQueue_Create_Fields) ( - err error) { - - __now := obj.db.Hooks.Now().UTC() - __node_id_val := graceful_exit_transfer_queue_node_id.value() - __path_val := graceful_exit_transfer_queue_path.value() - __piece_num_val := graceful_exit_transfer_queue_piece_num.value() - __durability_ratio_val := graceful_exit_transfer_queue_durability_ratio.value() - __queued_at_val := __now.UTC() - __requested_at_val := optional.RequestedAt.value() - __last_failed_at_val := optional.LastFailedAt.value() - __last_failed_code_val := optional.LastFailedCode.value() - __failed_count_val := optional.FailedCount.value() - __finished_at_val := optional.FinishedAt.value() - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_transfer_queue ( node_id, path, piece_num, durability_ratio, queued_at, requested_at, last_failed_at, last_failed_code, failed_count, finished_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __node_id_val, __path_val, __piece_num_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val) - - _, err = obj.driver.Exec(__stmt, __node_id_val, __path_val, __piece_num_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val) - if err != nil { - return obj.makeErr(err) - } - return nil - -} - -func (obj *sqlite3Impl) Create_StripeCustomer(ctx context.Context, - stripe_customer_user_id StripeCustomer_UserId_Field, - stripe_customer_customer_id StripeCustomer_CustomerId_Field) ( - stripe_customer *StripeCustomer, err error) { - - __now := obj.db.Hooks.Now().UTC() - __user_id_val := stripe_customer_user_id.value() - __customer_id_val := stripe_customer_customer_id.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripe_customers ( user_id, customer_id, created_at ) VALUES ( ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __user_id_val, __customer_id_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __user_id_val, __customer_id_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastStripeCustomer(ctx, __pk) - -} - -func (obj *sqlite3Impl) Create_CoinpaymentsTransaction(ctx context.Context, - coinpayments_transaction_id CoinpaymentsTransaction_Id_Field, - coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field, - coinpayments_transaction_address CoinpaymentsTransaction_Address_Field, - coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field, - coinpayments_transaction_received CoinpaymentsTransaction_Received_Field, - coinpayments_transaction_status CoinpaymentsTransaction_Status_Field, - coinpayments_transaction_key CoinpaymentsTransaction_Key_Field) ( - coinpayments_transaction *CoinpaymentsTransaction, err error) { - - __now := obj.db.Hooks.Now().UTC() - __id_val := coinpayments_transaction_id.value() - __user_id_val := coinpayments_transaction_user_id.value() - __address_val := coinpayments_transaction_address.value() - __amount_val := coinpayments_transaction_amount.value() - __received_val := coinpayments_transaction_received.value() - __status_val := coinpayments_transaction_status.value() - __key_val := coinpayments_transaction_key.value() - __created_at_val := __now - - var __embed_stmt = __sqlbundle_Literal("INSERT INTO coinpayments_transactions ( id, user_id, address, amount, received, status, key, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? 
)") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __created_at_val) - - __res, err := obj.driver.Exec(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __created_at_val) - if err != nil { - return nil, obj.makeErr(err) - } - __pk, err := __res.LastInsertId() - if err != nil { - return nil, obj.makeErr(err) - } - return obj.getLastCoinpaymentsTransaction(ctx, __pk) - -} - -func (obj *sqlite3Impl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context, - value_attribution_project_id ValueAttribution_ProjectId_Field, - value_attribution_bucket_name ValueAttribution_BucketName_Field) ( - value_attribution *ValueAttribution, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?") - - var __values []interface{} - __values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - value_attribution = &ValueAttribution{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated) - if err != nil { - return nil, obj.makeErr(err) - } - return value_attribution, nil - -} - -func (obj *sqlite3Impl) Get_PendingAudits_By_NodeId(ctx context.Context, - pending_audits_node_id PendingAudits_NodeId_Field) ( - pending_audits *PendingAudits, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?") - - var __values []interface{} - __values = append(__values, pending_audits_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - pending_audits = &PendingAudits{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path) - if err != nil { - return nil, obj.makeErr(err) - } - return pending_audits, nil - -} - -func (obj *sqlite3Impl) Get_Irreparabledb_By_Segmentpath(ctx context.Context, - irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( - irreparabledb *Irreparabledb, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?") - - var __values []interface{} - __values = append(__values, irreparabledb_segmentpath.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, 
__values...) - - irreparabledb = &Irreparabledb{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount) - if err != nil { - return nil, obj.makeErr(err) - } - return irreparabledb, nil - -} - -func (obj *sqlite3Impl) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context, - irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field, - limit int, offset int64) ( - rows []*Irreparabledb, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath > ? ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?") - - var __values []interface{} - __values = append(__values, irreparabledb_segmentpath_greater.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - irreparabledb := &Irreparabledb{} - err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, irreparabledb) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context, - accounting_timestamps_name AccountingTimestamps_Name_Field) ( - row *Value_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?") - - var __values []interface{} - __values = append(__values, accounting_timestamps_name.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - row = &Value_Row{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return row, nil - -} - -func (obj *sqlite3Impl) Get_AccountingRollup_By_Id(ctx context.Context, - accounting_rollup_id AccountingRollup_Id_Field) ( - accounting_rollup *AccountingRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?") - - var __values []interface{} - __values = append(__values, accounting_rollup_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - accounting_rollup = &AccountingRollup{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal) - if err != nil { - return nil, obj.makeErr(err) - } - return accounting_rollup, nil - -} - -func (obj *sqlite3Impl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context, - accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) ( - rows []*AccountingRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total 
FROM accounting_rollups WHERE accounting_rollups.start_time >= ?") - - var __values []interface{} - __values = append(__values, accounting_rollup_start_time_greater_or_equal.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - accounting_rollup := &AccountingRollup{} - err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, accounting_rollup) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_Node_By_Id(ctx context.Context, - node_id Node_Id_Field) ( - node *Node, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?") - - var __values []interface{} - __values = append(__values, node_id.value()) - - var __stmt = 
__sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - node = &Node{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) - if err != nil { - return nil, obj.makeErr(err) - } - return node, nil - -} - -func (obj *sqlite3Impl) All_Node_Id(ctx context.Context) ( - rows []*Id_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - row := &Id_Row{} - err = __rows.Scan(&row.Id) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, row) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Context, - node_id_greater_or_equal Node_Id_Field, - limit int, offset int64) ( - rows []*Node, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?") - - var __values []interface{} - __values = append(__values, node_id_greater_or_equal.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - node := &Node{} - err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, node) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context, - node_id_greater_or_equal Node_Id_Field, - limit int, offset int64) ( - rows []*Id_LastNet_Address_Protocol_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?") - - var __values []interface{} - __values = append(__values, node_id_greater_or_equal.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - row := &Id_LastNet_Address_Protocol_Row{} - err = __rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, row) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) ( - rows []*Id_PieceCount_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.piece_count FROM nodes WHERE nodes.piece_count != 0") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - row := &Id_PieceCount_Row{} - err = __rows.Scan(&row.Id, &row.PieceCount) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, row) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context, - user_normalized_email User_NormalizedEmail_Field) ( - user *User, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") - - var __values []interface{} - __values = append(__values, user_normalized_email.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - if !__rows.Next() { - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return nil, makeErr(sql.ErrNoRows) - } - - user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - - if __rows.Next() { - return nil, tooManyRows("User_By_NormalizedEmail_And_Status_Not_Number") - } - - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - - return user, nil - -} - -func (obj *sqlite3Impl) Get_User_By_Id(ctx context.Context, - user_id User_Id_Field) ( - user *User, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?") - - var __values []interface{} - __values = append(__values, user_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - user = &User{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return user, nil - -} - -func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context, - project_id Project_Id_Field) ( - project *Project, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?") - - var __values []interface{} - __values = append(__values, project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - project = &Project{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return project, nil - -} - -func (obj *sqlite3Impl) All_Project(ctx context.Context) ( - rows []*Project, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context, - project_created_at_less Project_CreatedAt_Field) ( - rows []*Project, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at") - - var __values []interface{} - __values = append(__values, project_created_at_less.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context, - project_member_member_id ProjectMember_MemberId_Field) ( - rows []*Project, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name") - - var __values []interface{} - __values = append(__values, project_member_member_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project := &Project{} - err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Context, - project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field, - project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) ( - project_invoice_stamp *ProjectInvoiceStamp, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? AND project_invoice_stamps.start_date = ? LIMIT 2") - - var __values []interface{} - __values = append(__values, project_invoice_stamp_project_id.value(), project_invoice_stamp_start_date.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - if !__rows.Next() { - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return nil, makeErr(sql.ErrNoRows) - } - - project_invoice_stamp = &ProjectInvoiceStamp{} - err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - - if __rows.Next() { - return nil, tooManyRows("ProjectInvoiceStamp_By_ProjectId_And_StartDate") - } - - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - - return project_invoice_stamp, nil - -} - -func (obj *sqlite3Impl) All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx context.Context, - project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field) ( - rows []*ProjectInvoiceStamp, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? ORDER BY project_invoice_stamps.start_date DESC") - - var __values []interface{} - __values = append(__values, project_invoice_stamp_project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project_invoice_stamp := &ProjectInvoiceStamp{} - err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project_invoice_stamp) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_ProjectMember_By_MemberId(ctx context.Context, - project_member_member_id ProjectMember_MemberId_Field) ( - rows []*ProjectMember, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?") - - var __values []interface{} - __values = append(__values, project_member_member_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project_member := &ProjectMember{} - err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project_member) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Limited_ProjectMember_By_ProjectId(ctx context.Context, - project_member_project_id ProjectMember_ProjectId_Field, - limit int, offset int64) ( - rows []*ProjectMember, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.project_id = ? LIMIT ? 
OFFSET ?") - - var __values []interface{} - __values = append(__values, project_member_project_id.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - project_member := &ProjectMember{} - err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, project_member) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_ApiKey_By_Id(ctx context.Context, - api_key_id ApiKey_Id_Field) ( - api_key *ApiKey, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?") - - var __values []interface{} - __values = append(__values, api_key_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return api_key, nil - -} - -func (obj *sqlite3Impl) Get_ApiKey_By_Head(ctx context.Context, - api_key_head ApiKey_Head_Field) ( - api_key *ApiKey, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?") - - var __values []interface{} - __values = append(__values, api_key_head.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return api_key, nil - -} - -func (obj *sqlite3Impl) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context, - api_key_name ApiKey_Name_Field, - api_key_project_id ApiKey_ProjectId_Field) ( - api_key *ApiKey, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.name = ? AND api_keys.project_id = ?") - - var __values []interface{} - __values = append(__values, api_key_name.value(), api_key_project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return api_key, nil - -} - -func (obj *sqlite3Impl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context, - api_key_project_id ApiKey_ProjectId_Field) ( - rows []*ApiKey, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name") - - var __values []interface{} - __values = append(__values, api_key_project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - api_key := &ApiKey{} - err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, api_key) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_BucketUsage_By_Id(ctx context.Context, - bucket_usage_id BucketUsage_Id_Field) ( - bucket_usage *BucketUsage, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.id = ?") - - var __values 
[]interface{} - __values = append(__values, bucket_usage_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - bucket_usage = &BucketUsage{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress) - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_usage, nil - -} - -func (obj *sqlite3Impl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context, - bucket_usage_bucket_id BucketUsage_BucketId_Field, - bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field, - bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field, - limit int, offset int64) ( - rows []*BucketUsage, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time LIMIT ? 
OFFSET ?") - - var __values []interface{} - __values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_usage := &BucketUsage{} - err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_usage) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context, - bucket_usage_bucket_id BucketUsage_BucketId_Field, - bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field, - bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field, - limit int, offset int64) ( - rows []*BucketUsage, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? 
AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time DESC LIMIT ? OFFSET ?") - - var __values []interface{} - __values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_usage := &BucketUsage{} - err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_usage) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Find_SerialNumber_By_SerialNumber(ctx context.Context, - serial_number_serial_number SerialNumber_SerialNumber_Field) ( - serial_number *SerialNumber, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE serial_numbers.serial_number = ? LIMIT 2") - - var __values []interface{} - __values = append(__values, serial_number_serial_number.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - if !__rows.Next() { - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return nil, nil - } - - serial_number = &SerialNumber{} - err = __rows.Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt) - if err != nil { - return nil, obj.makeErr(err) - } - - if __rows.Next() { - return nil, tooManyRows("SerialNumber_By_SerialNumber") - } - - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - - return serial_number, nil - -} - -func (obj *sqlite3Impl) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context, - bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field, - bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field, - bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field, - bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) ( - bucket_bandwidth_rollup *BucketBandwidthRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_name = ? AND bucket_bandwidth_rollups.project_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?") - - var __values []interface{} - __values = append(__values, bucket_bandwidth_rollup_bucket_name.value(), bucket_bandwidth_rollup_project_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - bucket_bandwidth_rollup = &BucketBandwidthRollup{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_bandwidth_rollup, nil - -} - -func (obj *sqlite3Impl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context, - bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) ( - bucket_storage_tally *BucketStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? ORDER BY bucket_storage_tallies.interval_start DESC LIMIT 1 OFFSET 0") - - var __values []interface{} - __values = append(__values, bucket_storage_tally_project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - if !__rows.Next() { - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return nil, nil - } - - bucket_storage_tally = &BucketStorageTally{} - err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize) - if err != nil { - return nil, obj.makeErr(err) - } - - return bucket_storage_tally, nil - -} - -func (obj *sqlite3Impl) All_BucketStorageTally(ctx context.Context) ( - rows []*BucketStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_storage_tally := &BucketStorageTally{} - err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_storage_tally) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context, - bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field, - bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field, - bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field, - bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) ( - rows []*BucketStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? 
ORDER BY bucket_storage_tallies.interval_start DESC") - - var __values []interface{} - __values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_storage_tally := &BucketStorageTally{} - err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_storage_tally) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context, - storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field, - storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field, - storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) ( - storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE 
storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start = ? AND storagenode_bandwidth_rollups.action = ?") - - var __values []interface{} - __values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start.value(), storagenode_bandwidth_rollup_action.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - storagenode_bandwidth_rollup = &StoragenodeBandwidthRollup{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return storagenode_bandwidth_rollup, nil - -} - -func (obj *sqlite3Impl) All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context, - storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field) ( - rows []*StoragenodeBandwidthRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ?") - - var __values []interface{} - __values = append(__values, storagenode_bandwidth_rollup_interval_start_greater_or_equal.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{} - err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, storagenode_bandwidth_rollup) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_StoragenodeStorageTally_By_Id(ctx context.Context, - storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) ( - storagenode_storage_tally *StoragenodeStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?") - - var __values []interface{} - __values = append(__values, storagenode_storage_tally_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - storagenode_storage_tally = &StoragenodeStorageTally{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal) - if err != nil { - return nil, obj.makeErr(err) - } - return storagenode_storage_tally, nil - -} - -func (obj *sqlite3Impl) All_StoragenodeStorageTally(ctx context.Context) ( - rows []*StoragenodeStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - storagenode_storage_tally := &StoragenodeStorageTally{} - err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, storagenode_storage_tally) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context, - storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) ( - rows []*StoragenodeStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE 
storagenode_storage_tallies.interval_end_time >= ?") - - var __values []interface{} - __values = append(__values, storagenode_storage_tally_interval_end_time_greater_or_equal.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - storagenode_storage_tally := &StoragenodeStorageTally{} - err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, storagenode_storage_tally) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_PeerIdentity_By_NodeId(ctx context.Context, - peer_identity_node_id PeerIdentity_NodeId_Field) ( - peer_identity *PeerIdentity, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?") - - var __values []interface{} - __values = append(__values, peer_identity_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - peer_identity = &PeerIdentity{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return peer_identity, nil - -} - -func (obj *sqlite3Impl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context, - peer_identity_node_id PeerIdentity_NodeId_Field) ( - row *LeafSerialNumber_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?") - - var __values []interface{} - __values = append(__values, peer_identity_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - row = &LeafSerialNumber_Row{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.LeafSerialNumber) - if err != nil { - return nil, obj.makeErr(err) - } - return row, nil - -} - -func (obj *sqlite3Impl) Get_RegistrationToken_By_Secret(ctx context.Context, - registration_token_secret RegistrationToken_Secret_Field) ( - registration_token *RegistrationToken, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?") - - var __values []interface{} - __values = append(__values, registration_token_secret.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - registration_token = &RegistrationToken{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return registration_token, nil - -} - -func (obj *sqlite3Impl) Get_RegistrationToken_By_OwnerId(ctx context.Context, - registration_token_owner_id RegistrationToken_OwnerId_Field) ( - registration_token *RegistrationToken, err error) { - - var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}} - - var __values []interface{} - __values = append(__values) - - if !registration_token_owner_id.isnull() { - __cond_0.Null = false - __values = append(__values, registration_token_owner_id.value()) - } - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - registration_token = &RegistrationToken{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return registration_token, nil - -} - -func (obj *sqlite3Impl) Get_ResetPasswordToken_By_Secret(ctx context.Context, - reset_password_token_secret ResetPasswordToken_Secret_Field) ( - reset_password_token *ResetPasswordToken, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.secret = ?") - - var __values []interface{} - __values = append(__values, reset_password_token_secret.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - reset_password_token = &ResetPasswordToken{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return reset_password_token, nil - -} - -func (obj *sqlite3Impl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context, - reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) ( - reset_password_token *ResetPasswordToken, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.owner_id = ?") - - var __values []interface{} - __values = append(__values, reset_password_token_owner_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - reset_password_token = &ResetPasswordToken{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return reset_password_token, nil - -} - -func (obj *sqlite3Impl) Get_Offer_By_Id(ctx context.Context, - offer_id Offer_Id_Field) ( - offer *Offer, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers WHERE offers.id = ?") - - var __values []interface{} - __values = append(__values, offer_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - offer = &Offer{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type) - if err != nil { - return nil, obj.makeErr(err) - } - return offer, nil - -} - -func (obj *sqlite3Impl) All_Offer_OrderBy_Asc_Id(ctx context.Context) ( - rows []*Offer, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers ORDER BY offers.id") - - var __values []interface{} - __values = append(__values) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - offer := &Offer{} - err = __rows.Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, offer) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx context.Context, - user_credit_user_id UserCredit_UserId_Field, - user_credit_expires_at_greater UserCredit_ExpiresAt_Field) ( - rows []*UserCredit, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at FROM user_credits WHERE user_credits.user_id = ? AND user_credits.expires_at > ? AND user_credits.credits_used_in_cents < user_credits.credits_earned_in_cents ORDER BY user_credits.expires_at") - - var __values []interface{} - __values = append(__values, user_credit_user_id.value(), user_credit_expires_at_greater.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - user_credit := &UserCredit{} - err = __rows.Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, &user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, user_credit) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Count_UserCredit_By_ReferredBy(ctx context.Context, - user_credit_referred_by UserCredit_ReferredBy_Field) ( - count int64, err error) { - - var __cond_0 = &__sqlbundle_Condition{Left: "user_credits.referred_by", Equal: true, Right: "?", Null: true} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT COUNT(*) FROM user_credits WHERE "), __cond_0}} - - var __values []interface{} - __values = append(__values) - - if !user_credit_referred_by.isnull() { - __cond_0.Null = false - __values = append(__values, user_credit_referred_by.value()) - } - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - err = obj.driver.QueryRow(__stmt, __values...).Scan(&count) - if err != nil { - return 0, obj.makeErr(err) - } - - return count, nil - -} - -func (obj *sqlite3Impl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name BucketMetainfo_Name_Field) ( - bucket_metainfo *BucketMetainfo, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") - - var __values []interface{} - __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_metainfo, nil - -} - -func (obj *sqlite3Impl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field, - limit int, offset int64) ( - rows []*BucketMetainfo, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") - - var __values []interface{} - __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_metainfo) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name_greater BucketMetainfo_Name_Field, - limit int, offset int64) ( - rows []*BucketMetainfo, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, 
bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") - - var __values []interface{} - __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value()) - - __values = append(__values, limit, offset) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __rows, err := obj.driver.Query(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - defer __rows.Close() - - for __rows.Next() { - bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) - if err != nil { - return nil, obj.makeErr(err) - } - rows = append(rows, bucket_metainfo) - } - if err := __rows.Err(); err != nil { - return nil, obj.makeErr(err) - } - return rows, nil - -} - -func (obj *sqlite3Impl) Get_GracefulExitProgress_By_NodeId(ctx context.Context, - graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) ( - graceful_exit_progress *GracefulExitProgress, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, 
graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?") - - var __values []interface{} - __values = append(__values, graceful_exit_progress_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - graceful_exit_progress = &GracefulExitProgress{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return graceful_exit_progress, nil - -} - -func (obj *sqlite3Impl) Get_GracefulExitTransferQueue_By_NodeId_And_Path(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field, - graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field) ( - graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ?") - - var __values []interface{} - __values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - graceful_exit_transfer_queue = &GracefulExitTransferQueue{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, &graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return graceful_exit_transfer_queue, nil - -} - -func (obj *sqlite3Impl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context, - stripe_customer_user_id StripeCustomer_UserId_Field) ( - row *CustomerId_Row, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.customer_id FROM stripe_customers WHERE stripe_customers.user_id = ?") - - var __values []interface{} - __values = append(__values, stripe_customer_user_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - row = &CustomerId_Row{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.CustomerId) - if err != nil { - return nil, obj.makeErr(err) - } - return row, nil - -} - -func (obj *sqlite3Impl) Update_PendingAudits_By_NodeId(ctx context.Context, - pending_audits_node_id PendingAudits_NodeId_Field, - update PendingAudits_Update_Fields) ( - pending_audits *PendingAudits, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE pending_audits SET "), __sets, __sqlbundle_Literal(" WHERE pending_audits.node_id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.ReverifyCount._set { - __values = append(__values, update.ReverifyCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("reverify_count = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return nil, emptyUpdate() - } - - __args = append(__args, pending_audits_node_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - pending_audits = &PendingAudits{} - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return pending_audits, nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context, - irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, - update Irreparabledb_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Segmentdetail._set { - __values = append(__values, update.Segmentdetail.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?")) - } - - if update.PiecesLostCount._set { - __values = append(__values, update.PiecesLostCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?")) - } - - if update.SegDamagedUnixSec._set { - __values = append(__values, update.SegDamagedUnixSec.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?")) - } - - if update.RepairAttemptCount._set { - __values = append(__values, update.RepairAttemptCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return emptyUpdate() - } - - __args = append(__args, irreparabledb_segmentpath.value()) - - __values = append(__values, __args...) 
- __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context, - accounting_timestamps_name AccountingTimestamps_Name_Field, - update AccountingTimestamps_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Value._set { - __values = append(__values, update.Value.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return emptyUpdate() - } - - __args = append(__args, accounting_timestamps_name.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) Update_Node_By_Id(ctx context.Context, - node_id Node_Id_Field, - update Node_Update_Fields) ( - node *Node, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Address._set { - __values = append(__values, update.Address.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?")) - } - - if update.LastNet._set { - __values = append(__values, update.LastNet.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) - } - - if update.Protocol._set { - __values = append(__values, update.Protocol.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) - } - - if update.Type._set { - __values = append(__values, update.Type.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?")) - } - - if update.Email._set { - __values = append(__values, update.Email.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?")) - } - - if update.Wallet._set { - __values = append(__values, update.Wallet.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?")) - } - - if update.FreeBandwidth._set { - __values = append(__values, update.FreeBandwidth.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?")) - } - - if update.FreeDisk._set { - __values = append(__values, update.FreeDisk.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?")) - } - - if update.PieceCount._set { - __values = append(__values, update.PieceCount.value()) - __sets_sql.SQLs = 
append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?")) - } - - if update.Major._set { - __values = append(__values, update.Major.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?")) - } - - if update.Minor._set { - __values = append(__values, update.Minor.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?")) - } - - if update.Patch._set { - __values = append(__values, update.Patch.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?")) - } - - if update.Hash._set { - __values = append(__values, update.Hash.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?")) - } - - if update.Timestamp._set { - __values = append(__values, update.Timestamp.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?")) - } - - if update.Release._set { - __values = append(__values, update.Release.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?")) - } - - if update.Latency90._set { - __values = append(__values, update.Latency90.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?")) - } - - if update.AuditSuccessCount._set { - __values = append(__values, update.AuditSuccessCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?")) - } - - if update.TotalAuditCount._set { - __values = append(__values, update.TotalAuditCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?")) - } - - if update.UptimeSuccessCount._set { - __values = append(__values, update.UptimeSuccessCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?")) - } - - if update.TotalUptimeCount._set { - __values = append(__values, update.TotalUptimeCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, 
__sqlbundle_Literal("total_uptime_count = ?")) - } - - if update.LastContactSuccess._set { - __values = append(__values, update.LastContactSuccess.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?")) - } - - if update.LastContactFailure._set { - __values = append(__values, update.LastContactFailure.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?")) - } - - if update.Contained._set { - __values = append(__values, update.Contained.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?")) - } - - if update.Disqualified._set { - __values = append(__values, update.Disqualified.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?")) - } - - if update.AuditReputationAlpha._set { - __values = append(__values, update.AuditReputationAlpha.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?")) - } - - if update.AuditReputationBeta._set { - __values = append(__values, update.AuditReputationBeta.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?")) - } - - if update.UptimeReputationAlpha._set { - __values = append(__values, update.UptimeReputationAlpha.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?")) - } - - if update.UptimeReputationBeta._set { - __values = append(__values, update.UptimeReputationBeta.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?")) - } - - if update.ExitInitiatedAt._set { - __values = append(__values, update.ExitInitiatedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?")) - } - - if update.ExitLoopCompletedAt._set { - __values = append(__values, update.ExitLoopCompletedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, 
__sqlbundle_Literal("exit_loop_completed_at = ?")) - } - - if update.ExitFinishedAt._set { - __values = append(__values, update.ExitFinishedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?")) - } - - if update.ExitSuccess._set { - __values = append(__values, update.ExitSuccess.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?")) - } - - __now := obj.db.Hooks.Now().UTC() - - __values = append(__values, __now) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?")) - - __args = append(__args, node_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - node = &Node{} - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return node, nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_Node_By_Id(ctx context.Context, - node_id Node_Id_Field, - update Node_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Address._set { - __values = append(__values, update.Address.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?")) - } - - if update.LastNet._set { - __values = append(__values, update.LastNet.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) - } - - if update.Protocol._set { - __values = append(__values, update.Protocol.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) - } - - if update.Type._set { - __values = append(__values, update.Type.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?")) - } - - if 
update.Email._set { - __values = append(__values, update.Email.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?")) - } - - if update.Wallet._set { - __values = append(__values, update.Wallet.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?")) - } - - if update.FreeBandwidth._set { - __values = append(__values, update.FreeBandwidth.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?")) - } - - if update.FreeDisk._set { - __values = append(__values, update.FreeDisk.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?")) - } - - if update.PieceCount._set { - __values = append(__values, update.PieceCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?")) - } - - if update.Major._set { - __values = append(__values, update.Major.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?")) - } - - if update.Minor._set { - __values = append(__values, update.Minor.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?")) - } - - if update.Patch._set { - __values = append(__values, update.Patch.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?")) - } - - if update.Hash._set { - __values = append(__values, update.Hash.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?")) - } - - if update.Timestamp._set { - __values = append(__values, update.Timestamp.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?")) - } - - if update.Release._set { - __values = append(__values, update.Release.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?")) - } - - if update.Latency90._set { - __values = append(__values, update.Latency90.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = 
?")) - } - - if update.AuditSuccessCount._set { - __values = append(__values, update.AuditSuccessCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?")) - } - - if update.TotalAuditCount._set { - __values = append(__values, update.TotalAuditCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?")) - } - - if update.UptimeSuccessCount._set { - __values = append(__values, update.UptimeSuccessCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?")) - } - - if update.TotalUptimeCount._set { - __values = append(__values, update.TotalUptimeCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?")) - } - - if update.LastContactSuccess._set { - __values = append(__values, update.LastContactSuccess.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?")) - } - - if update.LastContactFailure._set { - __values = append(__values, update.LastContactFailure.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?")) - } - - if update.Contained._set { - __values = append(__values, update.Contained.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?")) - } - - if update.Disqualified._set { - __values = append(__values, update.Disqualified.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?")) - } - - if update.AuditReputationAlpha._set { - __values = append(__values, update.AuditReputationAlpha.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?")) - } - - if update.AuditReputationBeta._set { - __values = append(__values, update.AuditReputationBeta.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?")) - } - - if 
update.UptimeReputationAlpha._set { - __values = append(__values, update.UptimeReputationAlpha.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?")) - } - - if update.UptimeReputationBeta._set { - __values = append(__values, update.UptimeReputationBeta.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?")) - } - - if update.ExitInitiatedAt._set { - __values = append(__values, update.ExitInitiatedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?")) - } - - if update.ExitLoopCompletedAt._set { - __values = append(__values, update.ExitLoopCompletedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?")) - } - - if update.ExitFinishedAt._set { - __values = append(__values, update.ExitFinishedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?")) - } - - if update.ExitSuccess._set { - __values = append(__values, update.ExitSuccess.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?")) - } - - __now := obj.db.Hooks.Now().UTC() - - __values = append(__values, __now) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?")) - - __args = append(__args, node_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) Update_User_By_Id(ctx context.Context, - user_id User_Id_Field, - update User_Update_Fields) ( - user *User, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Email._set { - __values = append(__values, update.Email.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?")) - } - - if update.NormalizedEmail._set { - __values = append(__values, update.NormalizedEmail.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("normalized_email = ?")) - } - - if update.FullName._set { - __values = append(__values, update.FullName.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("full_name = ?")) - } - - if update.ShortName._set { - __values = append(__values, update.ShortName.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("short_name = ?")) - } - - if update.PasswordHash._set { - __values = append(__values, update.PasswordHash.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?")) - } - - if update.Status._set { - __values = append(__values, update.Status.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return nil, emptyUpdate() - } - - __args = append(__args, user_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - user = &User{} - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) - - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return user, nil -} - -func (obj *sqlite3Impl) Update_Project_By_Id(ctx context.Context, - project_id Project_Id_Field, - update Project_Update_Fields) ( - project *Project, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Description._set { - __values = append(__values, update.Description.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?")) - } - - if update.UsageLimit._set { - __values = append(__values, update.UsageLimit.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return nil, emptyUpdate() - } - - __args = append(__args, project_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - project = &Project{} - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) - - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return project, nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_ApiKey_By_Id(ctx context.Context, - api_key_id ApiKey_Id_Field, - update ApiKey_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Name._set { - __values = append(__values, update.Name.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return emptyUpdate() - } - - __args = append(__args, api_key_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context, - peer_identity_node_id PeerIdentity_NodeId_Field, - update PeerIdentity_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.LeafSerialNumber._set { - __values = append(__values, update.LeafSerialNumber.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?")) - } - - if update.Chain._set { - __values = append(__values, update.Chain.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?")) - } - - __now := obj.db.Hooks.Now().UTC() - - __values = append(__values, __now) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?")) - - __args = append(__args, peer_identity_node_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) Update_RegistrationToken_By_Secret(ctx context.Context, - registration_token_secret RegistrationToken_Secret_Field, - update RegistrationToken_Update_Fields) ( - registration_token *RegistrationToken, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.OwnerId._set { - __values = append(__values, update.OwnerId.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return nil, emptyUpdate() - } - - __args = append(__args, registration_token_secret.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - registration_token = &RegistrationToken{} - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return registration_token, nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_Offer_By_Id(ctx context.Context, - offer_id Offer_Id_Field, - update Offer_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE offers SET "), __sets, __sqlbundle_Literal(" WHERE offers.id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.Name._set { - __values = append(__values, update.Name.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?")) - } - - if update.Description._set { - __values = append(__values, update.Description.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?")) - } - - if update.AwardCreditInCents._set { - __values = append(__values, update.AwardCreditInCents.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_in_cents = ?")) - } - - if update.InviteeCreditInCents._set { - __values = append(__values, update.InviteeCreditInCents.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_in_cents = ?")) - } - - if update.AwardCreditDurationDays._set { - __values = append(__values, update.AwardCreditDurationDays.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_duration_days = ?")) - } - - if update.InviteeCreditDurationDays._set { - __values = append(__values, update.InviteeCreditDurationDays.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_duration_days = ?")) - } - - if update.RedeemableCap._set { 
- __values = append(__values, update.RedeemableCap.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("redeemable_cap = ?")) - } - - if update.ExpiresAt._set { - __values = append(__values, update.ExpiresAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("expires_at = ?")) - } - - if update.Status._set { - __values = append(__values, update.Status.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?")) - } - - if update.Type._set { - __values = append(__values, update.Type.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return emptyUpdate() - } - - __args = append(__args, offer_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name BucketMetainfo_Name_Field, - update BucketMetainfo_Update_Fields) ( - bucket_metainfo *BucketMetainfo, err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? 
AND bucket_metainfos.name = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.PartnerId._set { - __values = append(__values, update.PartnerId.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?")) - } - - if update.DefaultSegmentSize._set { - __values = append(__values, update.DefaultSegmentSize.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?")) - } - - if update.DefaultEncryptionCipherSuite._set { - __values = append(__values, update.DefaultEncryptionCipherSuite.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?")) - } - - if update.DefaultEncryptionBlockSize._set { - __values = append(__values, update.DefaultEncryptionBlockSize.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?")) - } - - if update.DefaultRedundancyAlgorithm._set { - __values = append(__values, update.DefaultRedundancyAlgorithm.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?")) - } - - if update.DefaultRedundancyShareSize._set { - __values = append(__values, update.DefaultRedundancyShareSize.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?")) - } - - if update.DefaultRedundancyRequiredShares._set { - __values = append(__values, update.DefaultRedundancyRequiredShares.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?")) - } - - if update.DefaultRedundancyRepairShares._set { - __values = append(__values, update.DefaultRedundancyRepairShares.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?")) - } - - if update.DefaultRedundancyOptimalShares._set { - __values = append(__values, 
update.DefaultRedundancyOptimalShares.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?")) - } - - if update.DefaultRedundancyTotalShares._set { - __values = append(__values, update.DefaultRedundancyTotalShares.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return nil, emptyUpdate() - } - - __args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - bucket_metainfo = &BucketMetainfo{} - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return nil, obj.makeErr(err) - } - - var __embed_stmt_get = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") - - var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) - obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- - err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_metainfo, nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx context.Context, - graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field, - update GracefulExitProgress_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_progress SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_progress.node_id = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.BytesTransferred._set { - __values = append(__values, update.BytesTransferred.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("bytes_transferred = ?")) - } - - if update.PiecesTransferred._set { - __values = append(__values, update.PiecesTransferred.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_transferred = ?")) - } - - if update.PiecesFailed._set { - __values = append(__values, update.PiecesFailed.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_failed = ?")) - } - - __now := obj.db.Hooks.Now().UTC() - - __values = append(__values, 
__now.UTC()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?")) - - __args = append(__args, graceful_exit_progress_node_id.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) - if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field, - graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field, - update GracefulExitTransferQueue_Update_Fields) ( - err error) { - var __sets = &__sqlbundle_Hole{} - - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_transfer_queue SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_transfer_queue.node_id = ? 
AND graceful_exit_transfer_queue.path = ?")}} - - __sets_sql := __sqlbundle_Literals{Join: ", "} - var __values []interface{} - var __args []interface{} - - if update.DurabilityRatio._set { - __values = append(__values, update.DurabilityRatio.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("durability_ratio = ?")) - } - - if update.RequestedAt._set { - __values = append(__values, update.RequestedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("requested_at = ?")) - } - - if update.LastFailedAt._set { - __values = append(__values, update.LastFailedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_at = ?")) - } - - if update.LastFailedCode._set { - __values = append(__values, update.LastFailedCode.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_code = ?")) - } - - if update.FailedCount._set { - __values = append(__values, update.FailedCount.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("failed_count = ?")) - } - - if update.FinishedAt._set { - __values = append(__values, update.FinishedAt.value()) - __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("finished_at = ?")) - } - - if len(__sets_sql.SQLs) == 0 { - return emptyUpdate() - } - - __args = append(__args, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value()) - - __values = append(__values, __args...) - __sets.SQL = __sets_sql - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - _, err = obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return obj.makeErr(err) - } - return nil -} - -func (obj *sqlite3Impl) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context, - value_attribution_project_id ValueAttribution_ProjectId_Field, - value_attribution_bucket_name ValueAttribution_BucketName_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?") - - var __values []interface{} - __values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_PendingAudits_By_NodeId(ctx context.Context, - pending_audits_node_id PendingAudits_NodeId_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?") - - var __values []interface{} - __values = append(__values, pending_audits_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context, - irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?") - - var __values []interface{} - __values = append(__values, irreparabledb_segmentpath.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_AccountingRollup_By_Id(ctx context.Context, - accounting_rollup_id AccountingRollup_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?") - - var __values []interface{} - __values = append(__values, accounting_rollup_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_Node_By_Id(ctx context.Context, - node_id Node_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?") - - var __values []interface{} - __values = append(__values, node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) 
- - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_User_By_Id(ctx context.Context, - user_id User_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?") - - var __values []interface{} - __values = append(__values, user_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_Project_By_Id(ctx context.Context, - project_id Project_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?") - - var __values []interface{} - __values = append(__values, project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context, - project_member_member_id ProjectMember_MemberId_Field, - project_member_project_id ProjectMember_ProjectId_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? 
AND project_members.project_id = ?") - - var __values []interface{} - __values = append(__values, project_member_member_id.value(), project_member_project_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_ApiKey_By_Id(ctx context.Context, - api_key_id ApiKey_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?") - - var __values []interface{} - __values = append(__values, api_key_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_BucketUsage_By_Id(ctx context.Context, - bucket_usage_id BucketUsage_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_usages WHERE bucket_usages.id = ?") - - var __values []interface{} - __values = append(__values, bucket_usage_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context, - serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) ( - count int64, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM serial_numbers WHERE serial_numbers.expires_at <= ?") - - var __values []interface{} - __values = append(__values, serial_number_expires_at_less_or_equal.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return 0, obj.makeErr(err) - } - - count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - - return count, nil - -} - -func (obj *sqlite3Impl) Delete_StoragenodeStorageTally_By_Id(ctx context.Context, - storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?") - - var __values []interface{} - __values = append(__values, storagenode_storage_tally_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_ResetPasswordToken_By_Secret(ctx context.Context, - reset_password_token_secret ResetPasswordToken_Secret_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM reset_password_tokens WHERE reset_password_tokens.secret = ?") - - var __values []interface{} - __values = append(__values, reset_password_token_secret.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context, - bucket_metainfo_project_id BucketMetainfo_ProjectId_Field, - bucket_metainfo_name BucketMetainfo_Name_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") - - var __values []interface{} - __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_GracefulExitProgress_By_NodeId(ctx context.Context, - graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?") - - var __values []interface{} - __values = append(__values, graceful_exit_progress_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) ( - count int64, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ?") - - var __values []interface{} - __values = append(__values, graceful_exit_transfer_queue_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return 0, obj.makeErr(err) - } - - count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - - return count, nil - -} - -func (obj *sqlite3Impl) Delete_GracefulExitTransferQueue_By_NodeId_And_Path(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field, - graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field) ( - deleted bool, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ?") - - var __values []interface{} - __values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) - if err != nil { - return false, obj.makeErr(err) - } - - __count, err := __res.RowsAffected() - if err != nil { - return false, obj.makeErr(err) - } - - return __count > 0, nil - -} - -func (obj *sqlite3Impl) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context, - graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) ( - count int64, err error) { - - var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.finished_at is not NULL") - - var __values []interface{} - __values = append(__values, graceful_exit_transfer_queue_node_id.value()) - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __values...) - - __res, err := obj.driver.Exec(__stmt, __values...) 
- if err != nil { - return 0, obj.makeErr(err) - } - - count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - - return count, nil - -} - -func (obj *sqlite3Impl) getLastValueAttribution(ctx context.Context, - pk int64) ( - value_attribution *ValueAttribution, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - value_attribution = &ValueAttribution{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated) - if err != nil { - return nil, obj.makeErr(err) - } - return value_attribution, nil - -} - -func (obj *sqlite3Impl) getLastPendingAudits(ctx context.Context, - pk int64) ( - pending_audits *PendingAudits, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - pending_audits = &PendingAudits{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path) - if err != nil { - return nil, obj.makeErr(err) - } - return pending_audits, nil - -} - -func (obj *sqlite3Impl) getLastIrreparabledb(ctx context.Context, - pk int64) ( - irreparabledb *Irreparabledb, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, 
irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - irreparabledb = &Irreparabledb{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount) - if err != nil { - return nil, obj.makeErr(err) - } - return irreparabledb, nil - -} - -func (obj *sqlite3Impl) getLastAccountingTimestamps(ctx context.Context, - pk int64) ( - accounting_timestamps *AccountingTimestamps, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.name, accounting_timestamps.value FROM accounting_timestamps WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - accounting_timestamps = &AccountingTimestamps{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&accounting_timestamps.Name, &accounting_timestamps.Value) - if err != nil { - return nil, obj.makeErr(err) - } - return accounting_timestamps, nil - -} - -func (obj *sqlite3Impl) getLastAccountingRollup(ctx context.Context, - pk int64) ( - accounting_rollup *AccountingRollup, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - accounting_rollup = &AccountingRollup{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, 
&accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal) - if err != nil { - return nil, obj.makeErr(err) - } - return accounting_rollup, nil - -} - -func (obj *sqlite3Impl) getLastNode(ctx context.Context, - pk int64) ( - node *Node, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - node = &Node{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, 
&node.ExitFinishedAt, &node.ExitSuccess) - if err != nil { - return nil, obj.makeErr(err) - } - return node, nil - -} - -func (obj *sqlite3Impl) getLastUser(ctx context.Context, - pk int64) ( - user *User, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - user = &User{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return user, nil - -} - -func (obj *sqlite3Impl) getLastProject(ctx context.Context, - pk int64) ( - project *Project, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - project = &Project{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return project, nil - -} - -func (obj *sqlite3Impl) getLastProjectInvoiceStamp(ctx context.Context, - pk int64) ( - project_invoice_stamp *ProjectInvoiceStamp, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, 
__embed_stmt) - obj.logStmt(__stmt, pk) - - project_invoice_stamp = &ProjectInvoiceStamp{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return project_invoice_stamp, nil - -} - -func (obj *sqlite3Impl) getLastProjectMember(ctx context.Context, - pk int64) ( - project_member *ProjectMember, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - project_member = &ProjectMember{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return project_member, nil - -} - -func (obj *sqlite3Impl) getLastApiKey(ctx context.Context, - pk int64) ( - api_key *ApiKey, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - api_key = &ApiKey{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return api_key, nil - -} - -func (obj *sqlite3Impl) getLastBucketUsage(ctx context.Context, - pk int64) ( - bucket_usage *BucketUsage, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, 
bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - bucket_usage = &BucketUsage{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress) - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_usage, nil - -} - -func (obj *sqlite3Impl) getLastSerialNumber(ctx context.Context, - pk int64) ( - serial_number *SerialNumber, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - serial_number = &SerialNumber{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt) - if err != nil { - return nil, obj.makeErr(err) - } - return serial_number, nil - -} - -func (obj *sqlite3Impl) getLastUsedSerial(ctx context.Context, - pk int64) ( - used_serial *UsedSerial, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT used_serials.serial_number_id, used_serials.storage_node_id FROM used_serials WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - used_serial = &UsedSerial{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&used_serial.SerialNumberId, 
&used_serial.StorageNodeId) - if err != nil { - return nil, obj.makeErr(err) - } - return used_serial, nil - -} - -func (obj *sqlite3Impl) getLastBucketStorageTally(ctx context.Context, - pk int64) ( - bucket_storage_tally *BucketStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - bucket_storage_tally = &BucketStorageTally{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize) - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_storage_tally, nil - -} - -func (obj *sqlite3Impl) getLastStoragenodeStorageTally(ctx context.Context, - pk int64) ( - storagenode_storage_tally *StoragenodeStorageTally, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - storagenode_storage_tally = &StoragenodeStorageTally{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, 
&storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal) - if err != nil { - return nil, obj.makeErr(err) - } - return storagenode_storage_tally, nil - -} - -func (obj *sqlite3Impl) getLastPeerIdentity(ctx context.Context, - pk int64) ( - peer_identity *PeerIdentity, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - peer_identity = &PeerIdentity{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return peer_identity, nil - -} - -func (obj *sqlite3Impl) getLastRegistrationToken(ctx context.Context, - pk int64) ( - registration_token *RegistrationToken, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - registration_token = &RegistrationToken{} - err = obj.driver.QueryRow(__stmt, pk).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return registration_token, nil - -} - -func (obj *sqlite3Impl) getLastResetPasswordToken(ctx context.Context, - pk int64) ( - reset_password_token *ResetPasswordToken, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, 
__embed_stmt) - obj.logStmt(__stmt, pk) - - reset_password_token = &ResetPasswordToken{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return reset_password_token, nil - -} - -func (obj *sqlite3Impl) getLastOffer(ctx context.Context, - pk int64) ( - offer *Offer, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - offer = &Offer{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type) - if err != nil { - return nil, obj.makeErr(err) - } - return offer, nil - -} - -func (obj *sqlite3Impl) getLastUserCredit(ctx context.Context, - pk int64) ( - user_credit *UserCredit, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at FROM user_credits WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - user_credit = &UserCredit{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, 
&user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return user_credit, nil - -} - -func (obj *sqlite3Impl) getLastBucketMetainfo(ctx context.Context, - pk int64) ( - bucket_metainfo *BucketMetainfo, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - bucket_metainfo = &BucketMetainfo{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares) - if err != nil { - return nil, obj.makeErr(err) - } - return bucket_metainfo, nil - -} - -func (obj *sqlite3Impl) getLastGracefulExitProgress(ctx context.Context, - pk int64) ( - graceful_exit_progress *GracefulExitProgress, err 
error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - graceful_exit_progress = &GracefulExitProgress{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return graceful_exit_progress, nil - -} - -func (obj *sqlite3Impl) getLastGracefulExitTransferQueue(ctx context.Context, - pk int64) ( - graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at FROM graceful_exit_transfer_queue WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - graceful_exit_transfer_queue = &GracefulExitTransferQueue{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, 
&graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return graceful_exit_transfer_queue, nil - -} - -func (obj *sqlite3Impl) getLastStripeCustomer(ctx context.Context, - pk int64) ( - stripe_customer *StripeCustomer, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at FROM stripe_customers WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - stripe_customer = &StripeCustomer{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return stripe_customer, nil - -} - -func (obj *sqlite3Impl) getLastCoinpaymentsTransaction(ctx context.Context, - pk int64) ( - coinpayments_transaction *CoinpaymentsTransaction, err error) { - - var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE _rowid_ = ?") - - var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, pk) - - coinpayments_transaction = &CoinpaymentsTransaction{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.CreatedAt) - if err != nil { - return nil, obj.makeErr(err) - } - return coinpayments_transaction, nil - -} - -func (impl sqlite3Impl) isConstraintError(err error) ( - 
constraint string, ok bool) { - if e, ok := err.(sqlite3.Error); ok { - if e.Code == sqlite3.ErrConstraint { - msg := err.Error() - colon := strings.LastIndex(msg, ":") - if colon != -1 { - return strings.TrimSpace(msg[colon:]), true - } - return "", true - } - } - return "", false -} - -func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error) { - var __res sql.Result - var __count int64 - __res, err = obj.driver.Exec("DELETE FROM user_credits;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM used_serials;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM project_members;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM project_invoice_stamps;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM bucket_metainfos;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM api_keys;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM value_attributions;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = 
obj.driver.Exec("DELETE FROM users;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM stripe_customers;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM storagenode_storage_tallies;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM storagenode_bandwidth_rollups;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM serial_numbers;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM reset_password_tokens;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM registration_tokens;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM projects;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM pending_audits;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } 
- count += __count - __res, err = obj.driver.Exec("DELETE FROM peer_identities;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM offers;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM nodes;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM irreparabledbs;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM injuredsegments;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM graceful_exit_transfer_queue;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM graceful_exit_progress;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM coinpayments_transactions;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM bucket_usages;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 
0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM bucket_storage_tallies;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM bucket_bandwidth_rollups;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM accounting_timestamps;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - __res, err = obj.driver.Exec("DELETE FROM accounting_rollups;") - if err != nil { - return 0, obj.makeErr(err) - } - - __count, err = __res.RowsAffected() - if err != nil { - return 0, obj.makeErr(err) - } - count += __count - - return count, nil - -} - type Rx struct { db *DB tx *Tx @@ -17238,36 +12077,3 @@ type dbMethods interface { func openpostgres(source string) (*sql.DB, error) { return sql.Open("postgres", source) } - -var sqlite3DriverName = func() string { - var id [16]byte - rand.Read(id[:]) - return fmt.Sprintf("sqlite3_%x", string(id[:])) -}() - -func init() { - sql.Register(sqlite3DriverName, &sqlite3.SQLiteDriver{ - ConnectHook: sqlite3SetupConn, - }) -} - -// SQLite3JournalMode controls the journal_mode pragma for all new connections. -// Since it is read without a mutex, it must be changed to the value you want -// before any Open calls. 
-var SQLite3JournalMode = "WAL" - -func sqlite3SetupConn(conn *sqlite3.SQLiteConn) (err error) { - _, err = conn.Exec("PRAGMA foreign_keys = ON", nil) - if err != nil { - return makeErr(err) - } - _, err = conn.Exec("PRAGMA journal_mode = "+SQLite3JournalMode, nil) - if err != nil { - return makeErr(err) - } - return nil -} - -func opensqlite3(source string) (*sql.DB, error) { - return sql.Open(sqlite3DriverName, source) -} diff --git a/satellite/satellitedb/gracefulexit.go b/satellite/satellitedb/gracefulexit.go index 29d578b9c..d80fa0ffd 100644 --- a/satellite/satellitedb/gracefulexit.go +++ b/satellite/satellitedb/gracefulexit.go @@ -11,7 +11,6 @@ import ( "time" "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "github.com/zeebo/errs" "storj.io/storj/pkg/storj" @@ -71,51 +70,31 @@ func (db *gracefulexitDB) GetProgress(ctx context.Context, nodeID storj.NodeID) func (db *gracefulexitDB) Enqueue(ctx context.Context, items []gracefulexit.TransferQueueItem) (err error) { defer mon.Task()(&ctx)(&err) - switch t := db.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - statement := db.db.Rebind( - `INSERT INTO graceful_exit_transfer_queue(node_id, path, piece_num, durability_ratio, queued_at) - VALUES (?, ?, ?, ?, ?) 
ON CONFLICT DO NOTHING;`, - ) - for _, item := range items { - _, err = db.db.ExecContext(ctx, statement, - item.NodeID.Bytes(), item.Path, item.PieceNum, item.DurabilityRatio, time.Now().UTC()) - if err != nil { - return Error.Wrap(err) - } + sort.Slice(items, func(i, k int) bool { + compare := bytes.Compare(items[i].NodeID.Bytes(), items[k].NodeID.Bytes()) + if compare == 0 { + return bytes.Compare(items[i].Path, items[k].Path) < 0 } - case *pq.Driver: - sort.Slice(items, func(i, k int) bool { - compare := bytes.Compare(items[i].NodeID.Bytes(), items[k].NodeID.Bytes()) - if compare == 0 { - return bytes.Compare(items[i].Path, items[k].Path) < 0 - } - return compare < 0 - }) + return compare < 0 + }) - var nodeIDs []storj.NodeID - var paths [][]byte - var pieceNums []int32 - var durabilities []float64 - for _, item := range items { - nodeIDs = append(nodeIDs, item.NodeID) - paths = append(paths, item.Path) - pieceNums = append(pieceNums, item.PieceNum) - durabilities = append(durabilities, item.DurabilityRatio) - } - - _, err := db.db.ExecContext(ctx, ` - INSERT INTO graceful_exit_transfer_queue(node_id, path, piece_num, durability_ratio, queued_at) - SELECT unnest($1::bytea[]), unnest($2::bytea[]), unnest($3::integer[]), unnest($4::float8[]), $5 - ON CONFLICT DO NOTHING;`, postgresNodeIDList(nodeIDs), pq.ByteaArray(paths), pq.Array(pieceNums), pq.Array(durabilities), time.Now().UTC()) - if err != nil { - return Error.Wrap(err) - } - default: - return Error.New("Unsupported database %t", t) + var nodeIDs []storj.NodeID + var paths [][]byte + var pieceNums []int32 + var durabilities []float64 + for _, item := range items { + nodeIDs = append(nodeIDs, item.NodeID) + paths = append(paths, item.Path) + pieceNums = append(pieceNums, item.PieceNum) + durabilities = append(durabilities, item.DurabilityRatio) } - return nil + _, err = db.db.ExecContext(ctx, db.db.Rebind(` + INSERT INTO graceful_exit_transfer_queue(node_id, path, piece_num, durability_ratio, queued_at) + 
SELECT unnest($1::bytea[]), unnest($2::bytea[]), unnest($3::integer[]), unnest($4::float8[]), $5 + ON CONFLICT DO NOTHING;`), postgresNodeIDList(nodeIDs), pq.ByteaArray(paths), pq.Array(pieceNums), pq.Array(durabilities), time.Now().UTC()) + + return Error.Wrap(err) } // UpdateTransferQueueItem creates a graceful exit transfer queue entry. diff --git a/satellite/satellitedb/locked.go b/satellite/satellitedb/locked.go deleted file mode 100644 index 32af0c0b3..000000000 --- a/satellite/satellitedb/locked.go +++ /dev/null @@ -1,1275 +0,0 @@ -// Code generated by lockedgen using 'go generate'. DO NOT EDIT. - -// Copyright (C) 2019 Storj Labs, Inc. -// See LICENSE for copying information. - -package satellitedb - -import ( - "context" - "sync" - "time" - - "github.com/skyrings/skyring-common/tools/uuid" - - "storj.io/storj/internal/memory" - "storj.io/storj/pkg/identity" - "storj.io/storj/pkg/macaroon" - "storj.io/storj/pkg/pb" - "storj.io/storj/pkg/storj" - "storj.io/storj/satellite" - "storj.io/storj/satellite/accounting" - "storj.io/storj/satellite/attribution" - "storj.io/storj/satellite/audit" - "storj.io/storj/satellite/console" - "storj.io/storj/satellite/gracefulexit" - "storj.io/storj/satellite/metainfo" - "storj.io/storj/satellite/orders" - "storj.io/storj/satellite/overlay" - "storj.io/storj/satellite/payments/stripecoinpayments" - "storj.io/storj/satellite/repair/irreparable" - "storj.io/storj/satellite/repair/queue" - "storj.io/storj/satellite/rewards" -) - -// locked implements a locking wrapper around satellite.DB. -type locked struct { - sync.Locker - db satellite.DB -} - -// newLocked returns database wrapped with locker. 
-func newLocked(db satellite.DB) satellite.DB { - return &locked{&sync.Mutex{}, db} -} - -// Attribution returns database for partner keys information -func (m *locked) Attribution() attribution.DB { - m.Lock() - defer m.Unlock() - return &lockedAttribution{m.Locker, m.db.Attribution()} -} - -// lockedAttribution implements locking wrapper for attribution.DB -type lockedAttribution struct { - sync.Locker - db attribution.DB -} - -// Get retrieves attribution info using project id and bucket name. -func (m *lockedAttribution) Get(ctx context.Context, projectID uuid.UUID, bucketName []byte) (*attribution.Info, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, projectID, bucketName) -} - -// Insert creates and stores new Info -func (m *lockedAttribution) Insert(ctx context.Context, info *attribution.Info) (*attribution.Info, error) { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, info) -} - -// QueryAttribution queries partner bucket attribution data -func (m *lockedAttribution) QueryAttribution(ctx context.Context, partnerID uuid.UUID, start time.Time, end time.Time) ([]*attribution.CSVRow, error) { - m.Lock() - defer m.Unlock() - return m.db.QueryAttribution(ctx, partnerID, start, end) -} - -// Buckets returns the database to interact with buckets -func (m *locked) Buckets() metainfo.BucketsDB { - m.Lock() - defer m.Unlock() - return &lockedBuckets{m.Locker, m.db.Buckets()} -} - -// lockedBuckets implements locking wrapper for metainfo.BucketsDB -type lockedBuckets struct { - sync.Locker - db metainfo.BucketsDB -} - -// Create creates a new bucket -func (m *lockedBuckets) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) { - m.Lock() - defer m.Unlock() - return m.db.CreateBucket(ctx, bucket) -} - -// Delete deletes a bucket -func (m *lockedBuckets) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) { - m.Lock() - defer m.Unlock() - return m.db.DeleteBucket(ctx, bucketName, 
projectID) -} - -// Get returns an existing bucket -func (m *lockedBuckets) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error) { - m.Lock() - defer m.Unlock() - return m.db.GetBucket(ctx, bucketName, projectID) -} - -// List returns all buckets for a project -func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) { - m.Lock() - defer m.Unlock() - return m.db.ListBuckets(ctx, projectID, listOpts, allowedBuckets) -} - -// UpdateBucket updates an existing bucket -func (m *lockedBuckets) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateBucket(ctx, bucket) -} - -// Close closes the database -func (m *locked) Close() error { - m.Lock() - defer m.Unlock() - return m.db.Close() -} - -// CoinpaymentsTransactions returns db for storing coinpayments transactions. -func (m *locked) CoinpaymentsTransactions() stripecoinpayments.TransactionsDB { - m.Lock() - defer m.Unlock() - return &lockedCoinpaymentsTransactions{m.Locker, m.db.CoinpaymentsTransactions()} -} - -// lockedCoinpaymentsTransactions implements locking wrapper for stripecoinpayments.TransactionsDB -type lockedCoinpaymentsTransactions struct { - sync.Locker - db stripecoinpayments.TransactionsDB -} - -// Insert inserts new coinpayments transaction into DB. 
-func (m *lockedCoinpaymentsTransactions) Insert(ctx context.Context, tx stripecoinpayments.Transaction) (*stripecoinpayments.Transaction, error) { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, tx) -} - -// Console returns database for satellite console -func (m *locked) Console() console.DB { - m.Lock() - defer m.Unlock() - return &lockedConsole{m.Locker, m.db.Console()} -} - -// lockedConsole implements locking wrapper for console.DB -type lockedConsole struct { - sync.Locker - db console.DB -} - -// APIKeys is a getter for APIKeys repository -func (m *lockedConsole) APIKeys() console.APIKeys { - m.Lock() - defer m.Unlock() - return &lockedAPIKeys{m.Locker, m.db.APIKeys()} -} - -// lockedAPIKeys implements locking wrapper for console.APIKeys -type lockedAPIKeys struct { - sync.Locker - db console.APIKeys -} - -// Create creates and stores new APIKeyInfo -func (m *lockedAPIKeys) Create(ctx context.Context, head []byte, info console.APIKeyInfo) (*console.APIKeyInfo, error) { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, head, info) -} - -// Delete deletes APIKeyInfo from store -func (m *lockedAPIKeys) Delete(ctx context.Context, id uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, id) -} - -// Get retrieves APIKeyInfo with given ID -func (m *lockedAPIKeys) Get(ctx context.Context, id uuid.UUID) (*console.APIKeyInfo, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, id) -} - -// GetByHead retrieves APIKeyInfo for given key head -func (m *lockedAPIKeys) GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByHead(ctx, head) -} - -// GetByNameAndProjectID retrieves APIKeyInfo for given key name and projectID -func (m *lockedAPIKeys) GetByNameAndProjectID(ctx context.Context, name string, projectID uuid.UUID) (*console.APIKeyInfo, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByNameAndProjectID(ctx, name, projectID) -} - -// 
GetPagedByProjectID is a method for querying API keys from the database by projectID and cursor -func (m *lockedAPIKeys) GetPagedByProjectID(ctx context.Context, projectID uuid.UUID, cursor console.APIKeyCursor) (akp *console.APIKeyPage, err error) { - m.Lock() - defer m.Unlock() - return m.db.GetPagedByProjectID(ctx, projectID, cursor) -} - -// Update updates APIKeyInfo in store -func (m *lockedAPIKeys) Update(ctx context.Context, key console.APIKeyInfo) error { - m.Lock() - defer m.Unlock() - return m.db.Update(ctx, key) -} - -// BucketUsage is a getter for accounting.BucketUsage repository -func (m *lockedConsole) BucketUsage() accounting.BucketUsage { - m.Lock() - defer m.Unlock() - return &lockedBucketUsage{m.Locker, m.db.BucketUsage()} -} - -// lockedBucketUsage implements locking wrapper for accounting.BucketUsage -type lockedBucketUsage struct { - sync.Locker - db accounting.BucketUsage -} - -func (m *lockedBucketUsage) Create(ctx context.Context, rollup accounting.BucketRollup) (*accounting.BucketRollup, error) { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, rollup) -} - -func (m *lockedBucketUsage) Delete(ctx context.Context, id uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, id) -} - -func (m *lockedBucketUsage) Get(ctx context.Context, id uuid.UUID) (*accounting.BucketRollup, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, id) -} - -func (m *lockedBucketUsage) GetPaged(ctx context.Context, cursor *accounting.BucketRollupCursor) ([]accounting.BucketRollup, error) { - m.Lock() - defer m.Unlock() - return m.db.GetPaged(ctx, cursor) -} - -// ProjectMembers is a getter for ProjectMembers repository -func (m *lockedConsole) ProjectMembers() console.ProjectMembers { - m.Lock() - defer m.Unlock() - return &lockedProjectMembers{m.Locker, m.db.ProjectMembers()} -} - -// lockedProjectMembers implements locking wrapper for console.ProjectMembers -type lockedProjectMembers struct { - sync.Locker - db 
console.ProjectMembers -} - -// Delete is a method for deleting project member by memberID and projectID from the database. -func (m *lockedProjectMembers) Delete(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, memberID, projectID) -} - -// GetByMemberID is a method for querying project members from the database by memberID. -func (m *lockedProjectMembers) GetByMemberID(ctx context.Context, memberID uuid.UUID) ([]console.ProjectMember, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByMemberID(ctx, memberID) -} - -// GetPagedByProjectID is a method for querying project members from the database by projectID and cursor -func (m *lockedProjectMembers) GetPagedByProjectID(ctx context.Context, projectID uuid.UUID, cursor console.ProjectMembersCursor) (*console.ProjectMembersPage, error) { - m.Lock() - defer m.Unlock() - return m.db.GetPagedByProjectID(ctx, projectID, cursor) -} - -// Insert is a method for inserting project member into the database. -func (m *lockedProjectMembers) Insert(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) (*console.ProjectMember, error) { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, memberID, projectID) -} - -// Projects is a getter for Projects repository -func (m *lockedConsole) Projects() console.Projects { - m.Lock() - defer m.Unlock() - return &lockedProjects{m.Locker, m.db.Projects()} -} - -// lockedProjects implements locking wrapper for console.Projects -type lockedProjects struct { - sync.Locker - db console.Projects -} - -// Delete is a method for deleting project by Id from the database. -func (m *lockedProjects) Delete(ctx context.Context, id uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, id) -} - -// Get is a method for querying project from the database by id. 
-func (m *lockedProjects) Get(ctx context.Context, id uuid.UUID) (*console.Project, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, id) -} - -// GetAll is a method for querying all projects from the database. -func (m *lockedProjects) GetAll(ctx context.Context) ([]console.Project, error) { - m.Lock() - defer m.Unlock() - return m.db.GetAll(ctx) -} - -// GetByUserID is a method for querying all projects from the database by userID. -func (m *lockedProjects) GetByUserID(ctx context.Context, userID uuid.UUID) ([]console.Project, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByUserID(ctx, userID) -} - -// GetCreatedBefore retrieves all projects created before provided date -func (m *lockedProjects) GetCreatedBefore(ctx context.Context, before time.Time) ([]console.Project, error) { - m.Lock() - defer m.Unlock() - return m.db.GetCreatedBefore(ctx, before) -} - -// Insert is a method for inserting project into the database. -func (m *lockedProjects) Insert(ctx context.Context, project *console.Project) (*console.Project, error) { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, project) -} - -// Update is a method for updating project entity. 
-func (m *lockedProjects) Update(ctx context.Context, project *console.Project) error { - m.Lock() - defer m.Unlock() - return m.db.Update(ctx, project) -} - -// RegistrationTokens is a getter for RegistrationTokens repository -func (m *lockedConsole) RegistrationTokens() console.RegistrationTokens { - m.Lock() - defer m.Unlock() - return &lockedRegistrationTokens{m.Locker, m.db.RegistrationTokens()} -} - -// lockedRegistrationTokens implements locking wrapper for console.RegistrationTokens -type lockedRegistrationTokens struct { - sync.Locker - db console.RegistrationTokens -} - -// Create creates new registration token -func (m *lockedRegistrationTokens) Create(ctx context.Context, projectLimit int) (*console.RegistrationToken, error) { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, projectLimit) -} - -// GetByOwnerID retrieves RegTokenInfo by ownerID -func (m *lockedRegistrationTokens) GetByOwnerID(ctx context.Context, ownerID uuid.UUID) (*console.RegistrationToken, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByOwnerID(ctx, ownerID) -} - -// GetBySecret retrieves RegTokenInfo with given Secret -func (m *lockedRegistrationTokens) GetBySecret(ctx context.Context, secret console.RegistrationSecret) (*console.RegistrationToken, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBySecret(ctx, secret) -} - -// UpdateOwner updates registration token's owner -func (m *lockedRegistrationTokens) UpdateOwner(ctx context.Context, secret console.RegistrationSecret, ownerID uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateOwner(ctx, secret, ownerID) -} - -// ResetPasswordTokens is a getter for ResetPasswordTokens repository -func (m *lockedConsole) ResetPasswordTokens() console.ResetPasswordTokens { - m.Lock() - defer m.Unlock() - return &lockedResetPasswordTokens{m.Locker, m.db.ResetPasswordTokens()} -} - -// lockedResetPasswordTokens implements locking wrapper for console.ResetPasswordTokens -type lockedResetPasswordTokens 
struct { - sync.Locker - db console.ResetPasswordTokens -} - -// Create creates new reset password token -func (m *lockedResetPasswordTokens) Create(ctx context.Context, ownerID uuid.UUID) (*console.ResetPasswordToken, error) { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, ownerID) -} - -// Delete deletes ResetPasswordToken by ResetPasswordSecret -func (m *lockedResetPasswordTokens) Delete(ctx context.Context, secret console.ResetPasswordSecret) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, secret) -} - -// GetByOwnerID retrieves ResetPasswordToken by ownerID -func (m *lockedResetPasswordTokens) GetByOwnerID(ctx context.Context, ownerID uuid.UUID) (*console.ResetPasswordToken, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByOwnerID(ctx, ownerID) -} - -// GetBySecret retrieves ResetPasswordToken with given secret -func (m *lockedResetPasswordTokens) GetBySecret(ctx context.Context, secret console.ResetPasswordSecret) (*console.ResetPasswordToken, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBySecret(ctx, secret) -} - -// UsageRollups is a getter for UsageRollups repository -func (m *lockedConsole) UsageRollups() console.UsageRollups { - m.Lock() - defer m.Unlock() - return &lockedUsageRollups{m.Locker, m.db.UsageRollups()} -} - -// lockedUsageRollups implements locking wrapper for console.UsageRollups -type lockedUsageRollups struct { - sync.Locker - db console.UsageRollups -} - -func (m *lockedUsageRollups) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor console.BucketUsageCursor, since time.Time, before time.Time) (*console.BucketUsagePage, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBucketTotals(ctx, projectID, cursor, since, before) -} - -func (m *lockedUsageRollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) ([]console.BucketUsageRollup, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBucketUsageRollups(ctx, projectID, 
since, before) -} - -func (m *lockedUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) (*console.ProjectUsage, error) { - m.Lock() - defer m.Unlock() - return m.db.GetProjectTotal(ctx, projectID, since, before) -} - -// UserCredits is a getter for UserCredits repository -func (m *lockedConsole) UserCredits() console.UserCredits { - m.Lock() - defer m.Unlock() - return &lockedUserCredits{m.Locker, m.db.UserCredits()} -} - -// lockedUserCredits implements locking wrapper for console.UserCredits -type lockedUserCredits struct { - sync.Locker - db console.UserCredits -} - -func (m *lockedUserCredits) Create(ctx context.Context, userCredit console.CreateCredit) error { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, userCredit) -} - -func (m *lockedUserCredits) GetCreditUsage(ctx context.Context, userID uuid.UUID, expirationEndDate time.Time) (*console.UserCreditUsage, error) { - m.Lock() - defer m.Unlock() - return m.db.GetCreditUsage(ctx, userID, expirationEndDate) -} - -func (m *lockedUserCredits) UpdateAvailableCredits(ctx context.Context, creditsToCharge int, id uuid.UUID, billingStartDate time.Time) (remainingCharge int, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateAvailableCredits(ctx, creditsToCharge, id, billingStartDate) -} - -func (m *lockedUserCredits) UpdateEarnedCredits(ctx context.Context, userID uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateEarnedCredits(ctx, userID) -} - -// Users is a getter for Users repository -func (m *lockedConsole) Users() console.Users { - m.Lock() - defer m.Unlock() - return &lockedUsers{m.Locker, m.db.Users()} -} - -// lockedUsers implements locking wrapper for console.Users -type lockedUsers struct { - sync.Locker - db console.Users -} - -// Delete is a method for deleting user by Id from the database. 
-func (m *lockedUsers) Delete(ctx context.Context, id uuid.UUID) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, id) -} - -// Get is a method for querying user from the database by id. -func (m *lockedUsers) Get(ctx context.Context, id uuid.UUID) (*console.User, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, id) -} - -// GetByEmail is a method for querying user by email from the database. -func (m *lockedUsers) GetByEmail(ctx context.Context, email string) (*console.User, error) { - m.Lock() - defer m.Unlock() - return m.db.GetByEmail(ctx, email) -} - -// Insert is a method for inserting user into the database. -func (m *lockedUsers) Insert(ctx context.Context, user *console.User) (*console.User, error) { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, user) -} - -// Update is a method for updating user entity. -func (m *lockedUsers) Update(ctx context.Context, user *console.User) error { - m.Lock() - defer m.Unlock() - return m.db.Update(ctx, user) -} - -// Containment returns database for containment -func (m *locked) Containment() audit.Containment { - m.Lock() - defer m.Unlock() - return &lockedContainment{m.Locker, m.db.Containment()} -} - -// lockedContainment implements locking wrapper for audit.Containment -type lockedContainment struct { - sync.Locker - db audit.Containment -} - -func (m *lockedContainment) Delete(ctx context.Context, nodeID storj.NodeID) (bool, error) { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, nodeID) -} - -func (m *lockedContainment) Get(ctx context.Context, nodeID storj.NodeID) (*audit.PendingAudit, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, nodeID) -} - -func (m *lockedContainment) IncrementPending(ctx context.Context, pendingAudit *audit.PendingAudit) error { - m.Lock() - defer m.Unlock() - return m.db.IncrementPending(ctx, pendingAudit) -} - -// CreateSchema sets the schema -func (m *locked) CreateSchema(schema string) error { - m.Lock() - defer m.Unlock() - 
return m.db.CreateSchema(schema) -} - -// CreateTables initializes the database -func (m *locked) CreateTables() error { - m.Lock() - defer m.Unlock() - return m.db.CreateTables() -} - -// StripeCustomers returns table for storing stripe customers -func (m *locked) Customers() stripecoinpayments.CustomersDB { - m.Lock() - defer m.Unlock() - return &lockedCustomers{m.Locker, m.db.Customers()} -} - -// lockedCustomers implements locking wrapper for stripecoinpayments.CustomersDB -type lockedCustomers struct { - sync.Locker - db stripecoinpayments.CustomersDB -} - -// GetCustomerID return stripe customers id. -func (m *lockedCustomers) GetCustomerID(ctx context.Context, userID uuid.UUID) (string, error) { - m.Lock() - defer m.Unlock() - return m.db.GetCustomerID(ctx, userID) -} - -// Insert inserts a stripe customer into the database. -func (m *lockedCustomers) Insert(ctx context.Context, userID uuid.UUID, customerID string) error { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, userID, customerID) -} - -// DropSchema drops the schema -func (m *locked) DropSchema(schema string) error { - m.Lock() - defer m.Unlock() - return m.db.DropSchema(schema) -} - -// GracefulExit returns database for graceful exit -func (m *locked) GracefulExit() gracefulexit.DB { - m.Lock() - defer m.Unlock() - return &lockedGracefulExit{m.Locker, m.db.GracefulExit()} -} - -// lockedGracefulExit implements locking wrapper for gracefulexit.DB -type lockedGracefulExit struct { - sync.Locker - db gracefulexit.DB -} - -// DeleteFinishedTransferQueueItem deletes finiahed graceful exit transfer queue entries. -func (m *lockedGracefulExit) DeleteFinishedTransferQueueItems(ctx context.Context, nodeID storj.NodeID) error { - m.Lock() - defer m.Unlock() - return m.db.DeleteFinishedTransferQueueItems(ctx, nodeID) -} - -// DeleteTransferQueueItem deletes a graceful exit transfer queue entry. 
-func (m *lockedGracefulExit) DeleteTransferQueueItem(ctx context.Context, nodeID storj.NodeID, path []byte) error { - m.Lock() - defer m.Unlock() - return m.db.DeleteTransferQueueItem(ctx, nodeID, path) -} - -// DeleteTransferQueueItem deletes a graceful exit transfer queue entries by nodeID. -func (m *lockedGracefulExit) DeleteTransferQueueItems(ctx context.Context, nodeID storj.NodeID) error { - m.Lock() - defer m.Unlock() - return m.db.DeleteTransferQueueItems(ctx, nodeID) -} - -// Enqueue batch inserts graceful exit transfer queue entries it does not exist. -func (m *lockedGracefulExit) Enqueue(ctx context.Context, items []gracefulexit.TransferQueueItem) error { - m.Lock() - defer m.Unlock() - return m.db.Enqueue(ctx, items) -} - -// GetIncomplete gets incomplete graceful exit transfer queue entries ordered by durability ratio and queued date ascending. -func (m *lockedGracefulExit) GetIncomplete(ctx context.Context, nodeID storj.NodeID, limit int, offset int64) ([]*gracefulexit.TransferQueueItem, error) { - m.Lock() - defer m.Unlock() - return m.db.GetIncomplete(ctx, nodeID, limit, offset) -} - -// GetIncompleteNotFailed gets incomplete graceful exit transfer queue entries that have failed <= maxFailures times, ordered by durability ratio and queued date ascending. -func (m *lockedGracefulExit) GetIncompleteFailed(ctx context.Context, nodeID storj.NodeID, maxFailures int, limit int, offset int64) ([]*gracefulexit.TransferQueueItem, error) { - m.Lock() - defer m.Unlock() - return m.db.GetIncompleteFailed(ctx, nodeID, maxFailures, limit, offset) -} - -// GetIncompleteNotFailed gets incomplete graceful exit transfer queue entries in the database ordered by durability ratio and queued date ascending. 
-func (m *lockedGracefulExit) GetIncompleteNotFailed(ctx context.Context, nodeID storj.NodeID, limit int, offset int64) ([]*gracefulexit.TransferQueueItem, error) { - m.Lock() - defer m.Unlock() - return m.db.GetIncompleteNotFailed(ctx, nodeID, limit, offset) -} - -// GetProgress gets a graceful exit progress entry. -func (m *lockedGracefulExit) GetProgress(ctx context.Context, nodeID storj.NodeID) (*gracefulexit.Progress, error) { - m.Lock() - defer m.Unlock() - return m.db.GetProgress(ctx, nodeID) -} - -// GetTransferQueueItem gets a graceful exit transfer queue entry. -func (m *lockedGracefulExit) GetTransferQueueItem(ctx context.Context, nodeID storj.NodeID, path []byte) (*gracefulexit.TransferQueueItem, error) { - m.Lock() - defer m.Unlock() - return m.db.GetTransferQueueItem(ctx, nodeID, path) -} - -// IncrementProgress increments transfer stats for a node. -func (m *lockedGracefulExit) IncrementProgress(ctx context.Context, nodeID storj.NodeID, bytes int64, successfulTransfers int64, failedTransfers int64) error { - m.Lock() - defer m.Unlock() - return m.db.IncrementProgress(ctx, nodeID, bytes, successfulTransfers, failedTransfers) -} - -// UpdateTransferQueueItem creates a graceful exit transfer queue entry. -func (m *lockedGracefulExit) UpdateTransferQueueItem(ctx context.Context, item gracefulexit.TransferQueueItem) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateTransferQueueItem(ctx, item) -} - -// Irreparable returns database for failed repairs -func (m *locked) Irreparable() irreparable.DB { - m.Lock() - defer m.Unlock() - return &lockedIrreparable{m.Locker, m.db.Irreparable()} -} - -// lockedIrreparable implements locking wrapper for irreparable.DB -type lockedIrreparable struct { - sync.Locker - db irreparable.DB -} - -// Delete removes irreparable segment info based on segmentPath. 
-func (m *lockedIrreparable) Delete(ctx context.Context, segmentPath []byte) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, segmentPath) -} - -// Get returns irreparable segment info based on segmentPath. -func (m *lockedIrreparable) Get(ctx context.Context, segmentPath []byte) (*pb.IrreparableSegment, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, segmentPath) -} - -// GetLimited returns a list of irreparable segment info starting after the last segment info we retrieved -func (m *lockedIrreparable) GetLimited(ctx context.Context, limit int, lastSeenSegmentPath []byte) ([]*pb.IrreparableSegment, error) { - m.Lock() - defer m.Unlock() - return m.db.GetLimited(ctx, limit, lastSeenSegmentPath) -} - -// IncrementRepairAttempts increments the repair attempts. -func (m *lockedIrreparable) IncrementRepairAttempts(ctx context.Context, segmentInfo *pb.IrreparableSegment) error { - m.Lock() - defer m.Unlock() - return m.db.IncrementRepairAttempts(ctx, segmentInfo) -} - -// Orders returns database for orders -func (m *locked) Orders() orders.DB { - m.Lock() - defer m.Unlock() - return &lockedOrders{m.Locker, m.db.Orders()} -} - -// lockedOrders implements locking wrapper for orders.DB -type lockedOrders struct { - sync.Locker - db orders.DB -} - -// CreateSerialInfo creates serial number entry in database -func (m *lockedOrders) CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.CreateSerialInfo(ctx, serialNumber, bucketID, limitExpiration) -} - -// DeleteExpiredSerials deletes all expired serials in serial_number and used_serials table. 
-func (m *lockedOrders) DeleteExpiredSerials(ctx context.Context, now time.Time) (_ int, err error) { - m.Lock() - defer m.Unlock() - return m.db.DeleteExpiredSerials(ctx, now) -} - -// GetBucketBandwidth gets total bucket bandwidth from period of time -func (m *lockedOrders) GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from time.Time, to time.Time) (int64, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBucketBandwidth(ctx, projectID, bucketName, from, to) -} - -// GetStorageNodeBandwidth gets total storage node bandwidth from period of time -func (m *lockedOrders) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from time.Time, to time.Time) (int64, error) { - m.Lock() - defer m.Unlock() - return m.db.GetStorageNodeBandwidth(ctx, nodeID, from, to) -} - -// ProcessOrders takes a list of order requests and processes them in a batch -func (m *lockedOrders) ProcessOrders(ctx context.Context, requests []*orders.ProcessOrderRequest) (responses []*orders.ProcessOrderResponse, err error) { - m.Lock() - defer m.Unlock() - return m.db.ProcessOrders(ctx, requests) -} - -// UnuseSerialNumber removes pair serial number -> storage node id from database -func (m *lockedOrders) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error { - m.Lock() - defer m.Unlock() - return m.db.UnuseSerialNumber(ctx, serialNumber, storageNodeID) -} - -// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket -func (m *lockedOrders) UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateBucketBandwidthAllocation(ctx, projectID, bucketName, action, amount, intervalStart) -} - -// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket -func (m *lockedOrders) UpdateBucketBandwidthInline(ctx 
context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateBucketBandwidthInline(ctx, projectID, bucketName, action, amount, intervalStart) -} - -// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket -func (m *lockedOrders) UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateBucketBandwidthSettle(ctx, projectID, bucketName, action, amount, intervalStart) -} - -// UpdateStoragenodeBandwidthAllocation updates 'allocated' bandwidth for given storage nodes -func (m *lockedOrders) UpdateStoragenodeBandwidthAllocation(ctx context.Context, storageNodes []storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateStoragenodeBandwidthAllocation(ctx, storageNodes, action, amount, intervalStart) -} - -// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node -func (m *lockedOrders) UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateStoragenodeBandwidthSettle(ctx, storageNode, action, amount, intervalStart) -} - -// UseSerialNumber creates serial number entry in database -func (m *lockedOrders) UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error) { - m.Lock() - defer m.Unlock() - return m.db.UseSerialNumber(ctx, serialNumber, storageNodeID) -} - -// OverlayCache returns database for caching overlay information -func (m *locked) OverlayCache() overlay.DB { - m.Lock() - defer m.Unlock() - return &lockedOverlayCache{m.Locker, m.db.OverlayCache()} -} - -// lockedOverlayCache 
implements locking wrapper for overlay.DB -type lockedOverlayCache struct { - sync.Locker - db overlay.DB -} - -// AllPieceCounts returns a map of node IDs to piece counts from the db. -func (m *lockedOverlayCache) AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int, err error) { - m.Lock() - defer m.Unlock() - return m.db.AllPieceCounts(ctx) -} - -// BatchUpdateStats updates multiple storagenode's stats in one transaction -func (m *lockedOverlayCache) BatchUpdateStats(ctx context.Context, updateRequests []*overlay.UpdateRequest, batchSize int) (failed storj.NodeIDList, err error) { - m.Lock() - defer m.Unlock() - return m.db.BatchUpdateStats(ctx, updateRequests, batchSize) -} - -// Get looks up the node by nodeID -func (m *lockedOverlayCache) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, nodeID) -} - -func (m *lockedOverlayCache) GetExitStatus(ctx context.Context, nodeID storj.NodeID) (exitStatus *overlay.ExitStatus, err error) { - m.Lock() - defer m.Unlock() - return m.db.GetExitStatus(ctx, nodeID) -} - -// GetExitingNodes returns nodes who have initiated a graceful exit, but have not completed it. -func (m *lockedOverlayCache) GetExitingNodes(ctx context.Context) (exitingNodes storj.NodeIDList, err error) { - m.Lock() - defer m.Unlock() - return m.db.GetExitingNodes(ctx) -} - -// GetExitingNodesLoopIncomplete returns exiting nodes who haven't completed the metainfo loop iteration. 
-func (m *lockedOverlayCache) GetExitingNodesLoopIncomplete(ctx context.Context) (exitingNodes storj.NodeIDList, err error) { - m.Lock() - defer m.Unlock() - return m.db.GetExitingNodesLoopIncomplete(ctx) -} - -// KnownOffline filters a set of nodes to offline nodes -func (m *lockedOverlayCache) KnownOffline(ctx context.Context, a1 *overlay.NodeCriteria, a2 storj.NodeIDList) (storj.NodeIDList, error) { - m.Lock() - defer m.Unlock() - return m.db.KnownOffline(ctx, a1, a2) -} - -// KnownUnreliableOrOffline filters a set of nodes to unhealth or offlines node, independent of new -func (m *lockedOverlayCache) KnownUnreliableOrOffline(ctx context.Context, a1 *overlay.NodeCriteria, a2 storj.NodeIDList) (storj.NodeIDList, error) { - m.Lock() - defer m.Unlock() - return m.db.KnownUnreliableOrOffline(ctx, a1, a2) -} - -// Paginate will page through the database nodes -func (m *lockedOverlayCache) Paginate(ctx context.Context, offset int64, limit int) ([]*overlay.NodeDossier, bool, error) { - m.Lock() - defer m.Unlock() - return m.db.Paginate(ctx, offset, limit) -} - -// PaginateQualified will page through the qualified nodes -func (m *lockedOverlayCache) PaginateQualified(ctx context.Context, offset int64, limit int) ([]*pb.Node, bool, error) { - m.Lock() - defer m.Unlock() - return m.db.PaginateQualified(ctx, offset, limit) -} - -// Reliable returns all nodes that are reliable -func (m *lockedOverlayCache) Reliable(ctx context.Context, a1 *overlay.NodeCriteria) (storj.NodeIDList, error) { - m.Lock() - defer m.Unlock() - return m.db.Reliable(ctx, a1) -} - -// SelectNewStorageNodes looks up nodes based on new node criteria -func (m *lockedOverlayCache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) ([]*pb.Node, error) { - m.Lock() - defer m.Unlock() - return m.db.SelectNewStorageNodes(ctx, count, criteria) -} - -// SelectStorageNodes looks up nodes based on criteria -func (m *lockedOverlayCache) SelectStorageNodes(ctx context.Context, 
count int, criteria *overlay.NodeCriteria) ([]*pb.Node, error) { - m.Lock() - defer m.Unlock() - return m.db.SelectStorageNodes(ctx, count, criteria) -} - -// Update updates node address -func (m *lockedOverlayCache) UpdateAddress(ctx context.Context, value *pb.Node, defaults overlay.NodeSelectionConfig) error { - m.Lock() - defer m.Unlock() - return m.db.UpdateAddress(ctx, value, defaults) -} - -// UpdateCheckIn updates a single storagenode's check-in stats. -func (m *lockedOverlayCache) UpdateCheckIn(ctx context.Context, node overlay.NodeCheckInInfo, config overlay.NodeSelectionConfig) (err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateCheckIn(ctx, node, config) -} - -// UpdateExitStatus is used to update a node's graceful exit status. -func (m *lockedOverlayCache) UpdateExitStatus(ctx context.Context, request *overlay.ExitStatusRequest) (stats *overlay.NodeStats, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateExitStatus(ctx, request) -} - -// UpdateNodeInfo updates node dossier with info requested from the node itself like node type, email, wallet, capacity, and version. -func (m *lockedOverlayCache) UpdateNodeInfo(ctx context.Context, node storj.NodeID, nodeInfo *pb.InfoResponse) (stats *overlay.NodeDossier, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateNodeInfo(ctx, node, nodeInfo) -} - -// UpdatePieceCounts sets the piece count field for the given node IDs. -func (m *lockedOverlayCache) UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int) (err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdatePieceCounts(ctx, pieceCounts) -} - -// UpdateStats all parts of single storagenode's stats. -func (m *lockedOverlayCache) UpdateStats(ctx context.Context, request *overlay.UpdateRequest) (stats *overlay.NodeStats, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateStats(ctx, request) -} - -// UpdateUptime updates a single storagenode's uptime stats. 
-func (m *lockedOverlayCache) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool, lambda float64, weight float64, uptimeDQ float64) (stats *overlay.NodeStats, err error) { - m.Lock() - defer m.Unlock() - return m.db.UpdateUptime(ctx, nodeID, isUp, lambda, weight, uptimeDQ) -} - -// PeerIdentities returns a storage for peer identities -func (m *locked) PeerIdentities() overlay.PeerIdentities { - m.Lock() - defer m.Unlock() - return &lockedPeerIdentities{m.Locker, m.db.PeerIdentities()} -} - -// lockedPeerIdentities implements locking wrapper for overlay.PeerIdentities -type lockedPeerIdentities struct { - sync.Locker - db overlay.PeerIdentities -} - -// BatchGet gets all nodes peer identities in a transaction -func (m *lockedPeerIdentities) BatchGet(ctx context.Context, a1 storj.NodeIDList) ([]*identity.PeerIdentity, error) { - m.Lock() - defer m.Unlock() - return m.db.BatchGet(ctx, a1) -} - -// Get gets peer identity -func (m *lockedPeerIdentities) Get(ctx context.Context, a1 storj.NodeID) (*identity.PeerIdentity, error) { - m.Lock() - defer m.Unlock() - return m.db.Get(ctx, a1) -} - -// Set adds a peer identity entry for a node -func (m *lockedPeerIdentities) Set(ctx context.Context, a1 storj.NodeID, a2 *identity.PeerIdentity) error { - m.Lock() - defer m.Unlock() - return m.db.Set(ctx, a1, a2) -} - -// ProjectAccounting returns database for storing information about project data use -func (m *locked) ProjectAccounting() accounting.ProjectAccounting { - m.Lock() - defer m.Unlock() - return &lockedProjectAccounting{m.Locker, m.db.ProjectAccounting()} -} - -// lockedProjectAccounting implements locking wrapper for accounting.ProjectAccounting -type lockedProjectAccounting struct { - sync.Locker - db accounting.ProjectAccounting -} - -// CreateStorageTally creates a record for BucketStorageTally in the accounting DB table -func (m *lockedProjectAccounting) CreateStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error { - m.Lock() - 
defer m.Unlock() - return m.db.CreateStorageTally(ctx, tally) -} - -// GetAllocatedBandwidthTotal returns the sum of GET bandwidth usage allocated for a projectID in the past time frame -func (m *lockedProjectAccounting) GetAllocatedBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (int64, error) { - m.Lock() - defer m.Unlock() - return m.db.GetAllocatedBandwidthTotal(ctx, projectID, from) -} - -// GetProjectUsageLimits returns project usage limit -func (m *lockedProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error) { - m.Lock() - defer m.Unlock() - return m.db.GetProjectUsageLimits(ctx, projectID) -} - -// GetStorageTotals returns the current inline and remote storage usage for a projectID -func (m *lockedProjectAccounting) GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) { - m.Lock() - defer m.Unlock() - return m.db.GetStorageTotals(ctx, projectID) -} - -// GetTallies retrieves all tallies -func (m *lockedProjectAccounting) GetTallies(ctx context.Context) ([]accounting.BucketTally, error) { - m.Lock() - defer m.Unlock() - return m.db.GetTallies(ctx) -} - -// SaveTallies saves the latest project info -func (m *lockedProjectAccounting) SaveTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) error { - m.Lock() - defer m.Unlock() - return m.db.SaveTallies(ctx, intervalStart, bucketTallies) -} - -// RepairQueue returns queue for segments that need repairing -func (m *locked) RepairQueue() queue.RepairQueue { - m.Lock() - defer m.Unlock() - return &lockedRepairQueue{m.Locker, m.db.RepairQueue()} -} - -// lockedRepairQueue implements locking wrapper for queue.RepairQueue -type lockedRepairQueue struct { - sync.Locker - db queue.RepairQueue -} - -// Count counts the number of segments in the repair queue. 
-func (m *lockedRepairQueue) Count(ctx context.Context) (count int, err error) { - m.Lock() - defer m.Unlock() - return m.db.Count(ctx) -} - -// Delete removes an injured segment. -func (m *lockedRepairQueue) Delete(ctx context.Context, s *pb.InjuredSegment) error { - m.Lock() - defer m.Unlock() - return m.db.Delete(ctx, s) -} - -// Insert adds an injured segment. -func (m *lockedRepairQueue) Insert(ctx context.Context, s *pb.InjuredSegment) error { - m.Lock() - defer m.Unlock() - return m.db.Insert(ctx, s) -} - -// Select gets an injured segment. -func (m *lockedRepairQueue) Select(ctx context.Context) (*pb.InjuredSegment, error) { - m.Lock() - defer m.Unlock() - return m.db.Select(ctx) -} - -// SelectN lists limit amount of injured segments. -func (m *lockedRepairQueue) SelectN(ctx context.Context, limit int) ([]pb.InjuredSegment, error) { - m.Lock() - defer m.Unlock() - return m.db.SelectN(ctx, limit) -} - -// returns database for marketing admin GUI -func (m *locked) Rewards() rewards.DB { - m.Lock() - defer m.Unlock() - return &lockedRewards{m.Locker, m.db.Rewards()} -} - -// lockedRewards implements locking wrapper for rewards.DB -type lockedRewards struct { - sync.Locker - db rewards.DB -} - -func (m *lockedRewards) Create(ctx context.Context, offer *rewards.NewOffer) (*rewards.Offer, error) { - m.Lock() - defer m.Unlock() - return m.db.Create(ctx, offer) -} - -func (m *lockedRewards) Finish(ctx context.Context, offerID int) error { - m.Lock() - defer m.Unlock() - return m.db.Finish(ctx, offerID) -} - -func (m *lockedRewards) GetActiveOffersByType(ctx context.Context, offerType rewards.OfferType) (rewards.Offers, error) { - m.Lock() - defer m.Unlock() - return m.db.GetActiveOffersByType(ctx, offerType) -} - -func (m *lockedRewards) ListAll(ctx context.Context) (rewards.Offers, error) { - m.Lock() - defer m.Unlock() - return m.db.ListAll(ctx) -} - -// StoragenodeAccounting returns database for storing information about storagenode use -func (m *locked) 
StoragenodeAccounting() accounting.StoragenodeAccounting { - m.Lock() - defer m.Unlock() - return &lockedStoragenodeAccounting{m.Locker, m.db.StoragenodeAccounting()} -} - -// lockedStoragenodeAccounting implements locking wrapper for accounting.StoragenodeAccounting -type lockedStoragenodeAccounting struct { - sync.Locker - db accounting.StoragenodeAccounting -} - -// DeleteTalliesBefore deletes all tallies prior to some time -func (m *lockedStoragenodeAccounting) DeleteTalliesBefore(ctx context.Context, latestRollup time.Time) error { - m.Lock() - defer m.Unlock() - return m.db.DeleteTalliesBefore(ctx, latestRollup) -} - -// GetBandwidthSince retrieves all bandwidth rollup entires since latestRollup -func (m *lockedStoragenodeAccounting) GetBandwidthSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeBandwidthRollup, error) { - m.Lock() - defer m.Unlock() - return m.db.GetBandwidthSince(ctx, latestRollup) -} - -// GetTallies retrieves all tallies -func (m *lockedStoragenodeAccounting) GetTallies(ctx context.Context) ([]*accounting.StoragenodeStorageTally, error) { - m.Lock() - defer m.Unlock() - return m.db.GetTallies(ctx) -} - -// GetTalliesSince retrieves all tallies since latestRollup -func (m *lockedStoragenodeAccounting) GetTalliesSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeStorageTally, error) { - m.Lock() - defer m.Unlock() - return m.db.GetTalliesSince(ctx, latestRollup) -} - -// LastTimestamp records and returns the latest last tallied time. 
-func (m *lockedStoragenodeAccounting) LastTimestamp(ctx context.Context, timestampType string) (time.Time, error) { - m.Lock() - defer m.Unlock() - return m.db.LastTimestamp(ctx, timestampType) -} - -// QueryPaymentInfo queries Nodes and Accounting_Rollup on nodeID -func (m *lockedStoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) ([]*accounting.CSVRow, error) { - m.Lock() - defer m.Unlock() - return m.db.QueryPaymentInfo(ctx, start, end) -} - -// QueryStorageNodeUsage returns slice of StorageNodeUsage for given period -func (m *lockedStoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, nodeID storj.NodeID, start time.Time, end time.Time) ([]accounting.StorageNodeUsage, error) { - m.Lock() - defer m.Unlock() - return m.db.QueryStorageNodeUsage(ctx, nodeID, start, end) -} - -// SaveRollup records tally and bandwidth rollup aggregations to the database -func (m *lockedStoragenodeAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error { - m.Lock() - defer m.Unlock() - return m.db.SaveRollup(ctx, latestTally, stats) -} - -// SaveTallies records tallies of data at rest -func (m *lockedStoragenodeAccounting) SaveTallies(ctx context.Context, latestTally time.Time, nodeData map[storj.NodeID]float64) error { - m.Lock() - defer m.Unlock() - return m.db.SaveTallies(ctx, latestTally, nodeData) -} diff --git a/satellite/satellitedb/lockedtx.go b/satellite/satellitedb/lockedtx.go deleted file mode 100644 index d336b4d98..000000000 --- a/satellite/satellitedb/lockedtx.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019 Storj Labs, Inc. -// See LICENSE for copying information. 
- -package satellitedb - -import ( - "context" - "sync" - - "storj.io/storj/satellite/console" -) - -// BeginTransaction is a method for opening transaction -func (m *lockedConsole) BeginTx(ctx context.Context) (console.DBTx, error) { - m.Lock() - db, err := m.db.BeginTx(ctx) - - txlocked := &lockedConsole{&sync.Mutex{}, db} - return &lockedTx{m, txlocked, db, sync.Once{}}, err -} - -// lockedTx extends Database with transaction scope -type lockedTx struct { - parent *lockedConsole - *lockedConsole - tx console.DBTx - once sync.Once -} - -// Commit is a method for committing and closing transaction -func (db *lockedTx) Commit() error { - err := db.tx.Commit() - db.once.Do(db.parent.Unlock) - return err -} - -// Rollback is a method for rollback and closing transaction -func (db *lockedTx) Rollback() error { - err := db.tx.Rollback() - db.once.Do(db.parent.Unlock) - return err -} diff --git a/satellite/satellitedb/orders.go b/satellite/satellitedb/orders.go index 7dda8bdef..0eceaffd9 100644 --- a/satellite/satellitedb/orders.go +++ b/satellite/satellitedb/orders.go @@ -10,13 +10,10 @@ import ( "sort" "time" - "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "github.com/skyrings/skyring-common/tools/uuid" "github.com/zeebo/errs" "storj.io/storj/internal/dbutil/pgutil" - "storj.io/storj/internal/dbutil/sqliteutil" "storj.io/storj/pkg/pb" "storj.io/storj/pkg/storj" "storj.io/storj/satellite/orders" @@ -64,7 +61,7 @@ func (db *ordersDB) UseSerialNumber(ctx context.Context, serialNumber storj.Seri ) _, err = db.db.ExecContext(ctx, statement, storageNodeID.Bytes(), serialNumber.Bytes()) if err != nil { - if pgutil.IsConstraintError(err) || sqliteutil.IsConstraintError(err) { + if pgutil.IsConstraintError(err) { return nil, orders.ErrUsingSerialNumber.New("serial number already used") } return nil, err @@ -142,42 +139,18 @@ func (db *ordersDB) UpdateBucketBandwidthInline(ctx context.Context, projectID u func (db *ordersDB) 
UpdateStoragenodeBandwidthAllocation(ctx context.Context, storageNodes []storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) (err error) { defer mon.Task()(&ctx)(&err) - switch t := db.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - statement := db.db.Rebind( - `INSERT INTO storagenode_bandwidth_rollups (storagenode_id, interval_start, interval_seconds, action, allocated, settled) - VALUES (?, ?, ?, ?, ?, ?) - ON CONFLICT(storagenode_id, interval_start, action) - DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + excluded.allocated`, - ) - for _, storageNode := range storageNodes { - _, err = db.db.ExecContext(ctx, statement, - storageNode.Bytes(), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0, - ) - if err != nil { - return Error.Wrap(err) - } - } + // sort nodes to avoid update deadlock + sort.Sort(storj.NodeIDList(storageNodes)) - case *pq.Driver: - // sort nodes to avoid update deadlock - sort.Sort(storj.NodeIDList(storageNodes)) + _, err = db.db.ExecContext(ctx, db.db.Rebind(` + INSERT INTO storagenode_bandwidth_rollups + (storagenode_id, interval_start, interval_seconds, action, allocated, settled) + SELECT unnest($1::bytea[]), $2, $3, $4, $5, $6 + ON CONFLICT(storagenode_id, interval_start, action) + DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + excluded.allocated + `), postgresNodeIDList(storageNodes), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0) - _, err := db.db.ExecContext(ctx, ` - INSERT INTO storagenode_bandwidth_rollups - (storagenode_id, interval_start, interval_seconds, action, allocated, settled) - SELECT unnest($1::bytea[]), $2, $3, $4, $5, $6 - ON CONFLICT(storagenode_id, interval_start, action) - DO UPDATE SET allocated = storagenode_bandwidth_rollups.allocated + excluded.allocated - `, postgresNodeIDList(storageNodes), intervalStart, defaultIntervalSeconds, action, uint64(amount), 0) - if err != nil { - return Error.Wrap(err) - } - 
default: - return Error.New("Unsupported database %t", t) - } - - return nil + return Error.Wrap(err) } // UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node for the given intervalStart time diff --git a/satellite/satellitedb/overlaycache.go b/satellite/satellitedb/overlaycache.go index e1dba1d2d..4247a8b8e 100644 --- a/satellite/satellitedb/overlaycache.go +++ b/satellite/satellitedb/overlaycache.go @@ -13,7 +13,6 @@ import ( "time" "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "github.com/zeebo/errs" monkit "gopkg.in/spacemonkeygo/monkit.v2" @@ -205,83 +204,6 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) { defer mon.Task()(&ctx)(&err) - switch t := cache.db.DB.Driver().(type) { - case *sqlite3.SQLiteDriver: - return cache.sqliteQueryNodesDistinct(ctx, excludedNodes, excludedIPs, count, safeQuery, distinctIP, args...) - case *pq.Driver: - return cache.postgresQueryNodesDistinct(ctx, excludedNodes, excludedIPs, count, safeQuery, distinctIP, args...) 
- default: - return []*pb.Node{}, Error.New("Unsupported database %t", t) - } -} - -func (cache *overlaycache) sqliteQueryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) { - defer mon.Task()(&ctx)(&err) - - if count == 0 { - return nil, nil - } - - safeExcludeNodes := "" - if len(excludedNodes) > 0 { - safeExcludeNodes = ` AND id NOT IN (?` + strings.Repeat(", ?", len(excludedNodes)-1) + `)` - for _, id := range excludedNodes { - args = append(args, id.Bytes()) - } - } - - safeExcludeIPs := "" - if len(excludedIPs) > 0 { - safeExcludeIPs = ` AND last_net NOT IN (?` + strings.Repeat(", ?", len(excludedIPs)-1) + `)` - for _, ip := range excludedIPs { - args = append(args, ip) - } - } - - args = append(args, count) - - rows, err := cache.db.Query(cache.db.Rebind(`SELECT id, type, address, last_net, - free_bandwidth, free_disk, total_audit_count, audit_success_count, - total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha, - audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta - FROM (SELECT *, Row_number() OVER(PARTITION BY last_net ORDER BY RANDOM()) rn - FROM nodes - `+safeQuery+safeExcludeNodes+safeExcludeIPs+`) n - WHERE rn = 1 - ORDER BY RANDOM() - LIMIT ?`), args...) 
- - if err != nil { - return nil, err - } - defer func() { err = errs.Combine(err, rows.Close()) }() - var nodes []*pb.Node - for rows.Next() { - dbNode := &dbx.Node{} - err = rows.Scan(&dbNode.Id, &dbNode.Type, - &dbNode.Address, &dbNode.LastNet, &dbNode.FreeBandwidth, &dbNode.FreeDisk, - &dbNode.TotalAuditCount, &dbNode.AuditSuccessCount, - &dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified, - &dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta, - &dbNode.UptimeReputationAlpha, &dbNode.UptimeReputationBeta, - ) - if err != nil { - return nil, err - } - - dossier, err := convertDBNode(ctx, dbNode) - if err != nil { - return nil, err - } - nodes = append(nodes, &dossier.Node) - } - - return nodes, rows.Err() -} - -func (cache *overlaycache) postgresQueryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) { - defer mon.Task()(&ctx)(&err) - if count == 0 { return nil, nil } @@ -375,41 +297,18 @@ func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.N // get offline nodes var rows *sql.Rows - switch t := cache.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - args := make([]interface{}, 0, len(nodeIds)+1) - for i := range nodeIds { - args = append(args, nodeIds[i].Bytes()) - } - args = append(args, time.Now().Add(-criteria.OnlineWindow)) - - rows, err = cache.db.Query(cache.db.Rebind(` - SELECT id FROM nodes - WHERE id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`) + rows, err = cache.db.Query(cache.db.Rebind(` + SELECT id FROM nodes + WHERE id = any($1::bytea[]) AND ( - last_contact_success < last_contact_failure AND last_contact_success < ? + last_contact_success < last_contact_failure AND last_contact_success < $2 ) - `), args...) 
- - case *pq.Driver: - rows, err = cache.db.Query(` - SELECT id FROM nodes - WHERE id = any($1::bytea[]) - AND ( - last_contact_success < last_contact_failure AND last_contact_success < $2 - ) - `, postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow), - ) - default: - return nil, Error.New("Unsupported database %t", t) - } - + `), postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow), + ) if err != nil { return nil, err } - defer func() { - err = errs.Combine(err, rows.Close()) - }() + defer func() { err = errs.Combine(err, rows.Close()) }() for rows.Next() { var id storj.NodeID @@ -432,39 +331,17 @@ func (cache *overlaycache) KnownUnreliableOrOffline(ctx context.Context, criteri // get reliable and online nodes var rows *sql.Rows - switch t := cache.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - args := make([]interface{}, 0, len(nodeIds)+3) - for i := range nodeIds { - args = append(args, nodeIds[i].Bytes()) - } - args = append(args, time.Now().Add(-criteria.OnlineWindow)) - - rows, err = cache.db.Query(cache.db.Rebind(` - SELECT id FROM nodes - WHERE id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`) + rows, err = cache.db.Query(cache.db.Rebind(` + SELECT id FROM nodes + WHERE id = any($1::bytea[]) AND disqualified IS NULL - AND (last_contact_success > ? OR last_contact_success > last_contact_failure) - `), args...) 
- - case *pq.Driver: - rows, err = cache.db.Query(` - SELECT id FROM nodes - WHERE id = any($1::bytea[]) - AND disqualified IS NULL - AND (last_contact_success > $2 OR last_contact_success > last_contact_failure) - `, postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow), - ) - default: - return nil, Error.New("Unsupported database %t", t) - } - + AND (last_contact_success > $2 OR last_contact_success > last_contact_failure) + `), postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow), + ) if err != nil { return nil, err } - defer func() { - err = errs.Combine(err, rows.Close()) - }() + defer func() { err = errs.Combine(err, rows.Close()) }() goodNodes := make(map[storj.NodeID]struct{}, len(nodeIds)) for rows.Next() { @@ -932,38 +809,22 @@ func (cache *overlaycache) UpdatePieceCounts(ctx context.Context, pieceCounts ma return counts[i].ID.Less(counts[k].ID) }) - switch t := cache.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - err = cache.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error { - query := tx.Rebind(`UPDATE nodes SET piece_count = ? 
WHERE id = ?`) - for _, count := range counts { - _, err := tx.Tx.ExecContext(ctx, query, count.Count, count.ID) - if err != nil { - return Error.Wrap(err) - } - } - return nil - }) - case *pq.Driver: - var nodeIDs []storj.NodeID - var countNumbers []int64 - for _, count := range counts { - nodeIDs = append(nodeIDs, count.ID) - countNumbers = append(countNumbers, count.Count) - } - - _, err = cache.db.ExecContext(ctx, ` - UPDATE nodes - SET piece_count = update.count - FROM ( - SELECT unnest($1::bytea[]) as id, unnest($2::bigint[]) as count - ) as update - WHERE nodes.id = update.id - `, postgresNodeIDList(nodeIDs), pq.Array(countNumbers)) - default: - return Error.New("Unsupported database %t", t) + var nodeIDs []storj.NodeID + var countNumbers []int64 + for _, count := range counts { + nodeIDs = append(nodeIDs, count.ID) + countNumbers = append(countNumbers, count.Count) } + _, err = cache.db.ExecContext(ctx, ` + UPDATE nodes + SET piece_count = update.count + FROM ( + SELECT unnest($1::bytea[]) as id, unnest($2::bigint[]) as count + ) as update + WHERE nodes.id = update.id + `, postgresNodeIDList(nodeIDs), pq.Array(countNumbers)) + return Error.Wrap(err) } @@ -1282,16 +1143,9 @@ func buildUpdateStatement(db *dbx.DB, update updateNodeStats) string { return "" } hexNodeID := hex.EncodeToString(update.NodeID.Bytes()) - switch db.DB.Driver().(type) { - case *sqlite3.SQLiteDriver: - sql += fmt.Sprintf(" WHERE nodes.id = X'%v';\n", hexNodeID) - sql += fmt.Sprintf("DELETE FROM pending_audits WHERE pending_audits.node_id = X'%v';\n", hexNodeID) - case *pq.Driver: - sql += fmt.Sprintf(" WHERE nodes.id = decode('%v', 'hex');\n", hexNodeID) - sql += fmt.Sprintf("DELETE FROM pending_audits WHERE pending_audits.node_id = decode('%v', 'hex');\n", hexNodeID) - default: - return "" - } + + sql += fmt.Sprintf(" WHERE nodes.id = decode('%v', 'hex');\n", hexNodeID) + sql += fmt.Sprintf("DELETE FROM pending_audits WHERE pending_audits.node_id = decode('%v', 'hex');\n", hexNodeID) 
return sql } @@ -1449,47 +1303,20 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC return Error.New("error UpdateCheckIn: missing the storage node address") } - switch t := cache.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - value := pb.Node{ - Id: node.NodeID, - Address: node.Address, - LastIp: node.LastIP, - } - err := cache.UpdateAddress(ctx, &value, config) - if err != nil { - return Error.Wrap(err) - } + // v is a single feedback value that allows us to update both alpha and beta + var v float64 = -1 + if node.IsUp { + v = 1 + } - _, err = cache.UpdateUptime(ctx, node.NodeID, node.IsUp, config.UptimeReputationLambda, config.UptimeReputationWeight, config.UptimeReputationDQ) - if err != nil { - return Error.Wrap(err) - } + uptimeReputationAlpha := config.UptimeReputationLambda*config.UptimeReputationAlpha0 + config.UptimeReputationWeight*(1+v)/2 + uptimeReputationBeta := config.UptimeReputationLambda*config.UptimeReputationBeta0 + config.UptimeReputationWeight*(1-v)/2 + semVer, err := version.NewSemVer(node.Version.GetVersion()) + if err != nil { + return Error.New("unable to convert version to semVer") + } - pbInfo := pb.InfoResponse{ - Operator: node.Operator, - Capacity: node.Capacity, - Type: pb.NodeType_STORAGE, - Version: node.Version, - } - _, err = cache.UpdateNodeInfo(ctx, node.NodeID, &pbInfo) - if err != nil { - return Error.Wrap(err) - } - case *pq.Driver: - // v is a single feedback value that allows us to update both alpha and beta - var v float64 = -1 - if node.IsUp { - v = 1 - } - uptimeReputationAlpha := config.UptimeReputationLambda*config.UptimeReputationAlpha0 + config.UptimeReputationWeight*(1+v)/2 - uptimeReputationBeta := config.UptimeReputationLambda*config.UptimeReputationBeta0 + config.UptimeReputationWeight*(1-v)/2 - semVer, err := version.NewSemVer(node.Version.GetVersion()) - if err != nil { - return Error.New("unable to convert version to semVer") - } - start := time.Now() - query := ` + 
query := ` INSERT INTO nodes ( id, address, last_net, protocol, type, @@ -1543,26 +1370,22 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC ELSE nodes.disqualified END; ` - _, err = cache.db.ExecContext(ctx, query, - // args $1 - $5 - node.NodeID.Bytes(), node.Address.GetAddress(), node.LastIP, node.Address.GetTransport(), int(pb.NodeType_STORAGE), - // args $6 - $9 - node.Operator.GetEmail(), node.Operator.GetWallet(), node.Capacity.GetFreeBandwidth(), node.Capacity.GetFreeDisk(), - // args $10 - node.IsUp, - // args $11 - $14 - config.AuditReputationAlpha0, config.AuditReputationBeta0, uptimeReputationAlpha, uptimeReputationBeta, - // args $15 - $17 - config.UptimeReputationDQ, config.UptimeReputationLambda, config.UptimeReputationWeight, - // args $18 - $23 - semVer.Major, semVer.Minor, semVer.Patch, node.Version.GetCommitHash(), node.Version.Timestamp, node.Version.GetRelease(), - ) - if err != nil { - return Error.Wrap(err) - } - mon.FloatVal("UpdateCheckIn query execution time (seconds)").Observe(time.Since(start).Seconds()) - default: - return Error.New("Unsupported database %t", t) + _, err = cache.db.ExecContext(ctx, query, + // args $1 - $5 + node.NodeID.Bytes(), node.Address.GetAddress(), node.LastIP, node.Address.GetTransport(), int(pb.NodeType_STORAGE), + // args $6 - $9 + node.Operator.GetEmail(), node.Operator.GetWallet(), node.Capacity.GetFreeBandwidth(), node.Capacity.GetFreeDisk(), + // args $10 + node.IsUp, + // args $11 - $14 + config.AuditReputationAlpha0, config.AuditReputationBeta0, uptimeReputationAlpha, uptimeReputationBeta, + // args $15 - $17 + config.UptimeReputationDQ, config.UptimeReputationLambda, config.UptimeReputationWeight, + // args $18 - $23 + semVer.Major, semVer.Minor, semVer.Patch, node.Version.GetCommitHash(), node.Version.Timestamp, node.Version.GetRelease(), + ) + if err != nil { + return Error.Wrap(err) } return nil diff --git a/satellite/satellitedb/repairqueue.go 
b/satellite/satellitedb/repairqueue.go index 7d7449c17..8e67dfca1 100644 --- a/satellite/satellitedb/repairqueue.go +++ b/satellite/satellitedb/repairqueue.go @@ -6,13 +6,8 @@ package satellitedb import ( "context" "database/sql" - "fmt" - - "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" "storj.io/storj/internal/dbutil/pgutil" - "storj.io/storj/internal/dbutil/sqliteutil" "storj.io/storj/pkg/pb" dbx "storj.io/storj/satellite/satellitedb/dbx" "storj.io/storj/storage" @@ -26,7 +21,7 @@ func (r *repairQueue) Insert(ctx context.Context, seg *pb.InjuredSegment) (err e defer mon.Task()(&ctx)(&err) _, err = r.db.ExecContext(ctx, r.db.Rebind(`INSERT INTO injuredsegments ( path, data ) VALUES ( ?, ? )`), seg.Path, seg) if err != nil { - if pgutil.IsConstraintError(err) || sqliteutil.IsConstraintError(err) { + if pgutil.IsConstraintError(err) { return nil // quietly fail on reinsert } return err @@ -34,63 +29,21 @@ func (r *repairQueue) Insert(ctx context.Context, seg *pb.InjuredSegment) (err e return nil } -func (r *repairQueue) postgresSelect(ctx context.Context) (seg *pb.InjuredSegment, err error) { +func (r *repairQueue) Select(ctx context.Context) (seg *pb.InjuredSegment, err error) { defer mon.Task()(&ctx)(&err) err = r.db.QueryRowContext(ctx, ` - UPDATE injuredsegments SET attempted = timezone('utc', now()) WHERE path = ( - SELECT path FROM injuredsegments - WHERE attempted IS NULL OR attempted < timezone('utc', now()) - interval '1 hour' - ORDER BY attempted NULLS FIRST FOR UPDATE SKIP LOCKED LIMIT 1 - ) RETURNING data`).Scan(&seg) - if err == sql.ErrNoRows { - err = storage.ErrEmptyQueue.New("") - } - return -} + UPDATE injuredsegments SET attempted = timezone('utc', now()) WHERE path = ( + SELECT path FROM injuredsegments + WHERE attempted IS NULL OR attempted < timezone('utc', now()) - interval '1 hour' + ORDER BY attempted NULLS FIRST FOR UPDATE SKIP LOCKED LIMIT 1 + ) RETURNING data`).Scan(&seg) -func (r *repairQueue) sqliteSelect(ctx 
context.Context) (seg *pb.InjuredSegment, err error) { - defer mon.Task()(&ctx)(&err) - err = r.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error { - var path []byte - err = tx.Tx.QueryRowContext(ctx, r.db.Rebind(` - SELECT path, data FROM injuredsegments - WHERE attempted IS NULL - OR attempted < datetime('now','-1 hours') - ORDER BY attempted LIMIT 1`)).Scan(&path, &seg) - if err != nil { - return err - } - res, err := tx.Tx.ExecContext(ctx, r.db.Rebind(`UPDATE injuredsegments SET attempted = datetime('now') WHERE path = ?`), path) - if err != nil { - return err - } - count, err := res.RowsAffected() - if err != nil { - return err - } - if count != 1 { - return fmt.Errorf("Expected 1, got %d segments deleted", count) - } - return nil - }) if err == sql.ErrNoRows { err = storage.ErrEmptyQueue.New("") } return seg, err } -func (r *repairQueue) Select(ctx context.Context) (seg *pb.InjuredSegment, err error) { - defer mon.Task()(&ctx)(&err) - switch t := r.db.DB.Driver().(type) { - case *sqlite3.SQLiteDriver: - return r.sqliteSelect(ctx) - case *pq.Driver: - return r.postgresSelect(ctx) - default: - return seg, fmt.Errorf("Unsupported database %t", t) - } -} - func (r *repairQueue) Delete(ctx context.Context, seg *pb.InjuredSegment) (err error) { defer mon.Task()(&ctx)(&err) _, err = r.db.ExecContext(ctx, r.db.Rebind(`DELETE FROM injuredsegments WHERE path = ?`), seg.Path) diff --git a/satellite/satellitedb/usercredits.go b/satellite/satellitedb/usercredits.go index 01e8f3468..c234031f2 100644 --- a/satellite/satellitedb/usercredits.go +++ b/satellite/satellitedb/usercredits.go @@ -8,8 +8,6 @@ import ( "database/sql" "time" - "github.com/lib/pq" - "github.com/mattn/go-sqlite3" "github.com/skyrings/skyring-common/tools/uuid" "github.com/zeebo/errs" @@ -91,28 +89,23 @@ func (c *usercredits) Create(ctx context.Context, userCredit console.CreateCredi result sql.Result statement string ) - switch t := c.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - 
statement = ` - INSERT INTO user_credits (user_id, offer_id, credits_earned_in_cents, credits_used_in_cents, expires_at, referred_by, type, created_at) - SELECT * FROM (VALUES (?, ?, ?, 0, ?, ?, ?, time('now'))) AS v - WHERE COALESCE((SELECT COUNT(offer_id) FROM user_credits WHERE offer_id = ? AND referred_by IS NOT NULL ) < NULLIF(?, 0) , ?); - ` - result, err = dbExec.ExecContext(ctx, c.db.Rebind(statement), userCredit.UserID[:], userCredit.OfferID, userCredit.CreditsEarned.Cents(), userCredit.ExpiresAt, referrerID, userCredit.Type, userCredit.OfferID, userCredit.OfferInfo.RedeemableCap, shouldCreate) - case *pq.Driver: - statement = ` + statement = ` INSERT INTO user_credits (user_id, offer_id, credits_earned_in_cents, credits_used_in_cents, expires_at, referred_by, type, created_at) SELECT * FROM (VALUES (?::bytea, ?::int, ?::int, 0, ?::timestamp, NULLIF(?::bytea, ?::bytea), ?::text, now())) AS v WHERE COALESCE((SELECT COUNT(offer_id) FROM user_credits WHERE offer_id = ? AND referred_by IS NOT NULL ) < NULLIF(?, 0), ?); ` - result, err = dbExec.ExecContext(ctx, c.db.Rebind(statement), userCredit.UserID[:], userCredit.OfferID, userCredit.CreditsEarned.Cents(), userCredit.ExpiresAt, referrerID, new([]byte), userCredit.Type, userCredit.OfferID, userCredit.OfferInfo.RedeemableCap, shouldCreate) - default: - return errs.New("unsupported database: %t", t) - } + result, err = dbExec.ExecContext(ctx, c.db.Rebind(statement), + userCredit.UserID[:], + userCredit.OfferID, + userCredit.CreditsEarned.Cents(), + userCredit.ExpiresAt, referrerID, new([]byte), + userCredit.Type, + userCredit.OfferID, + userCredit.OfferInfo.RedeemableCap, shouldCreate) if err != nil { // check to see if there's a constraint error - if pgutil.IsConstraintError(err) || err == sqlite3.ErrConstraint { + if pgutil.IsConstraintError(err) { _, err := dbExec.ExecContext(ctx, c.db.Rebind(`UPDATE offers SET status = ? AND expires_at = ? 
WHERE id = ?`), rewards.Done, time.Now().UTC(), userCredit.OfferID) if err != nil { return errs.Wrap(err) @@ -138,24 +131,11 @@ func (c *usercredits) Create(ctx context.Context, userCredit console.CreateCredi // UpdateEarnedCredits updates user credits after user activated their account func (c *usercredits) UpdateEarnedCredits(ctx context.Context, userID uuid.UUID) error { - var statement string - - switch t := c.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - statement = ` - UPDATE user_credits - SET credits_earned_in_cents = - (SELECT invitee_credit_in_cents FROM offers WHERE id = offer_id) - WHERE user_id = ? AND credits_earned_in_cents = 0` - case *pq.Driver: - statement = ` - UPDATE user_credits SET credits_earned_in_cents = offers.invitee_credit_in_cents - FROM offers - WHERE user_id = ? AND credits_earned_in_cents = 0 AND offer_id = offers.id - ` - default: - return errs.New("Unsupported database %t", t) - } + statement := ` + UPDATE user_credits SET credits_earned_in_cents = offers.invitee_credit_in_cents + FROM offers + WHERE user_id = ? AND credits_earned_in_cents = 0 AND offer_id = offers.id + ` result, err := c.db.DB.ExecContext(ctx, c.db.Rebind(statement), userID[:]) if err != nil { @@ -215,18 +195,9 @@ func (c *usercredits) UpdateAvailableCredits(ctx context.Context, creditsToCharg values = append(values, rowIds...) - var statement string - switch t := c.db.Driver().(type) { - case *sqlite3.SQLiteDriver: - statement = generateQuery(len(availableCredits), false) - case *pq.Driver: - statement = generateQuery(len(availableCredits), true) - default: - return creditsToCharge, errs.New("Unsupported database %t", t) - } + statement := generateQuery(len(availableCredits), true) - _, err = tx.Tx.ExecContext(ctx, c.db.Rebind(`UPDATE user_credits SET - credits_used_in_cents = CASE `+statement), values...) + _, err = tx.Tx.ExecContext(ctx, c.db.Rebind(`UPDATE user_credits SET credits_used_in_cents = CASE `+statement), values...) 
if err != nil { return creditsToCharge, errs.Wrap(errs.Combine(err, tx.Rollback())) } diff --git a/scripts/lockedgen.go b/scripts/lockedgen.go deleted file mode 100644 index eaeba0129..000000000 --- a/scripts/lockedgen.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright (C) 2019 Storj Labs, Inc. -// See LICENSE for copying information. - -// +build ignore - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/token" - "go/types" - "io/ioutil" - "os" - "path" - "sort" - "strings" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/imports" -) - -func main() { - var outputPath string - var packageName string - var typeFullyQualifedName string - - flag.StringVar(&outputPath, "o", "", "output file name") - flag.StringVar(&packageName, "p", "", "output package name") - flag.StringVar(&typeFullyQualifedName, "i", "", "interface to generate code for") - flag.Parse() - - if outputPath == "" || packageName == "" || typeFullyQualifedName == "" { - fmt.Println("missing argument") - os.Exit(1) - } - - var code Code - - code.Imports = map[string]bool{} - code.Ignore = map[string]bool{ - "error": true, - } - code.IgnoreMethods = map[string]bool{ - "BeginTx": true, - } - code.OutputPackage = packageName - code.Config = &packages.Config{ - Mode: packages.LoadAllSyntax, - } - code.Wrapped = map[string]bool{} - code.AdditionalNesting = map[string]int{"Console": 1} - - // e.g. storj.io/storj/satellite.DB - p := strings.LastIndexByte(typeFullyQualifedName, '.') - code.Package = typeFullyQualifedName[:p] // storj.io/storj/satellite - code.Type = typeFullyQualifedName[p+1:] // DB - code.QualifiedType = path.Base(code.Package) + "." 
+ code.Type - - var err error - code.Roots, err = packages.Load(code.Config, code.Package) - if err != nil { - panic(err) - } - - code.PrintLocked() - code.PrintPreamble() - - unformatted := code.Bytes() - - imports.LocalPrefix = "storj.io" - formatted, err := imports.Process(outputPath, unformatted, nil) - if err != nil { - fmt.Println(string(unformatted)) - panic(err) - } - - if outputPath == "" { - fmt.Println(string(formatted)) - return - } - - err = ioutil.WriteFile(outputPath, formatted, 0644) - if err != nil { - panic(err) - } -} - -// Methods is the common interface for types having methods. -type Methods interface { - Method(i int) *types.Func - NumMethods() int -} - -// Code is the information for generating the code. -type Code struct { - Config *packages.Config - Package string - Type string - QualifiedType string - Roots []*packages.Package - - OutputPackage string - - Imports map[string]bool - Ignore map[string]bool - IgnoreMethods map[string]bool - Wrapped map[string]bool - AdditionalNesting map[string]int - - Preamble bytes.Buffer - Source bytes.Buffer -} - -// Bytes returns all code merged together -func (code *Code) Bytes() []byte { - var all bytes.Buffer - all.Write(code.Preamble.Bytes()) - all.Write(code.Source.Bytes()) - return all.Bytes() -} - -// PrintPreamble creates package header and imports. -func (code *Code) PrintPreamble() { - w := &code.Preamble - fmt.Fprintf(w, "// Code generated by lockedgen using 'go generate'. DO NOT EDIT.\n\n") - fmt.Fprintf(w, "// Copyright (C) 2019 Storj Labs, Inc.\n") - fmt.Fprintf(w, "// See LICENSE for copying information.\n\n") - fmt.Fprintf(w, "package %v\n\n", code.OutputPackage) - fmt.Fprintf(w, "import (\n") - - var imports []string - for imp := range code.Imports { - imports = append(imports, imp) - } - sort.Strings(imports) - for _, imp := range imports { - fmt.Fprintf(w, " %q\n", imp) - } - fmt.Fprintf(w, ")\n\n") -} - -// PrintLocked writes locked wrapper and methods. 
-func (code *Code) PrintLocked() { - code.Imports["sync"] = true - code.Imports["storj.io/statellite"] = true - - code.Printf("// locked implements a locking wrapper around satellite.DB.\n") - code.Printf("type locked struct {\n") - code.Printf(" sync.Locker\n") - code.Printf(" db %v\n", code.QualifiedType) - code.Printf("}\n\n") - - code.Printf("// newLocked returns database wrapped with locker.\n") - code.Printf("func newLocked(db %v) %v {\n", code.QualifiedType, code.QualifiedType) - code.Printf(" return &locked{&sync.Mutex{}, db}\n") - code.Printf("}\n\n") - - // find the satellite.DB type info - dbObject := code.Roots[0].Types.Scope().Lookup(code.Type) - methods := dbObject.Type().Underlying().(Methods) - - for i := 0; i < methods.NumMethods(); i++ { - code.PrintLockedFunc("locked", methods.Method(i), code.AdditionalNesting[methods.Method(i).Name()]+1) - } -} - -// Printf writes formatted text to source. -func (code *Code) Printf(format string, a ...interface{}) { - fmt.Fprintf(&code.Source, format, a...) -} - -// PrintSignature prints method signature. -func (code *Code) PrintSignature(sig *types.Signature) { - code.PrintSignatureTuple(sig.Params(), true) - if sig.Results().Len() > 0 { - code.Printf(" ") - code.PrintSignatureTuple(sig.Results(), false) - } -} - -// PrintSignatureTuple prints method tuple, params or results. -func (code *Code) PrintSignatureTuple(tuple *types.Tuple, needsNames bool) { - code.Printf("(") - defer code.Printf(")") - - for i := 0; i < tuple.Len(); i++ { - if i > 0 { - code.Printf(", ") - } - - param := tuple.At(i) - if code.PrintName(tuple.At(i), i, needsNames) { - code.Printf(" ") - } - code.PrintType(param.Type()) - } -} - -// PrintCall prints a call using the specified signature. 
-func (code *Code) PrintCall(sig *types.Signature) { - code.Printf("(") - defer code.Printf(")") - - params := sig.Params() - for i := 0; i < params.Len(); i++ { - if i != 0 { - code.Printf(", ") - } - code.PrintName(params.At(i), i, true) - } -} - -// PrintName prints an appropriate name from signature tuple. -func (code *Code) PrintName(v *types.Var, index int, needsNames bool) bool { - name := v.Name() - if needsNames && name == "" { - if v.Type().String() == "context.Context" { - code.Printf("ctx") - return true - } - code.Printf("a%d", index) - return true - } - code.Printf("%s", name) - return name != "" -} - -// PrintType prints short form of type t. -func (code *Code) PrintType(t types.Type) { - types.WriteType(&code.Source, t, (*types.Package).Name) -} - -func typeName(typ types.Type) string { - var body bytes.Buffer - types.WriteType(&body, typ, (*types.Package).Name) - return body.String() -} - -// IncludeImports imports all types referenced in the signature. -func (code *Code) IncludeImports(sig *types.Signature) { - var tmp bytes.Buffer - types.WriteSignature(&tmp, sig, func(p *types.Package) string { - code.Imports[p.Path()] = true - return p.Name() - }) -} - -// NeedsWrapper checks whether method result needs a wrapper type. -func (code *Code) NeedsWrapper(method *types.Func) bool { - if code.IgnoreMethods[method.Name()] { - return false - } - - sig := method.Type().Underlying().(*types.Signature) - return sig.Results().Len() == 1 && !code.Ignore[sig.Results().At(0).Type().String()] -} - -// WrapperTypeName returns an appropriate name for the wrapper type. -func (code *Code) WrapperTypeName(method *types.Func) string { - return "locked" + method.Name() -} - -// PrintLockedFunc prints a method with locking and defers the actual logic to method. 
-func (code *Code) PrintLockedFunc(receiverType string, method *types.Func, nestingDepth int) { - if code.IgnoreMethods[method.Name()] { - return - } - - sig := method.Type().Underlying().(*types.Signature) - code.IncludeImports(sig) - - doc := strings.TrimSpace(code.MethodDoc(method)) - if doc != "" { - for _, line := range strings.Split(doc, "\n") { - code.Printf("// %s\n", line) - } - } - code.Printf("func (m *%s) %s", receiverType, method.Name()) - code.PrintSignature(sig) - code.Printf(" {\n") - - code.Printf(" m.Lock(); defer m.Unlock()\n") - if !code.NeedsWrapper(method) { - code.Printf(" return m.db.%s", method.Name()) - code.PrintCall(sig) - code.Printf("\n") - code.Printf("}\n\n") - return - } - - code.Printf(" return &%s{m.Locker, ", code.WrapperTypeName(method)) - code.Printf("m.db.%s", method.Name()) - code.PrintCall(sig) - code.Printf("}\n") - code.Printf("}\n\n") - - if nestingDepth > 0 { - code.PrintWrapper(method, nestingDepth-1) - } -} - -// PrintWrapper prints wrapper for the result type of method. -func (code *Code) PrintWrapper(method *types.Func, nestingDepth int) { - sig := method.Type().Underlying().(*types.Signature) - results := sig.Results() - result := results.At(0).Type() - - receiverType := code.WrapperTypeName(method) - - if code.Wrapped[receiverType] { - return - } - code.Wrapped[receiverType] = true - - code.Printf("// %s implements locking wrapper for %s\n", receiverType, typeName(result)) - code.Printf("type %s struct {\n", receiverType) - code.Printf(" sync.Locker\n") - code.Printf(" db %s\n", typeName(result)) - code.Printf("}\n\n") - - methods := result.Underlying().(Methods) - for i := 0; i < methods.NumMethods(); i++ { - code.PrintLockedFunc(receiverType, methods.Method(i), nestingDepth) - } -} - -// MethodDoc finds documentation for the specified method. 
-func (code *Code) MethodDoc(method *types.Func) string { - file := code.FindASTFile(method.Pos()) - if file == nil { - return "" - } - - path, exact := astutil.PathEnclosingInterval(file, method.Pos(), method.Pos()) - if !exact { - return "" - } - - for _, p := range path { - switch decl := p.(type) { - case *ast.Field: - return decl.Doc.Text() - case *ast.GenDecl: - return decl.Doc.Text() - case *ast.FuncDecl: - return decl.Doc.Text() - } - } - - return "" -} - -// FindASTFile finds the *ast.File at the specified position. -func (code *Code) FindASTFile(pos token.Pos) *ast.File { - seen := map[*packages.Package]bool{} - - // find searches pos recursively from p and its dependencies. - var find func(p *packages.Package) *ast.File - find = func(p *packages.Package) *ast.File { - if seen[p] { - return nil - } - seen[p] = true - - for _, file := range p.Syntax { - if file.Pos() <= pos && pos <= file.End() { - return file - } - } - - for _, dep := range p.Imports { - if file := find(dep); file != nil { - return file - } - } - - return nil - } - - for _, root := range code.Roots { - if file := find(root); file != nil { - return file - } - } - return nil -}